@huggingface/inference 2.6.7 → 2.7.0

This diff compares the contents of two publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
Files changed (129)
  1. package/README.md +126 -27
  2. package/dist/index.cjs +78 -12
  3. package/dist/index.js +76 -12
  4. package/dist/src/HfInference.d.ts +28 -0
  5. package/dist/src/HfInference.d.ts.map +1 -0
  6. package/dist/src/index.d.ts +5 -0
  7. package/dist/src/index.d.ts.map +1 -0
  8. package/dist/src/lib/InferenceOutputError.d.ts +4 -0
  9. package/dist/src/lib/InferenceOutputError.d.ts.map +1 -0
  10. package/dist/src/lib/getDefaultTask.d.ts +12 -0
  11. package/dist/src/lib/getDefaultTask.d.ts.map +1 -0
  12. package/dist/src/lib/isUrl.d.ts +2 -0
  13. package/dist/src/lib/isUrl.d.ts.map +1 -0
  14. package/dist/src/lib/makeRequestOptions.d.ts +18 -0
  15. package/dist/src/lib/makeRequestOptions.d.ts.map +1 -0
  16. package/dist/src/tasks/audio/audioClassification.d.ts +24 -0
  17. package/dist/src/tasks/audio/audioClassification.d.ts.map +1 -0
  18. package/dist/src/tasks/audio/audioToAudio.d.ts +28 -0
  19. package/dist/src/tasks/audio/audioToAudio.d.ts.map +1 -0
  20. package/dist/src/tasks/audio/automaticSpeechRecognition.d.ts +19 -0
  21. package/dist/src/tasks/audio/automaticSpeechRecognition.d.ts.map +1 -0
  22. package/dist/src/tasks/audio/textToSpeech.d.ts +14 -0
  23. package/dist/src/tasks/audio/textToSpeech.d.ts.map +1 -0
  24. package/dist/src/tasks/custom/request.d.ts +13 -0
  25. package/dist/src/tasks/custom/request.d.ts.map +1 -0
  26. package/dist/src/tasks/custom/streamingRequest.d.ts +13 -0
  27. package/dist/src/tasks/custom/streamingRequest.d.ts.map +1 -0
  28. package/dist/src/tasks/cv/imageClassification.d.ts +24 -0
  29. package/dist/src/tasks/cv/imageClassification.d.ts.map +1 -0
  30. package/dist/src/tasks/cv/imageSegmentation.d.ts +28 -0
  31. package/dist/src/tasks/cv/imageSegmentation.d.ts.map +1 -0
  32. package/dist/src/tasks/cv/imageToImage.d.ts +55 -0
  33. package/dist/src/tasks/cv/imageToImage.d.ts.map +1 -0
  34. package/dist/src/tasks/cv/imageToText.d.ts +18 -0
  35. package/dist/src/tasks/cv/imageToText.d.ts.map +1 -0
  36. package/dist/src/tasks/cv/objectDetection.d.ts +33 -0
  37. package/dist/src/tasks/cv/objectDetection.d.ts.map +1 -0
  38. package/dist/src/tasks/cv/textToImage.d.ts +36 -0
  39. package/dist/src/tasks/cv/textToImage.d.ts.map +1 -0
  40. package/dist/src/tasks/cv/zeroShotImageClassification.d.ts +26 -0
  41. package/dist/src/tasks/cv/zeroShotImageClassification.d.ts.map +1 -0
  42. package/dist/src/tasks/index.d.ts +32 -0
  43. package/dist/src/tasks/index.d.ts.map +1 -0
  44. package/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts +35 -0
  45. package/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts.map +1 -0
  46. package/dist/src/tasks/multimodal/visualQuestionAnswering.d.ts +27 -0
  47. package/dist/src/tasks/multimodal/visualQuestionAnswering.d.ts.map +1 -0
  48. package/dist/src/tasks/nlp/chatCompletion.d.ts +7 -0
  49. package/dist/src/tasks/nlp/chatCompletion.d.ts.map +1 -0
  50. package/dist/src/tasks/nlp/chatCompletionStream.d.ts +7 -0
  51. package/dist/src/tasks/nlp/chatCompletionStream.d.ts.map +1 -0
  52. package/dist/src/tasks/nlp/featureExtraction.d.ts +19 -0
  53. package/dist/src/tasks/nlp/featureExtraction.d.ts.map +1 -0
  54. package/dist/src/tasks/nlp/fillMask.d.ts +27 -0
  55. package/dist/src/tasks/nlp/fillMask.d.ts.map +1 -0
  56. package/dist/src/tasks/nlp/questionAnswering.d.ts +30 -0
  57. package/dist/src/tasks/nlp/questionAnswering.d.ts.map +1 -0
  58. package/dist/src/tasks/nlp/sentenceSimilarity.d.ts +19 -0
  59. package/dist/src/tasks/nlp/sentenceSimilarity.d.ts.map +1 -0
  60. package/dist/src/tasks/nlp/summarization.d.ts +48 -0
  61. package/dist/src/tasks/nlp/summarization.d.ts.map +1 -0
  62. package/dist/src/tasks/nlp/tableQuestionAnswering.d.ts +36 -0
  63. package/dist/src/tasks/nlp/tableQuestionAnswering.d.ts.map +1 -0
  64. package/dist/src/tasks/nlp/textClassification.d.ts +22 -0
  65. package/dist/src/tasks/nlp/textClassification.d.ts.map +1 -0
  66. package/dist/src/tasks/nlp/textGeneration.d.ts +8 -0
  67. package/dist/src/tasks/nlp/textGeneration.d.ts.map +1 -0
  68. package/dist/src/tasks/nlp/textGenerationStream.d.ts +81 -0
  69. package/dist/src/tasks/nlp/textGenerationStream.d.ts.map +1 -0
  70. package/dist/src/tasks/nlp/tokenClassification.d.ts +51 -0
  71. package/dist/src/tasks/nlp/tokenClassification.d.ts.map +1 -0
  72. package/dist/src/tasks/nlp/translation.d.ts +19 -0
  73. package/dist/src/tasks/nlp/translation.d.ts.map +1 -0
  74. package/dist/src/tasks/nlp/zeroShotClassification.d.ts +28 -0
  75. package/dist/src/tasks/nlp/zeroShotClassification.d.ts.map +1 -0
  76. package/dist/src/tasks/tabular/tabularClassification.d.ts +20 -0
  77. package/dist/src/tasks/tabular/tabularClassification.d.ts.map +1 -0
  78. package/dist/src/tasks/tabular/tabularRegression.d.ts +20 -0
  79. package/dist/src/tasks/tabular/tabularRegression.d.ts.map +1 -0
  80. package/dist/src/types.d.ts +69 -0
  81. package/dist/src/types.d.ts.map +1 -0
  82. package/dist/src/utils/base64FromBytes.d.ts +2 -0
  83. package/dist/src/utils/base64FromBytes.d.ts.map +1 -0
  84. package/dist/src/utils/distributive-omit.d.ts +9 -0
  85. package/dist/src/utils/distributive-omit.d.ts.map +1 -0
  86. package/dist/src/utils/isBackend.d.ts +2 -0
  87. package/dist/src/utils/isBackend.d.ts.map +1 -0
  88. package/dist/src/utils/isFrontend.d.ts +2 -0
  89. package/dist/src/utils/isFrontend.d.ts.map +1 -0
  90. package/dist/src/utils/omit.d.ts +5 -0
  91. package/dist/src/utils/omit.d.ts.map +1 -0
  92. package/dist/src/utils/pick.d.ts +5 -0
  93. package/dist/src/utils/pick.d.ts.map +1 -0
  94. package/dist/src/utils/toArray.d.ts +2 -0
  95. package/dist/src/utils/toArray.d.ts.map +1 -0
  96. package/dist/src/utils/typedInclude.d.ts +2 -0
  97. package/dist/src/utils/typedInclude.d.ts.map +1 -0
  98. package/dist/src/vendor/fetch-event-source/parse.d.ts +69 -0
  99. package/dist/src/vendor/fetch-event-source/parse.d.ts.map +1 -0
  100. package/dist/src/vendor/fetch-event-source/parse.spec.d.ts +2 -0
  101. package/dist/src/vendor/fetch-event-source/parse.spec.d.ts.map +1 -0
  102. package/dist/test/HfInference.spec.d.ts +2 -0
  103. package/dist/test/HfInference.spec.d.ts.map +1 -0
  104. package/dist/test/expect-closeto.d.ts +2 -0
  105. package/dist/test/expect-closeto.d.ts.map +1 -0
  106. package/dist/test/test-files.d.ts +2 -0
  107. package/dist/test/test-files.d.ts.map +1 -0
  108. package/dist/test/vcr.d.ts +2 -0
  109. package/dist/test/vcr.d.ts.map +1 -0
  110. package/package.json +9 -7
  111. package/src/HfInference.ts +4 -4
  112. package/src/lib/makeRequestOptions.ts +17 -7
  113. package/src/tasks/custom/request.ts +5 -0
  114. package/src/tasks/custom/streamingRequest.ts +8 -0
  115. package/src/tasks/cv/imageToImage.ts +1 -1
  116. package/src/tasks/cv/zeroShotImageClassification.ts +1 -1
  117. package/src/tasks/index.ts +2 -0
  118. package/src/tasks/multimodal/documentQuestionAnswering.ts +1 -1
  119. package/src/tasks/multimodal/visualQuestionAnswering.ts +1 -1
  120. package/src/tasks/nlp/chatCompletion.ts +32 -0
  121. package/src/tasks/nlp/chatCompletionStream.ts +17 -0
  122. package/src/tasks/nlp/textGeneration.ts +2 -202
  123. package/src/tasks/nlp/textGenerationStream.ts +2 -1
  124. package/src/types.ts +14 -3
  125. package/src/utils/base64FromBytes.ts +11 -0
  126. package/src/utils/{distributive-omit.d.ts → distributive-omit.ts} +0 -2
  127. package/src/utils/isBackend.ts +6 -0
  128. package/src/utils/isFrontend.ts +3 -0
  129. package/dist/index.d.ts +0 -1536
@@ -0,0 +1,18 @@
+ import type { InferenceTask, Options, RequestArgs } from "../types";
+ /**
+  * Helper that prepares request arguments
+  */
+ export declare function makeRequestOptions(args: RequestArgs & {
+     data?: Blob | ArrayBuffer;
+     stream?: boolean;
+ }, options?: Options & {
+     /** When a model can be used for multiple tasks, and we want to run a non-default task */
+     forceTask?: string | InferenceTask;
+     /** To load default model if needed */
+     taskHint?: InferenceTask;
+     chatCompletion?: boolean;
+ }): Promise<{
+     url: string;
+     info: RequestInit;
+ }>;
+ //# sourceMappingURL=makeRequestOptions.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"makeRequestOptions.d.ts","sourceRoot":"","sources":["../../../src/lib/makeRequestOptions.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,UAAU,CAAC;AAYpE;;GAEG;AACH,wBAAsB,kBAAkB,CACvC,IAAI,EAAE,WAAW,GAAG;IACnB,IAAI,CAAC,EAAE,IAAI,GAAG,WAAW,CAAC;IAC1B,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB,EACD,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,SAAS,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IACnC,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,OAAO,CAAC;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,WAAW,CAAA;CAAE,CAAC,CAkG7C"}
@@ -0,0 +1,24 @@
+ import type { BaseArgs, Options } from "../../types";
+ export type AudioClassificationArgs = BaseArgs & {
+     /**
+      * Binary audio data
+      */
+     data: Blob | ArrayBuffer;
+ };
+ export interface AudioClassificationOutputValue {
+     /**
+      * The label for the class (model specific)
+      */
+     label: string;
+     /**
+      * A float that represents how likely it is that the audio file belongs to this class.
+      */
+     score: number;
+ }
+ export type AudioClassificationReturn = AudioClassificationOutputValue[];
+ /**
+  * This task reads some audio input and outputs the likelihood of classes.
+  * Recommended model: superb/hubert-large-superb-er
+  */
+ export declare function audioClassification(args: AudioClassificationArgs, options?: Options): Promise<AudioClassificationReturn>;
+ //# sourceMappingURL=audioClassification.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"audioClassification.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio/audioClassification.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,uBAAuB,GAAG,QAAQ,GAAG;IAChD;;OAEG;IACH,IAAI,EAAE,IAAI,GAAG,WAAW,CAAC;CACzB,CAAC;AAEF,MAAM,WAAW,8BAA8B;IAC9C;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IAEd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;CACd;AAED,MAAM,MAAM,yBAAyB,GAAG,8BAA8B,EAAE,CAAC;AAEzE;;;GAGG;AACH,wBAAsB,mBAAmB,CACxC,IAAI,EAAE,uBAAuB,EAC7B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,yBAAyB,CAAC,CAWpC"}
@@ -0,0 +1,28 @@
+ import type { BaseArgs, Options } from "../../types";
+ export type AudioToAudioArgs = BaseArgs & {
+     /**
+      * Binary audio data
+      */
+     data: Blob | ArrayBuffer;
+ };
+ export interface AudioToAudioOutputValue {
+     /**
+      * The label for the audio output (model specific)
+      */
+     label: string;
+     /**
+      * Base64 encoded audio output.
+      */
+     blob: string;
+     /**
+      * Content-type for blob, e.g. audio/flac
+      */
+     "content-type": string;
+ }
+ export type AudioToAudioReturn = AudioToAudioOutputValue[];
+ /**
+  * This task reads some audio input and outputs one or multiple audio files.
+  * Example model: speechbrain/sepformer-wham does audio source separation.
+  */
+ export declare function audioToAudio(args: AudioToAudioArgs, options?: Options): Promise<AudioToAudioReturn>;
+ //# sourceMappingURL=audioToAudio.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"audioToAudio.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio/audioToAudio.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,gBAAgB,GAAG,QAAQ,GAAG;IACzC;;OAEG;IACH,IAAI,EAAE,IAAI,GAAG,WAAW,CAAC;CACzB,CAAC;AAEF,MAAM,WAAW,uBAAuB;IACvC;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IAEd;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;IAEb;;OAEG;IACH,cAAc,EAAE,MAAM,CAAC;CACvB;AAED,MAAM,MAAM,kBAAkB,GAAG,uBAAuB,EAAE,CAAC;AAE3D;;;GAGG;AACH,wBAAsB,YAAY,CAAC,IAAI,EAAE,gBAAgB,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,kBAAkB,CAAC,CAczG"}
@@ -0,0 +1,19 @@
+ import type { BaseArgs, Options } from "../../types";
+ export type AutomaticSpeechRecognitionArgs = BaseArgs & {
+     /**
+      * Binary audio data
+      */
+     data: Blob | ArrayBuffer;
+ };
+ export interface AutomaticSpeechRecognitionOutput {
+     /**
+      * The text that was recognized from the audio
+      */
+     text: string;
+ }
+ /**
+  * This task reads some audio input and outputs the words spoken within the audio file.
+  * Recommended model (English language): facebook/wav2vec2-large-960h-lv60-self
+  */
+ export declare function automaticSpeechRecognition(args: AutomaticSpeechRecognitionArgs, options?: Options): Promise<AutomaticSpeechRecognitionOutput>;
+ //# sourceMappingURL=automaticSpeechRecognition.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"automaticSpeechRecognition.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio/automaticSpeechRecognition.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,8BAA8B,GAAG,QAAQ,GAAG;IACvD;;OAEG;IACH,IAAI,EAAE,IAAI,GAAG,WAAW,CAAC;CACzB,CAAC;AAEF,MAAM,WAAW,gCAAgC;IAChD;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;CACb;AAED;;;GAGG;AACH,wBAAsB,0BAA0B,CAC/C,IAAI,EAAE,8BAA8B,EACpC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,gCAAgC,CAAC,CAU3C"}
@@ -0,0 +1,14 @@
+ import type { BaseArgs, Options } from "../../types";
+ export type TextToSpeechArgs = BaseArgs & {
+     /**
+      * The text to generate audio from
+      */
+     inputs: string;
+ };
+ export type TextToSpeechOutput = Blob;
+ /**
+  * This task synthesizes audio of a voice pronouncing a given text.
+  * Recommended model: espnet/kan-bayashi_ljspeech_vits
+  */
+ export declare function textToSpeech(args: TextToSpeechArgs, options?: Options): Promise<TextToSpeechOutput>;
+ //# sourceMappingURL=textToSpeech.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"textToSpeech.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio/textToSpeech.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,gBAAgB,GAAG,QAAQ,GAAG;IACzC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;CACf,CAAC;AAEF,MAAM,MAAM,kBAAkB,GAAG,IAAI,CAAC;AAEtC;;;GAGG;AACH,wBAAsB,YAAY,CAAC,IAAI,EAAE,gBAAgB,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,kBAAkB,CAAC,CAUzG"}
@@ -0,0 +1,13 @@
+ import type { InferenceTask, Options, RequestArgs } from "../../types";
+ /**
+  * Primitive to make custom calls to Inference Endpoints
+  */
+ export declare function request<T>(args: RequestArgs, options?: Options & {
+     /** When a model can be used for multiple tasks, and we want to run a non-default task */
+     task?: string | InferenceTask;
+     /** To load default model if needed */
+     taskHint?: InferenceTask;
+     /** Is chat completion compatible */
+     chatCompletion?: boolean;
+ }): Promise<T>;
+ //# sourceMappingURL=request.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"request.d.ts","sourceRoot":"","sources":["../../../../src/tasks/custom/request.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AAGvE;;GAEG;AACH,wBAAsB,OAAO,CAAC,CAAC,EAC9B,IAAI,EAAE,WAAW,EACjB,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,IAAI,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IAC9B,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,oCAAoC;IACpC,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,OAAO,CAAC,CAAC,CAAC,CA6BZ"}
@@ -0,0 +1,13 @@
+ import type { InferenceTask, Options, RequestArgs } from "../../types";
+ /**
+  * Primitive to make custom inference calls that expect server-sent events, returning the response through a generator
+  */
+ export declare function streamingRequest<T>(args: RequestArgs, options?: Options & {
+     /** When a model can be used for multiple tasks, and we want to run a non-default task */
+     task?: string | InferenceTask;
+     /** To load default model if needed */
+     taskHint?: InferenceTask;
+     /** Is chat completion compatible */
+     chatCompletion?: boolean;
+ }): AsyncGenerator<T>;
+ //# sourceMappingURL=streamingRequest.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"streamingRequest.d.ts","sourceRoot":"","sources":["../../../../src/tasks/custom/streamingRequest.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AAKvE;;GAEG;AACH,wBAAuB,gBAAgB,CAAC,CAAC,EACxC,IAAI,EAAE,WAAW,EACjB,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,IAAI,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IAC9B,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,oCAAoC;IACpC,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,cAAc,CAAC,CAAC,CAAC,CAuEnB"}
@@ -0,0 +1,24 @@
+ import type { BaseArgs, Options } from "../../types";
+ export type ImageClassificationArgs = BaseArgs & {
+     /**
+      * Binary image data
+      */
+     data: Blob | ArrayBuffer;
+ };
+ export interface ImageClassificationOutputValue {
+     /**
+      * The label for the class (model specific)
+      */
+     label: string;
+     /**
+      * A float that represents how likely it is that the image file belongs to this class.
+      */
+     score: number;
+ }
+ export type ImageClassificationOutput = ImageClassificationOutputValue[];
+ /**
+  * This task reads some image input and outputs the likelihood of classes.
+  * Recommended model: google/vit-base-patch16-224
+  */
+ export declare function imageClassification(args: ImageClassificationArgs, options?: Options): Promise<ImageClassificationOutput>;
+ //# sourceMappingURL=imageClassification.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"imageClassification.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/imageClassification.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,uBAAuB,GAAG,QAAQ,GAAG;IAChD;;OAEG;IACH,IAAI,EAAE,IAAI,GAAG,WAAW,CAAC;CACzB,CAAC;AAEF,MAAM,WAAW,8BAA8B;IAC9C;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;CACd;AAED,MAAM,MAAM,yBAAyB,GAAG,8BAA8B,EAAE,CAAC;AAEzE;;;GAGG;AACH,wBAAsB,mBAAmB,CACxC,IAAI,EAAE,uBAAuB,EAC7B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,yBAAyB,CAAC,CAWpC"}
@@ -0,0 +1,28 @@
+ import type { BaseArgs, Options } from "../../types";
+ export type ImageSegmentationArgs = BaseArgs & {
+     /**
+      * Binary image data
+      */
+     data: Blob | ArrayBuffer;
+ };
+ export interface ImageSegmentationOutputValue {
+     /**
+      * The label for the class (model specific) of a segment.
+      */
+     label: string;
+     /**
+      * A str (base64 str of a single channel black-and-white img) representing the mask of a segment.
+      */
+     mask: string;
+     /**
+      * A float that represents how likely it is that the detected object belongs to the given class.
+      */
+     score: number;
+ }
+ export type ImageSegmentationOutput = ImageSegmentationOutputValue[];
+ /**
+  * This task reads some image input and outputs the likelihood of classes & bounding boxes of detected objects.
+  * Recommended model: facebook/detr-resnet-50-panoptic
+  */
+ export declare function imageSegmentation(args: ImageSegmentationArgs, options?: Options): Promise<ImageSegmentationOutput>;
+ //# sourceMappingURL=imageSegmentation.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"imageSegmentation.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/imageSegmentation.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,qBAAqB,GAAG,QAAQ,GAAG;IAC9C;;OAEG;IACH,IAAI,EAAE,IAAI,GAAG,WAAW,CAAC;CACzB,CAAC;AAEF,MAAM,WAAW,4BAA4B;IAC5C;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;IACb;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;CACd;AAED,MAAM,MAAM,uBAAuB,GAAG,4BAA4B,EAAE,CAAC;AAErE;;;GAGG;AACH,wBAAsB,iBAAiB,CACtC,IAAI,EAAE,qBAAqB,EAC3B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,uBAAuB,CAAC,CAYlC"}
@@ -0,0 +1,55 @@
+ import type { BaseArgs, Options } from "../../types";
+ export type ImageToImageArgs = BaseArgs & {
+     /**
+      * The initial image condition
+      *
+      **/
+     inputs: Blob | ArrayBuffer;
+     parameters?: {
+         /**
+          * The text prompt to guide the image generation.
+          */
+         prompt?: string;
+         /**
+          * The strength param only works for SD img2img and alt diffusion img2img models.
+          * Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
+          * will be used as a starting point, adding more noise to it the larger the `strength`. The number of
+          * denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
+          * be maximum and the denoising process will run for the full number of iterations specified in
+          * `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
+          **/
+         strength?: number;
+         /**
+          * An optional negative prompt for the image generation
+          */
+         negative_prompt?: string;
+         /**
+          * The height in pixels of the generated image
+          */
+         height?: number;
+         /**
+          * The width in pixels of the generated image
+          */
+         width?: number;
+         /**
+          * The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.
+          */
+         num_inference_steps?: number;
+         /**
+          * Guidance scale: a higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality.
+          */
+         guidance_scale?: number;
+         /**
+          * guess_mode only works for ControlNet models, defaults to False. In this mode, the ControlNet encoder will try its best to recognize the content of the input image even if
+          * you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
+          */
+         guess_mode?: boolean;
+     };
+ };
+ export type ImageToImageOutput = Blob;
+ /**
+  * This task reads an image input (optionally guided by a text prompt) and outputs an image.
+  * Recommended model: lllyasviel/sd-controlnet-depth
+  */
+ export declare function imageToImage(args: ImageToImageArgs, options?: Options): Promise<ImageToImageOutput>;
+ //# sourceMappingURL=imageToImage.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"imageToImage.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/imageToImage.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAe,MAAM,aAAa,CAAC;AAIlE,MAAM,MAAM,gBAAgB,GAAG,QAAQ,GAAG;IACzC;;;QAGI;IACJ,MAAM,EAAE,IAAI,GAAG,WAAW,CAAC;IAE3B,UAAU,CAAC,EAAE;QACZ;;WAEG;QACH,MAAM,CAAC,EAAE,MAAM,CAAC;QAChB;;;;;;;YAOI;QACJ,QAAQ,CAAC,EAAE,MAAM,CAAC;QAClB;;WAEG;QACH,eAAe,CAAC,EAAE,MAAM,CAAC;QACzB;;WAEG;QACH,MAAM,CAAC,EAAE,MAAM,CAAC;QAChB;;WAEG;QACH,KAAK,CAAC,EAAE,MAAM,CAAC;QACf;;WAEG;QACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;QAC7B;;WAEG;QACH,cAAc,CAAC,EAAE,MAAM,CAAC;QACxB;;;WAGG;QACH,UAAU,CAAC,EAAE,OAAO,CAAC;KACrB,CAAC;CACF,CAAC;AAEF,MAAM,MAAM,kBAAkB,GAAG,IAAI,CAAC;AAEtC;;;GAGG;AACH,wBAAsB,YAAY,CAAC,IAAI,EAAE,gBAAgB,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,kBAAkB,CAAC,CAyBzG"}
@@ -0,0 +1,18 @@
+ import type { BaseArgs, Options } from "../../types";
+ export type ImageToTextArgs = BaseArgs & {
+     /**
+      * Binary image data
+      */
+     data: Blob | ArrayBuffer;
+ };
+ export interface ImageToTextOutput {
+     /**
+      * The generated caption
+      */
+     generated_text: string;
+ }
+ /**
+  * This task reads some image input and outputs the text caption.
+  */
+ export declare function imageToText(args: ImageToTextArgs, options?: Options): Promise<ImageToTextOutput>;
+ //# sourceMappingURL=imageToText.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"imageToText.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/imageToText.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,eAAe,GAAG,QAAQ,GAAG;IACxC;;OAEG;IACH,IAAI,EAAE,IAAI,GAAG,WAAW,CAAC;CACzB,CAAC;AAEF,MAAM,WAAW,iBAAiB;IACjC;;OAEG;IACH,cAAc,EAAE,MAAM,CAAC;CACvB;AAED;;GAEG;AACH,wBAAsB,WAAW,CAAC,IAAI,EAAE,eAAe,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,iBAAiB,CAAC,CAatG"}
@@ -0,0 +1,33 @@
+ import type { BaseArgs, Options } from "../../types";
+ export type ObjectDetectionArgs = BaseArgs & {
+     /**
+      * Binary image data
+      */
+     data: Blob | ArrayBuffer;
+ };
+ export interface ObjectDetectionOutputValue {
+     /**
+      * A dict (with keys [xmin,ymin,xmax,ymax]) representing the bounding box of a detected object.
+      */
+     box: {
+         xmax: number;
+         xmin: number;
+         ymax: number;
+         ymin: number;
+     };
+     /**
+      * The label for the class (model specific) of a detected object.
+      */
+     label: string;
+     /**
+      * A float that represents how likely it is that the detected object belongs to the given class.
+      */
+     score: number;
+ }
+ export type ObjectDetectionOutput = ObjectDetectionOutputValue[];
+ /**
+  * This task reads some image input and outputs the likelihood of classes & bounding boxes of detected objects.
+  * Recommended model: facebook/detr-resnet-50
+  */
+ export declare function objectDetection(args: ObjectDetectionArgs, options?: Options): Promise<ObjectDetectionOutput>;
+ //# sourceMappingURL=objectDetection.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"objectDetection.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/objectDetection.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,mBAAmB,GAAG,QAAQ,GAAG;IAC5C;;OAEG;IACH,IAAI,EAAE,IAAI,GAAG,WAAW,CAAC;CACzB,CAAC;AAEF,MAAM,WAAW,0BAA0B;IAC1C;;OAEG;IACH,GAAG,EAAE;QACJ,IAAI,EAAE,MAAM,CAAC;QACb,IAAI,EAAE,MAAM,CAAC;QACb,IAAI,EAAE,MAAM,CAAC;QACb,IAAI,EAAE,MAAM,CAAC;KACb,CAAC;IACF;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IAEd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;CACd;AAED,MAAM,MAAM,qBAAqB,GAAG,0BAA0B,EAAE,CAAC;AAEjE;;;GAGG;AACH,wBAAsB,eAAe,CAAC,IAAI,EAAE,mBAAmB,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,qBAAqB,CAAC,CAsBlH"}
@@ -0,0 +1,36 @@
+ import type { BaseArgs, Options } from "../../types";
+ export type TextToImageArgs = BaseArgs & {
+     /**
+      * The text to generate an image from
+      */
+     inputs: string;
+     parameters?: {
+         /**
+          * An optional negative prompt for the image generation
+          */
+         negative_prompt?: string;
+         /**
+          * The height in pixels of the generated image
+          */
+         height?: number;
+         /**
+          * The width in pixels of the generated image
+          */
+         width?: number;
+         /**
+          * The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.
+          */
+         num_inference_steps?: number;
+         /**
+          * Guidance scale: a higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality.
+          */
+         guidance_scale?: number;
+     };
+ };
+ export type TextToImageOutput = Blob;
+ /**
+  * This task reads some text input and outputs an image.
+  * Recommended model: stabilityai/stable-diffusion-2
+  */
+ export declare function textToImage(args: TextToImageArgs, options?: Options): Promise<TextToImageOutput>;
+ //# sourceMappingURL=textToImage.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"textToImage.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/textToImage.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,eAAe,GAAG,QAAQ,GAAG;IACxC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IAEf,UAAU,CAAC,EAAE;QACZ;;WAEG;QACH,eAAe,CAAC,EAAE,MAAM,CAAC;QACzB;;WAEG;QACH,MAAM,CAAC,EAAE,MAAM,CAAC;QAChB;;WAEG;QACH,KAAK,CAAC,EAAE,MAAM,CAAC;QACf;;WAEG;QACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;QAC7B;;WAEG;QACH,cAAc,CAAC,EAAE,MAAM,CAAC;KACxB,CAAC;CACF,CAAC;AAEF,MAAM,MAAM,iBAAiB,GAAG,IAAI,CAAC;AAErC;;;GAGG;AACH,wBAAsB,WAAW,CAAC,IAAI,EAAE,eAAe,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,iBAAiB,CAAC,CAUtG"}
@@ -0,0 +1,26 @@
+ import type { BaseArgs, Options } from "../../types";
+ export type ZeroShotImageClassificationArgs = BaseArgs & {
+     inputs: {
+         /**
+          * Binary image data
+          */
+         image: Blob | ArrayBuffer;
+     };
+     parameters: {
+         /**
+          * A list of strings that are potential classes for inputs. (max 10)
+          */
+         candidate_labels: string[];
+     };
+ };
+ export interface ZeroShotImageClassificationOutputValue {
+     label: string;
+     score: number;
+ }
+ export type ZeroShotImageClassificationOutput = ZeroShotImageClassificationOutputValue[];
+ /**
+  * Classify an image to specified classes.
+  * Recommended model: openai/clip-vit-large-patch14-336
+  */
+ export declare function zeroShotImageClassification(args: ZeroShotImageClassificationArgs, options?: Options): Promise<ZeroShotImageClassificationOutput>;
+ //# sourceMappingURL=zeroShotImageClassification.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"zeroShotImageClassification.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/zeroShotImageClassification.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAKrD,MAAM,MAAM,+BAA+B,GAAG,QAAQ,GAAG;IACxD,MAAM,EAAE;QACP;;WAEG;QACH,KAAK,EAAE,IAAI,GAAG,WAAW,CAAC;KAC1B,CAAC;IACF,UAAU,EAAE;QACX;;WAEG;QACH,gBAAgB,EAAE,MAAM,EAAE,CAAC;KAC3B,CAAC;CACF,CAAC;AAEF,MAAM,WAAW,sCAAsC;IACtD,KAAK,EAAE,MAAM,CAAC;IACd,KAAK,EAAE,MAAM,CAAC;CACd;AAED,MAAM,MAAM,iCAAiC,GAAG,sCAAsC,EAAE,CAAC;AAEzF;;;GAGG;AACH,wBAAsB,2BAA2B,CAChD,IAAI,EAAE,+BAA+B,EACrC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,iCAAiC,CAAC,CAsB5C"}
@@ -0,0 +1,32 @@
+ export * from "./custom/request";
+ export * from "./custom/streamingRequest";
+ export * from "./audio/audioClassification";
+ export * from "./audio/automaticSpeechRecognition";
+ export * from "./audio/textToSpeech";
+ export * from "./audio/audioToAudio";
+ export * from "./cv/imageClassification";
+ export * from "./cv/imageSegmentation";
+ export * from "./cv/imageToText";
+ export * from "./cv/objectDetection";
+ export * from "./cv/textToImage";
+ export * from "./cv/imageToImage";
+ export * from "./cv/zeroShotImageClassification";
+ export * from "./nlp/featureExtraction";
+ export * from "./nlp/fillMask";
+ export * from "./nlp/questionAnswering";
+ export * from "./nlp/sentenceSimilarity";
+ export * from "./nlp/summarization";
+ export * from "./nlp/tableQuestionAnswering";
+ export * from "./nlp/textClassification";
+ export * from "./nlp/textGeneration";
+ export * from "./nlp/textGenerationStream";
+ export * from "./nlp/tokenClassification";
+ export * from "./nlp/translation";
+ export * from "./nlp/zeroShotClassification";
+ export * from "./nlp/chatCompletion";
+ export * from "./nlp/chatCompletionStream";
+ export * from "./multimodal/documentQuestionAnswering";
+ export * from "./multimodal/visualQuestionAnswering";
+ export * from "./tabular/tabularRegression";
+ export * from "./tabular/tabularClassification";
+ //# sourceMappingURL=index.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/tasks/index.ts"],"names":[],"mappings":"AACA,cAAc,kBAAkB,CAAC;AACjC,cAAc,2BAA2B,CAAC;AAG1C,cAAc,6BAA6B,CAAC;AAC5C,cAAc,oCAAoC,CAAC;AACnD,cAAc,sBAAsB,CAAC;AACrC,cAAc,sBAAsB,CAAC;AAGrC,cAAc,0BAA0B,CAAC;AACzC,cAAc,wBAAwB,CAAC;AACvC,cAAc,kBAAkB,CAAC;AACjC,cAAc,sBAAsB,CAAC;AACrC,cAAc,kBAAkB,CAAC;AACjC,cAAc,mBAAmB,CAAC;AAClC,cAAc,kCAAkC,CAAC;AAGjD,cAAc,yBAAyB,CAAC;AACxC,cAAc,gBAAgB,CAAC;AAC/B,cAAc,yBAAyB,CAAC;AACxC,cAAc,0BAA0B,CAAC;AACzC,cAAc,qBAAqB,CAAC;AACpC,cAAc,8BAA8B,CAAC;AAC7C,cAAc,0BAA0B,CAAC;AACzC,cAAc,sBAAsB,CAAC;AACrC,cAAc,4BAA4B,CAAC;AAC3C,cAAc,2BAA2B,CAAC;AAC1C,cAAc,mBAAmB,CAAC;AAClC,cAAc,8BAA8B,CAAC;AAC7C,cAAc,sBAAsB,CAAC;AACrC,cAAc,4BAA4B,CAAC;AAG3C,cAAc,wCAAwC,CAAC;AACvD,cAAc,sCAAsC,CAAC;AAGrD,cAAc,6BAA6B,CAAC;AAC5C,cAAc,iCAAiC,CAAC"}
@@ -0,0 +1,35 @@
+ import type { BaseArgs, Options } from "../../types";
+ export type DocumentQuestionAnsweringArgs = BaseArgs & {
+     inputs: {
+         /**
+          * Raw image
+          *
+          * You can use native `File` in browsers, or `new Blob([buffer])` in node, or for a base64 image `new Blob([btoa(base64String)])`, or even `await (await fetch('...')).blob()`
+          **/
+         image: Blob | ArrayBuffer;
+         question: string;
+     };
+ };
+ export interface DocumentQuestionAnsweringOutput {
+     /**
+      * A string that’s the answer within the document.
+      */
+     answer: string;
+     /**
+      * ?
+      */
+     end?: number;
+     /**
+      * A float that represents how likely it is that the answer is correct
+      */
+     score?: number;
+     /**
+      * ?
+      */
+     start?: number;
+ }
+ /**
+  * Answers a question on a document image. Recommended model: impira/layoutlm-document-qa.
+  */
+ export declare function documentQuestionAnswering(args: DocumentQuestionAnsweringArgs, options?: Options): Promise<DocumentQuestionAnsweringOutput>;
+ //# sourceMappingURL=documentQuestionAnswering.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"documentQuestionAnswering.d.ts","sourceRoot":"","sources":["../../../../src/tasks/multimodal/documentQuestionAnswering.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAMrD,MAAM,MAAM,6BAA6B,GAAG,QAAQ,GAAG;IACtD,MAAM,EAAE;QACP;;;;YAII;QACJ,KAAK,EAAE,IAAI,GAAG,WAAW,CAAC;QAC1B,QAAQ,EAAE,MAAM,CAAC;KACjB,CAAC;CACF,CAAC;AAEF,MAAM,WAAW,+BAA+B;IAC/C;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,GAAG,CAAC,EAAE,MAAM,CAAC;IACb;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;CACf;AAED;;GAEG;AACH,wBAAsB,yBAAyB,CAC9C,IAAI,EAAE,6BAA6B,EACnC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,+BAA+B,CAAC,CA4B1C"}
@@ -0,0 +1,27 @@
+ import type { BaseArgs, Options } from "../../types";
+ export type VisualQuestionAnsweringArgs = BaseArgs & {
+     inputs: {
+         /**
+          * Raw image
+          *
+          * You can use native `File` in browsers, or `new Blob([buffer])` in node, or for a base64 image `new Blob([btoa(base64String)])`, or even `await (await fetch('...')).blob()`
+          **/
+         image: Blob | ArrayBuffer;
+         question: string;
+     };
+ };
+ export interface VisualQuestionAnsweringOutput {
+     /**
+      * A string that’s the answer to a visual question.
+      */
+     answer: string;
+     /**
+      * Answer correctness score.
+      */
+     score: number;
+ }
+ /**
+  * Answers a question on an image. Recommended model: dandelin/vilt-b32-finetuned-vqa.
+  */
+ export declare function visualQuestionAnswering(args: VisualQuestionAnsweringArgs, options?: Options): Promise<VisualQuestionAnsweringOutput>;
+ //# sourceMappingURL=visualQuestionAnswering.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"visualQuestionAnswering.d.ts","sourceRoot":"","sources":["../../../../src/tasks/multimodal/visualQuestionAnswering.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAe,MAAM,aAAa,CAAC;AAIlE,MAAM,MAAM,2BAA2B,GAAG,QAAQ,GAAG;IACpD,MAAM,EAAE;QACP;;;;YAII;QACJ,KAAK,EAAE,IAAI,GAAG,WAAW,CAAC;QAC1B,QAAQ,EAAE,MAAM,CAAC;KACjB,CAAC;CACF,CAAC;AAEF,MAAM,WAAW,6BAA6B;IAC7C;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;CACd;AAED;;GAEG;AACH,wBAAsB,uBAAuB,CAC5C,IAAI,EAAE,2BAA2B,EACjC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,6BAA6B,CAAC,CAwBxC"}
@@ -0,0 +1,7 @@
+ import type { BaseArgs, Options } from "../../types";
+ import type { ChatCompletionInput, ChatCompletionOutput } from "@huggingface/tasks";
+ /**
+  * Use the chat completion endpoint to generate a response to a prompt, using the OpenAI message completion API (without streaming)
+  */
+ export declare function chatCompletion(args: BaseArgs & ChatCompletionInput, options?: Options): Promise<ChatCompletionOutput>;
+ //# sourceMappingURL=chatCompletion.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"chatCompletion.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/chatCompletion.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAErD,OAAO,KAAK,EAAE,mBAAmB,EAAE,oBAAoB,EAAE,MAAM,oBAAoB,CAAC;AAEpF;;GAEG;AAEH,wBAAsB,cAAc,CACnC,IAAI,EAAE,QAAQ,GAAG,mBAAmB,EACpC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,oBAAoB,CAAC,CAmB/B"}
@@ -0,0 +1,7 @@
+ import type { BaseArgs, Options } from "../../types";
+ import type { ChatCompletionInput, ChatCompletionStreamOutput } from "@huggingface/tasks";
+ /**
+  * Same as `chatCompletion` but returns a generator that can be read one token at a time
+  */
+ export declare function chatCompletionStream(args: BaseArgs & ChatCompletionInput, options?: Options): AsyncGenerator<ChatCompletionStreamOutput>;
+ //# sourceMappingURL=chatCompletionStream.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"chatCompletionStream.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/chatCompletionStream.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAErD,OAAO,KAAK,EAAE,mBAAmB,EAAE,0BAA0B,EAAE,MAAM,oBAAoB,CAAC;AAE1F;;GAEG;AACH,wBAAuB,oBAAoB,CAC1C,IAAI,EAAE,QAAQ,GAAG,mBAAmB,EACpC,OAAO,CAAC,EAAE,OAAO,GACf,cAAc,CAAC,0BAA0B,CAAC,CAM5C"}
@@ -0,0 +1,19 @@
+ import type { BaseArgs, Options } from "../../types";
+ export type FeatureExtractionArgs = BaseArgs & {
+     /**
+      * The input is a string or a list of strings to get the features from.
+      *
+      * inputs: "That is a happy person",
+      *
+      */
+     inputs: string | string[];
+ };
+ /**
+  * Returned values are a multidimensional array of floats (the dimensions depend on whether you sent a string or a list of strings, and on whether an automatic reduction, usually mean pooling, was applied for you or not; this should be explained in the model's README).
+  */
+ export type FeatureExtractionOutput = (number | number[] | number[][])[];
+ /**
+  * This task reads some text and outputs raw float values that are usually consumed as part of a semantic database/semantic search.
+  */
+ export declare function featureExtraction(args: FeatureExtractionArgs, options?: Options): Promise<FeatureExtractionOutput>;
+ //# sourceMappingURL=featureExtraction.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"featureExtraction.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/featureExtraction.ts"],"names":[],"mappings":"AAEA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,qBAAqB,GAAG,QAAQ,GAAG;IAC9C;;;;;OAKG;IACH,MAAM,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;CAC1B,CAAC;AAEF;;GAEG;AACH,MAAM,MAAM,uBAAuB,GAAG,CAAC,MAAM,GAAG,MAAM,EAAE,GAAG,MAAM,EAAE,EAAE,CAAC,EAAE,CAAC;AAEzE;;GAEG;AACH,wBAAsB,iBAAiB,CACtC,IAAI,EAAE,qBAAqB,EAC3B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,uBAAuB,CAAC,CAyBlC"}
@@ -0,0 +1,27 @@
+ import type { BaseArgs, Options } from "../../types";
+ export type FillMaskArgs = BaseArgs & {
+     inputs: string;
+ };
+ export type FillMaskOutput = {
+     /**
+      * The probability for this token.
+      */
+     score: number;
+     /**
+      * The actual sequence of tokens that ran against the model (may contain special tokens)
+      */
+     sequence: string;
+     /**
+      * The id of the token
+      */
+     token: number;
+     /**
+      * The string representation of the token
+      */
+     token_str: string;
+ }[];
+ /**
+  * Tries to fill in a hole with a missing word (token to be precise). That’s the base task for BERT models.
+  */
+ export declare function fillMask(args: FillMaskArgs, options?: Options): Promise<FillMaskOutput>;
+ //# sourceMappingURL=fillMask.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"fillMask.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/fillMask.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,YAAY,GAAG,QAAQ,GAAG;IACrC,MAAM,EAAE,MAAM,CAAC;CACf,CAAC;AAEF,MAAM,MAAM,cAAc,GAAG;IAC5B;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,SAAS,EAAE,MAAM,CAAC;CAClB,EAAE,CAAC;AAEJ;;GAEG;AACH,wBAAsB,QAAQ,CAAC,IAAI,EAAE,YAAY,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,cAAc,CAAC,CAoB7F"}