@huggingface/inference 1.8.0 → 2.0.0-rc2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/README.md +57 -8
  2. package/dist/index.js +440 -354
  3. package/dist/index.mjs +423 -353
  4. package/package.json +7 -9
  5. package/src/HfInference.ts +43 -1112
  6. package/src/index.ts +3 -1
  7. package/src/lib/InferenceOutputError.ts +8 -0
  8. package/src/lib/makeRequestOptions.ts +55 -0
  9. package/src/tasks/audio/audioClassification.ts +41 -0
  10. package/src/tasks/audio/automaticSpeechRecognition.ts +33 -0
  11. package/src/tasks/custom/request.ts +39 -0
  12. package/src/tasks/custom/streamingRequest.ts +76 -0
  13. package/src/tasks/cv/imageClassification.ts +40 -0
  14. package/src/tasks/cv/imageSegmentation.ts +45 -0
  15. package/src/tasks/cv/imageToText.ts +30 -0
  16. package/src/tasks/cv/objectDetection.ts +58 -0
  17. package/src/tasks/cv/textToImage.ts +48 -0
  18. package/src/tasks/index.ts +29 -0
  19. package/src/tasks/nlp/conversational.ts +81 -0
  20. package/src/tasks/nlp/featureExtraction.ts +51 -0
  21. package/src/tasks/nlp/fillMask.ts +48 -0
  22. package/src/tasks/nlp/questionAnswering.ts +48 -0
  23. package/src/tasks/nlp/sentenceSimilarity.ts +36 -0
  24. package/src/tasks/nlp/summarization.ts +59 -0
  25. package/src/tasks/nlp/tableQuestionAnswering.ts +58 -0
  26. package/src/tasks/nlp/textClassification.ts +37 -0
  27. package/src/tasks/nlp/textGeneration.ts +67 -0
  28. package/src/tasks/nlp/textGenerationStream.ts +92 -0
  29. package/src/tasks/nlp/tokenClassification.ts +78 -0
  30. package/src/tasks/nlp/translation.ts +29 -0
  31. package/src/tasks/nlp/zeroShotClassification.ts +55 -0
  32. package/src/types.ts +42 -0
  33. package/src/utils/distributive-omit.d.ts +15 -0
  34. package/dist/index.d.ts +0 -677
package/README.md CHANGED
@@ -1,8 +1,10 @@
1
1
  # 🤗 Hugging Face Inference API
2
2
 
3
- A Typescript powered wrapper for the Hugging Face Inference API. Learn more about the Inference API at [Hugging Face](https://huggingface.co/docs/api-inference/index).
3
+ A Typescript powered wrapper for the Hugging Face Inference API. Learn more about the Inference API at [Hugging Face](https://huggingface.co/docs/api-inference/index). It also works with [Inference Endpoints](https://huggingface.co/docs/inference-endpoints/index).
4
4
 
5
- Check out the [full documentation](https://huggingface.co/docs/huggingface.js/inference/README) or try out a live [interactive notebook](https://observablehq.com/@huggingface/hello-huggingface-js-inference).
5
+ Check out the [full documentation](https://huggingface.co/docs/huggingface.js/inference/README).
6
+
7
+ You can also try out a live [interactive notebook](https://observablehq.com/@huggingface/hello-huggingface-js-inference) or see some demos on [hf.co/huggingfacejs](https://huggingface.co/huggingfacejs).
6
8
 
7
9
  ## Install
8
10
 
@@ -16,16 +18,16 @@ pnpm add @huggingface/inference
16
18
 
17
19
  ## Usage
18
20
 
19
- ❗**Important note:** Using an API key is optional to get started, however you will be rate limited eventually. Join [Hugging Face](https://huggingface.co/join) and then visit [access tokens](https://huggingface.co/settings/tokens) to generate your API key for **free**.
21
+ ❗**Important note:** Using an access token is optional to get started, however you will be rate limited eventually. Join [Hugging Face](https://huggingface.co/join) and then visit [access tokens](https://huggingface.co/settings/tokens) to generate your access token for **free**.
20
22
 
21
- Your API key should be kept private. If you need to protect it in front-end applications, we suggest setting up a proxy server that stores the API key.
23
+ Your access token should be kept private. If you need to protect it in front-end applications, we suggest setting up a proxy server that stores the access token.
22
24
 
23
25
  ### Basic examples
24
26
 
25
27
  ```typescript
26
28
  import { HfInference } from '@huggingface/inference'
27
29
 
28
- const hf = new HfInference('your api key')
30
+ const hf = new HfInference('your access token')
29
31
 
30
32
  // Natural Language
31
33
 
@@ -43,7 +45,7 @@ await hf.summarization({
43
45
  }
44
46
  })
45
47
 
46
- await hf.questionAnswer({
48
+ await hf.questionAnswering({
47
49
  model: 'deepset/roberta-base-squad2',
48
50
  inputs: {
49
51
  question: 'What is the capital of France?',
@@ -51,7 +53,7 @@ await hf.questionAnswer({
51
53
  }
52
54
  })
53
55
 
54
- await hf.tableQuestionAnswer({
56
+ await hf.tableQuestionAnswering({
55
57
  model: 'google/tapas-base-finetuned-wtq',
56
58
  inputs: {
57
59
  query: 'How many stars does the transformers repository have?',
@@ -109,7 +111,7 @@ await hf.conversational({
109
111
  }
110
112
  })
111
113
 
112
- await hf.featureExtraction({
114
+ await hf.sentenceSimilarity({
113
115
  model: 'sentence-transformers/paraphrase-xlm-r-multilingual-v1',
114
116
  inputs: {
115
117
  source_sentence: 'That is a happy person',
@@ -121,6 +123,11 @@ await hf.featureExtraction({
121
123
  }
122
124
  })
123
125
 
126
+ await hf.featureExtraction({
127
+ model: "sentence-transformers/distilbert-base-nli-mean-tokens",
128
+ inputs: "That is a happy person",
129
+ });
130
+
124
131
  // Audio
125
132
 
126
133
  await hf.automaticSpeechRecognition({
@@ -162,6 +169,30 @@ await hf.imageToText({
162
169
  data: readFileSync('test/cats.png'),
163
170
  model: 'nlpconnect/vit-gpt2-image-captioning'
164
171
  })
172
+
173
+ // Custom call, for models with custom parameters / outputs
174
+ await hf.request({
175
+ model: 'my-custom-model',
176
+ inputs: 'hello world',
177
+ parameters: {
178
+ custom_param: 'some magic',
179
+ }
180
+ })
181
+
182
+ // Custom streaming call, for models with custom parameters / outputs
183
+ for await (const output of hf.streamingRequest({
184
+ model: 'my-custom-model',
185
+ inputs: 'hello world',
186
+ parameters: {
187
+ custom_param: 'some magic',
188
+ }
189
+ })) {
190
+ ...
191
+ }
192
+
193
+ // Using your own inference endpoint: https://hf.co/docs/inference-endpoints/
194
+ const gpt2 = hf.endpoint('https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2');
195
+ const { generated_text } = await gpt2.textGeneration({inputs: 'The answer to the universe is'});
165
196
  ```
166
197
 
167
198
  ## Supported Tasks
@@ -181,6 +212,7 @@ await hf.imageToText({
181
212
  - [x] Zero-shot classification
182
213
  - [x] Conversational
183
214
  - [x] Feature extraction
215
+ - [x] Sentence Similarity
184
216
 
185
217
  ### Audio
186
218
 
@@ -195,6 +227,23 @@ await hf.imageToText({
195
227
  - [x] Text to image
196
228
  - [x] Image to text
197
229
 
230
+ ## Tree-shaking
231
+
232
+ You can import the functions you need directly from the module, rather than using the `HfInference` class:
233
+
234
+ ```ts
235
+ import {textGeneration} from "@huggingface/inference";
236
+
237
+ await textGeneration({
238
+ accessToken: "hf_...",
239
+ model: "model_or_endpoint",
240
+ inputs: ...,
241
+ parameters: ...
242
+ })
243
+ ```
244
+
245
+ This will enable tree-shaking by your bundler.
246
+
198
247
  ## Running tests
199
248
 
200
249
  ```console