@huggingface/inference 1.7.1 → 2.0.0-rc1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +65 -10
- package/dist/index.js +442 -341
- package/dist/index.mjs +424 -340
- package/package.json +10 -13
- package/src/HfInference.ts +42 -1062
- package/src/index.ts +3 -1
- package/src/lib/InferenceOutputError.ts +8 -0
- package/src/lib/makeRequestOptions.ts +55 -0
- package/src/tasks/audio/audioClassification.ts +41 -0
- package/src/tasks/audio/automaticSpeechRecognition.ts +33 -0
- package/src/tasks/custom/request.ts +39 -0
- package/src/tasks/custom/streamingRequest.ts +76 -0
- package/src/tasks/cv/imageClassification.ts +40 -0
- package/src/tasks/cv/imageSegmentation.ts +45 -0
- package/src/tasks/cv/imageToText.ts +30 -0
- package/src/tasks/cv/objectDetection.ts +58 -0
- package/src/tasks/cv/textToImage.ts +48 -0
- package/src/tasks/index.ts +29 -0
- package/src/tasks/nlp/conversational.ts +81 -0
- package/src/tasks/nlp/featureExtraction.ts +51 -0
- package/src/tasks/nlp/fillMask.ts +48 -0
- package/src/tasks/nlp/questionAnswering.ts +48 -0
- package/src/tasks/nlp/sentenceSimilarity.ts +36 -0
- package/src/tasks/nlp/summarization.ts +59 -0
- package/src/tasks/nlp/tableQuestionAnswering.ts +58 -0
- package/src/tasks/nlp/textClassification.ts +37 -0
- package/src/tasks/nlp/textGeneration.ts +67 -0
- package/src/tasks/nlp/textGenerationStream.ts +92 -0
- package/src/tasks/nlp/tokenClassification.ts +78 -0
- package/src/tasks/nlp/translation.ts +29 -0
- package/src/tasks/nlp/zeroShotClassification.ts +55 -0
- package/src/types.ts +42 -0
- package/src/utils/omit.ts +11 -0
- package/src/utils/pick.ts +16 -0
- package/src/utils/typedInclude.ts +3 -0
- package/dist/index.d.ts +0 -643
- package/src/utils/env-predicates.ts +0 -7
- /package/src/utils/{to-array.ts → toArray.ts} +0 -0
package/README.md
CHANGED
|
@@ -1,10 +1,10 @@
|
|
|
1
1
|
# 🤗 Hugging Face Inference API
|
|
2
2
|
|
|
3
|
-
A Typescript powered wrapper for the Hugging Face Inference API. Learn more about the Inference API at [Hugging Face](https://huggingface.co/docs/api-inference/index).
|
|
4
|
-
|
|
5
|
-
Check out the [full documentation](https://huggingface.co/docs/huggingface.js/inference/README) or try out a live [interactive notebook](https://observablehq.com/@huggingface/hello-huggingface-js-inference).
|
|
3
|
+
A Typescript powered wrapper for the Hugging Face Inference API. Learn more about the Inference API at [Hugging Face](https://huggingface.co/docs/api-inference/index). It also works with [Inference Endpoints](https://huggingface.co/docs/inference-endpoints/index).
|
|
6
4
|
|
|
5
|
+
Check out the [full documentation](https://huggingface.co/docs/huggingface.js/inference/README).
|
|
7
6
|
|
|
7
|
+
You can also try out a live [interactive notebook](https://observablehq.com/@huggingface/hello-huggingface-js-inference) or see some demos on [hf.co/huggingfacejs](https://huggingface.co/huggingfacejs).
|
|
8
8
|
|
|
9
9
|
## Install
|
|
10
10
|
|
|
@@ -18,16 +18,16 @@ pnpm add @huggingface/inference
|
|
|
18
18
|
|
|
19
19
|
## Usage
|
|
20
20
|
|
|
21
|
-
❗**Important note:** Using an
|
|
21
|
+
❗**Important note:** Using an access token is optional to get started; however, you will eventually be rate limited. Join [Hugging Face](https://huggingface.co/join) and then visit [access tokens](https://huggingface.co/settings/tokens) to generate your access token for **free**.
|
|
22
22
|
|
|
23
|
-
Your
|
|
23
|
+
Your access token should be kept private. If you need to protect it in front-end applications, we suggest setting up a proxy server that stores the access token.
|
|
24
24
|
|
|
25
25
|
### Basic examples
|
|
26
26
|
|
|
27
27
|
```typescript
|
|
28
28
|
import { HfInference } from '@huggingface/inference'
|
|
29
29
|
|
|
30
|
-
const hf = new HfInference('your
|
|
30
|
+
const hf = new HfInference('your access token')
|
|
31
31
|
|
|
32
32
|
// Natural Language
|
|
33
33
|
|
|
@@ -45,7 +45,7 @@ await hf.summarization({
|
|
|
45
45
|
}
|
|
46
46
|
})
|
|
47
47
|
|
|
48
|
-
await hf.
|
|
48
|
+
await hf.questionAnswering({
|
|
49
49
|
model: 'deepset/roberta-base-squad2',
|
|
50
50
|
inputs: {
|
|
51
51
|
question: 'What is the capital of France?',
|
|
@@ -53,7 +53,7 @@ await hf.questionAnswer({
|
|
|
53
53
|
}
|
|
54
54
|
})
|
|
55
55
|
|
|
56
|
-
await hf.
|
|
56
|
+
await hf.tableQuestionAnswering({
|
|
57
57
|
model: 'google/tapas-base-finetuned-wtq',
|
|
58
58
|
inputs: {
|
|
59
59
|
query: 'How many stars does the transformers repository have?',
|
|
@@ -111,7 +111,7 @@ await hf.conversational({
|
|
|
111
111
|
}
|
|
112
112
|
})
|
|
113
113
|
|
|
114
|
-
await hf.
|
|
114
|
+
await hf.sentenceSimilarity({
|
|
115
115
|
model: 'sentence-transformers/paraphrase-xlm-r-multilingual-v1',
|
|
116
116
|
inputs: {
|
|
117
117
|
source_sentence: 'That is a happy person',
|
|
@@ -123,6 +123,11 @@ await hf.featureExtraction({
|
|
|
123
123
|
}
|
|
124
124
|
})
|
|
125
125
|
|
|
126
|
+
await hf.featureExtraction({
|
|
127
|
+
model: "sentence-transformers/distilbert-base-nli-mean-tokens",
|
|
128
|
+
inputs: "That is a happy person",
|
|
129
|
+
});
|
|
130
|
+
|
|
126
131
|
// Audio
|
|
127
132
|
|
|
128
133
|
await hf.automaticSpeechRecognition({
|
|
@@ -154,9 +159,40 @@ await hf.imageSegmentation({
|
|
|
154
159
|
|
|
155
160
|
await hf.textToImage({
|
|
156
161
|
inputs: 'award winning high resolution photo of a giant tortoise/((ladybird)) hybrid, [trending on artstation]',
|
|
157
|
-
negative_prompt: 'blurry',
|
|
158
162
|
model: 'stabilityai/stable-diffusion-2',
|
|
163
|
+
parameters: {
|
|
164
|
+
negative_prompt: 'blurry',
|
|
165
|
+
}
|
|
166
|
+
})
|
|
167
|
+
|
|
168
|
+
await hf.imageToText({
|
|
169
|
+
data: readFileSync('test/cats.png'),
|
|
170
|
+
model: 'nlpconnect/vit-gpt2-image-captioning'
|
|
171
|
+
})
|
|
172
|
+
|
|
173
|
+
// Custom call, for models with custom parameters / outputs
|
|
174
|
+
await hf.request({
|
|
175
|
+
model: 'my-custom-model',
|
|
176
|
+
inputs: 'hello world',
|
|
177
|
+
parameters: {
|
|
178
|
+
custom_param: 'some magic',
|
|
179
|
+
}
|
|
159
180
|
})
|
|
181
|
+
|
|
182
|
+
// Custom streaming call, for models with custom parameters / outputs
|
|
183
|
+
for await (const output of hf.streamingRequest({
|
|
184
|
+
model: 'my-custom-model',
|
|
185
|
+
inputs: 'hello world',
|
|
186
|
+
parameters: {
|
|
187
|
+
custom_param: 'some magic',
|
|
188
|
+
}
|
|
189
|
+
})) {
|
|
190
|
+
...
|
|
191
|
+
}
|
|
192
|
+
|
|
193
|
+
// Using your own inference endpoint: https://hf.co/docs/inference-endpoints/
|
|
194
|
+
const gpt2 = hf.endpoint('https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2');
|
|
195
|
+
const { generated_text } = await gpt2.textGeneration({inputs: 'The answer to the universe is'});
|
|
160
196
|
```
|
|
161
197
|
|
|
162
198
|
## Supported Tasks
|
|
@@ -176,6 +212,7 @@ await hf.textToImage({
|
|
|
176
212
|
- [x] Zero-shot classification
|
|
177
213
|
- [x] Conversational
|
|
178
214
|
- [x] Feature extraction
|
|
215
|
+
- [x] Sentence similarity
|
|
179
216
|
|
|
180
217
|
### Audio
|
|
181
218
|
|
|
@@ -188,6 +225,24 @@ await hf.textToImage({
|
|
|
188
225
|
- [x] Object detection
|
|
189
226
|
- [x] Image segmentation
|
|
190
227
|
- [x] Text to image
|
|
228
|
+
- [x] Image to text
|
|
229
|
+
|
|
230
|
+
## Tree-shaking
|
|
231
|
+
|
|
232
|
+
You can import the functions you need directly from the module, rather than using the `HfInference` class:
|
|
233
|
+
|
|
234
|
+
```ts
|
|
235
|
+
import {textGeneration} from "@huggingface/inference";
|
|
236
|
+
|
|
237
|
+
await textGeneration({
|
|
238
|
+
accessToken: "hf_...",
|
|
239
|
+
model: "model_or_endpoint",
|
|
240
|
+
inputs: ...,
|
|
241
|
+
parameters: ...
|
|
242
|
+
})
|
|
243
|
+
```
|
|
244
|
+
|
|
245
|
+
This will enable tree-shaking by your bundler.
|
|
191
246
|
|
|
192
247
|
## Running tests
|
|
193
248
|
|