@huggingface/inference 1.5.2 → 1.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -18,7 +18,9 @@ pnpm add @huggingface/inference
18
18
 
19
19
  ## Usage
20
20
 
21
- ❗**Important note:** Using an API key is optional to get started, however you will be rate limited eventually. Join [Hugging Face](https://huggingface.co/join) and then visit [access tokens](https://huggingface.co/settings/tokens) to generate your API key. Note that this API key should be kept private and used exclusively for experimental purposes on the web. If you need to protect it, we suggest setting up a proxy server since we currently do not provide an OAuth solution.
21
+ ❗**Important note:** Using an API key is optional to get started, however you will be rate limited eventually. Join [Hugging Face](https://huggingface.co/join) and then visit [access tokens](https://huggingface.co/settings/tokens) to generate your API key for **free**.
22
+
23
+ Your API key should be kept private. If you need to protect it in front-end applications, we suggest setting up a proxy server that stores the API key.
22
24
 
23
25
  ### Basic examples
24
26
 
@@ -149,7 +151,7 @@ await hf.textToImage({
149
151
  })
150
152
  ```
151
153
 
152
- ## Supported APIs
154
+ ## Supported Tasks
153
155
 
154
156
  ### Natural Language Processing
155
157
 
@@ -184,3 +186,9 @@ await hf.textToImage({
184
186
  ```console
185
187
  HF_ACCESS_TOKEN="your access token" npm run test
186
188
  ```
189
+
190
+ ## Finding appropriate models
191
+
192
+ We have an informative documentation project called [Tasks](https://huggingface.co/tasks) to list available models for each task and explain how each task works in detail.
193
+
194
+ It also contains demos, example outputs, and other resources should you want to dig deeper into the ML side of things.
package/dist/index.d.ts CHANGED
@@ -453,7 +453,7 @@ type TextToImageArgs = Args & {
453
453
  */
454
454
  negative_prompt?: string;
455
455
  };
456
- type TextToImageReturn = ArrayBuffer;
456
+ type TextToImageReturn = Blob;
457
457
  declare class HfInference {
458
458
  private readonly apiKey;
459
459
  private readonly defaultOptions;
package/dist/index.js CHANGED
@@ -185,17 +185,15 @@ var HfInference = class {
185
185
  wait_for_model: true
186
186
  });
187
187
  }
188
- let output;
189
188
  if (options?.blob) {
190
189
  if (!response.ok) {
191
190
  throw new Error("An error occurred while fetching the blob");
192
191
  }
193
- return await response.arrayBuffer();
194
- } else {
195
- output = await response.json();
196
- if (output.error) {
197
- throw new Error(output.error);
198
- }
192
+ return await response.blob();
193
+ }
194
+ const output = await response.json();
195
+ if (output.error) {
196
+ throw new Error(output.error);
199
197
  }
200
198
  return output;
201
199
  }
package/dist/index.mjs CHANGED
@@ -160,17 +160,15 @@ var HfInference = class {
160
160
  wait_for_model: true
161
161
  });
162
162
  }
163
- let output;
164
163
  if (options?.blob) {
165
164
  if (!response.ok) {
166
165
  throw new Error("An error occurred while fetching the blob");
167
166
  }
168
- return await response.arrayBuffer();
169
- } else {
170
- output = await response.json();
171
- if (output.error) {
172
- throw new Error(output.error);
173
- }
167
+ return await response.blob();
168
+ }
169
+ const output = await response.json();
170
+ if (output.error) {
171
+ throw new Error(output.error);
174
172
  }
175
173
  return output;
176
174
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@huggingface/inference",
3
- "version": "1.5.2",
3
+ "version": "1.6.0",
4
4
  "license": "MIT",
5
5
  "author": "Tim Mikeladze <tim.mikeladze@gmail.com>",
6
6
  "description": "Typescript wrapper for the Hugging Face Inference API",
@@ -498,7 +498,7 @@ export type TextToImageArgs = Args & {
498
498
  negative_prompt?: string;
499
499
  };
500
500
 
501
- export type TextToImageReturn = ArrayBuffer;
501
+ export type TextToImageReturn = Blob;
502
502
 
503
503
  export class HfInference {
504
504
  private readonly apiKey: string;
@@ -712,20 +712,17 @@ export class HfInference {
712
712
  });
713
713
  }
714
714
 
715
- let output;
716
-
717
715
  if (options?.blob) {
718
716
  if (!response.ok) {
719
717
  throw new Error("An error occurred while fetching the blob");
720
718
  }
721
- return await response.arrayBuffer();
722
- } else {
723
- output = await response.json();
724
- if (output.error) {
725
- throw new Error(output.error);
726
- }
719
+ return await response.blob();
727
720
  }
728
721
 
722
+ const output = await response.json();
723
+ if (output.error) {
724
+ throw new Error(output.error);
725
+ }
729
726
  return output;
730
727
  }
731
728