@huggingface/inference 2.6.1 → 2.6.2

This diff shows the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
package/dist/index.d.ts CHANGED
@@ -24,6 +24,11 @@ export interface Options {
  * Custom fetch function to use instead of the default one, for example to use a proxy or edit headers.
  */
  fetch?: typeof fetch;
+
+ /**
+ * (Default: "same-origin"). String | Boolean. Credentials to use for the request. If this is a string, it will be passed straight on. If it's a boolean, true will be "include" and false will not send credentials at all.
+ */
+ includeCredentials?: string | boolean;
  }

  export type InferenceTask =
@@ -174,8 +179,6 @@ export function textToSpeech(args: TextToSpeechArgs, options?: Options): Promise
  export function request<T>(
  args: RequestArgs,
  options?: Options & {
- /** For internal HF use, which is why it's not exposed in {@link Options} */
- includeCredentials?: boolean;
  /** When a model can be used for multiple tasks, and we want to run a non-default task */
  task?: string | InferenceTask;
  /** To load default model if needed */
@@ -188,8 +191,6 @@ export function request<T>(
  export function streamingRequest<T>(
  args: RequestArgs,
  options?: Options & {
- /** For internal HF use, which is why it's not exposed in {@link Options} */
- includeCredentials?: boolean;
  /** When a model can be used for multiple tasks, and we want to run a non-default task */
  task?: string | InferenceTask;
  /** To load default model if needed */
@@ -1054,8 +1055,6 @@ export class HfInference {
  request<T>(
  args: Omit<RequestArgs, 'accessToken'>,
  options?: Options & {
- /** For internal HF use, which is why it's not exposed in {@link Options} */
- includeCredentials?: boolean;
  /** When a model can be used for multiple tasks, and we want to run a non-default task */
  task?: string | InferenceTask;
  /** To load default model if needed */
@@ -1068,8 +1067,6 @@ export class HfInference {
  streamingRequest<T>(
  args: Omit<RequestArgs, 'accessToken'>,
  options?: Options & {
- /** For internal HF use, which is why it's not exposed in {@link Options} */
- includeCredentials?: boolean;
  /** When a model can be used for multiple tasks, and we want to run a non-default task */
  task?: string | InferenceTask;
  /** To load default model if needed */
@@ -1263,8 +1260,6 @@ export class HfInferenceEndpoint {
  request<T>(
  args: Omit<RequestArgs, 'accessToken' | 'model'>,
  options?: Options & {
- /** For internal HF use, which is why it's not exposed in {@link Options} */
- includeCredentials?: boolean;
  /** When a model can be used for multiple tasks, and we want to run a non-default task */
  task?: string | InferenceTask;
  /** To load default model if needed */
@@ -1277,8 +1272,6 @@ export class HfInferenceEndpoint {
  streamingRequest<T>(
  args: Omit<RequestArgs, 'accessToken' | 'model'>,
  options?: Options & {
- /** For internal HF use, which is why it's not exposed in {@link Options} */
- includeCredentials?: boolean;
  /** When a model can be used for multiple tasks, and we want to run a non-default task */
  task?: string | InferenceTask;
  /** To load default model if needed */
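The change above promotes includeCredentials from an internal option on request/streamingRequest to the public Options interface, so every task method now accepts it. A minimal usage sketch; the model name, prompt and token are illustrative, not taken from the diff:

    import { HfInference } from "@huggingface/inference";

    const hf = new HfInference("hf_...");

    // includeCredentials now lives on Options, so it can be passed to any task method.
    // A string is forwarded as-is to fetch's credentials field; true maps to "include".
    const out = await hf.textGeneration(
      { model: "gpt2", inputs: "Hello" },   // model and prompt are placeholders
      { includeCredentials: "include" }
    );
    console.log(out.generated_text);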
package/dist/index.js CHANGED
@@ -177,6 +177,14 @@ async function makeRequestOptions(args, options) {
  }
  return `${HF_INFERENCE_API_BASE_URL}/models/${model}`;
  })();
+ let credentials;
+ if (typeof includeCredentials === "string") {
+ credentials = includeCredentials;
+ } else if (typeof includeCredentials === "boolean") {
+ credentials = includeCredentials ? "include" : void 0;
+ } else if (includeCredentials === void 0) {
+ credentials = "same-origin";
+ }
  const info = {
  headers,
  method: "POST",
@@ -184,7 +192,7 @@ async function makeRequestOptions(args, options) {
  ...otherArgs,
  options: options && otherOptions
  }),
- credentials: includeCredentials ? "include" : "same-origin"
+ credentials
  };
  return { url, info };
  }
@@ -332,7 +340,7 @@ async function* streamingRequest(args, options) {
  }
  throw new Error(`Server response contains error: ${response.status}`);
  }
- if (response.headers.get("content-type") !== "text/event-stream") {
+ if (!response.headers.get("content-type")?.startsWith("text/event-stream")) {
  throw new Error(
  `Server does not support event stream content type, it returned ` + response.headers.get("content-type")
  );
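The compiled bundle replaces the old two-way expression (credentials: includeCredentials ? "include" : "same-origin") with the four-way mapping above. A standalone sketch of that mapping; the helper name is mine, not something exported by the package:

    // Mirrors the branch added to makeRequestOptions in the hunk above.
    function resolveCredentials(
      includeCredentials?: string | boolean
    ): RequestCredentials | undefined {
      if (typeof includeCredentials === "string") {
        return includeCredentials as RequestCredentials; // passed straight through
      }
      if (typeof includeCredentials === "boolean") {
        return includeCredentials ? "include" : undefined; // false: leave credentials unset
      }
      if (includeCredentials === undefined) {
        return "same-origin"; // option not provided: same default as before
      }
      return undefined;
    }

Callers that never set the option therefore keep the previous "same-origin" behaviour, which is the backwards-compatibility case called out in the source comment further down.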
package/dist/index.mjs CHANGED
@@ -125,6 +125,14 @@ async function makeRequestOptions(args, options) {
  }
  return `${HF_INFERENCE_API_BASE_URL}/models/${model}`;
  })();
+ let credentials;
+ if (typeof includeCredentials === "string") {
+ credentials = includeCredentials;
+ } else if (typeof includeCredentials === "boolean") {
+ credentials = includeCredentials ? "include" : void 0;
+ } else if (includeCredentials === void 0) {
+ credentials = "same-origin";
+ }
  const info = {
  headers,
  method: "POST",
@@ -132,7 +140,7 @@ async function makeRequestOptions(args, options) {
  ...otherArgs,
  options: options && otherOptions
  }),
- credentials: includeCredentials ? "include" : "same-origin"
+ credentials
  };
  return { url, info };
  }
@@ -280,7 +288,7 @@ async function* streamingRequest(args, options) {
  }
  throw new Error(`Server response contains error: ${response.status}`);
  }
- if (response.headers.get("content-type") !== "text/event-stream") {
+ if (!response.headers.get("content-type")?.startsWith("text/event-stream")) {
  throw new Error(
  `Server does not support event stream content type, it returned ` + response.headers.get("content-type")
  );
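Both bundles also relax the content-type check in streamingRequest: strict equality rejected any response whose media type carries parameters, while a prefix match accepts it. A sketch with an illustrative header value:

    const contentType = "text/event-stream; charset=utf-8"; // parameters on the media type are legal

    const oldCheckRejects = contentType !== "text/event-stream";          // true: 2.6.1 threw here
    const newCheckRejects = !contentType.startsWith("text/event-stream"); // false: 2.6.2 accepts the stream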
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@huggingface/inference",
- "version": "2.6.1",
+ "version": "2.6.2",
  "packageManager": "pnpm@8.3.1",
  "license": "MIT",
  "author": "Tim Mikeladze <tim.mikeladze@gmail.com>",
@@ -18,8 +18,6 @@ export async function makeRequestOptions(
  stream?: boolean;
  },
  options?: Options & {
- /** For internal HF use, which is why it's not exposed in {@link Options} */
- includeCredentials?: boolean;
  /** When a model can be used for multiple tasks, and we want to run a non-default task */
  forceTask?: string | InferenceTask;
  /** To load default model if needed */
@@ -83,6 +81,21 @@ export async function makeRequestOptions(
  return `${HF_INFERENCE_API_BASE_URL}/models/${model}`;
  })();

+ // Let users configure credentials, or disable them all together (or keep default behavior).
+ // ---
+ // This used to be an internal property only and never exposed to users. This means that most usages will never define this value
+ // So in order to make this backwards compatible, if it's undefined we go to "same-origin" (default behaviour before).
+ // If it's a boolean and set to true then set to "include". If false, don't define credentials at all (useful for edge runtimes)
+ // Then finally, if it's a string, use it as-is.
+ let credentials: RequestCredentials | undefined;
+ if (typeof includeCredentials === "string") {
+ credentials = includeCredentials as RequestCredentials;
+ } else if (typeof includeCredentials === "boolean") {
+ credentials = includeCredentials ? "include" : undefined;
+ } else if (includeCredentials === undefined) {
+ credentials = "same-origin";
+ }
+
  const info: RequestInit = {
  headers,
  method: "POST",
@@ -92,7 +105,7 @@ export async function makeRequestOptions(
  ...otherArgs,
  options: options && otherOptions,
  }),
- credentials: includeCredentials ? "include" : "same-origin",
+ credentials,
  };

  return { url, info };
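In the TypeScript source of makeRequestOptions (the hunk above), the same mapping is typed as RequestCredentials | undefined, and a string value is cast to the DOM RequestCredentials type, so the only meaningful string inputs are "omit", "same-origin" and "include". The resolved values, summarised as a sketch:

    // Option value on the left, resulting RequestInit.credentials on the right,
    // following the branch added above.
    const outcomes: Array<[string | boolean | undefined, RequestCredentials | undefined]> = [
      [undefined, "same-origin"], // option not set: previous default, kept for backwards compatibility
      [true, "include"],          // boolean true
      [false, undefined],         // boolean false: credentials left unset (the edge-runtime case)
      ["omit", "omit"],           // string: passed straight through
    ];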
@@ -7,8 +7,6 @@ import { makeRequestOptions } from "../../lib/makeRequestOptions";
  export async function request<T>(
  args: RequestArgs,
  options?: Options & {
- /** For internal HF use, which is why it's not exposed in {@link Options} */
- includeCredentials?: boolean;
  /** When a model can be used for multiple tasks, and we want to run a non-default task */
  task?: string | InferenceTask;
  /** To load default model if needed */
@@ -9,8 +9,6 @@ import { getLines, getMessages } from "../../vendor/fetch-event-source/parse";
  export async function* streamingRequest<T>(
  args: RequestArgs,
  options?: Options & {
- /** For internal HF use, which is why it's not exposed in {@link Options} */
- includeCredentials?: boolean;
  /** When a model can be used for multiple tasks, and we want to run a non-default task */
  task?: string | InferenceTask;
  /** To load default model if needed */
@@ -36,7 +34,7 @@ export async function* streamingRequest<T>(

  throw new Error(`Server response contains error: ${response.status}`);
  }
- if (response.headers.get("content-type") !== "text/event-stream") {
+ if (!response.headers.get("content-type")?.startsWith("text/event-stream")) {
  throw new Error(
  `Server does not support event stream content type, it returned ` + response.headers.get("content-type")
  );
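Since includeCredentials was removed from the inline option types of request and streamingRequest but added to Options, existing callers keep type-checking and new callers set it through the same options object. A sketch of a streaming call that leaves credentials unset; the argument values are illustrative and the event shape depends on the task:

    import { streamingRequest } from "@huggingface/inference";

    // includeCredentials now flows in through Options rather than the removed inline property.
    for await (const event of streamingRequest<unknown>(
      { accessToken: "hf_...", model: "gpt2", inputs: "Hello" }, // placeholder args
      { includeCredentials: false } // leave credentials unset on the underlying fetch
    )) {
      console.log(event);
    }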
package/src/types.ts CHANGED
@@ -24,6 +24,11 @@ export interface Options {
  * Custom fetch function to use instead of the default one, for example to use a proxy or edit headers.
  */
  fetch?: typeof fetch;
+
+ /**
+ * (Default: "same-origin"). String | Boolean. Credentials to use for the request. If this is a string, it will be passed straight on. If it's a boolean, true will be "include" and false will not send credentials at all.
+ */
+ includeCredentials?: string | boolean;
  }

  export type InferenceTask =