@huggingface/tasks 0.13.1-test2 → 0.13.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -47,9 +47,9 @@ const snippetTextGeneration = (model, accessToken, opts) => {
 	return [
 		{
 			client: "huggingface.js",
-			content: `import { HfInference } from "@huggingface/inference"
+			content: `import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("${accessToken || `{API_TOKEN}`}")
+const client = new HfInference("${accessToken || `{API_TOKEN}`}");
 
 let out = "";
 
@@ -69,12 +69,12 @@ for await (const chunk of stream) {
 		},
 		{
 			client: "openai",
-			content: `import { OpenAI } from "openai"
+			content: `import { OpenAI } from "openai";
 
 const client = new OpenAI({
 	baseURL: "https://api-inference.huggingface.co/v1/",
 	apiKey: "${accessToken || `{API_TOKEN}`}"
-})
+});
 
 let out = "";
 
@@ -99,9 +99,9 @@ for await (const chunk of stream) {
 	return [
 		{
 			client: "huggingface.js",
-			content: `import { HfInference } from "@huggingface/inference"
+			content: `import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("${accessToken || `{API_TOKEN}`}")
+const client = new HfInference("${accessToken || `{API_TOKEN}`}");
 
 const chatCompletion = await client.chatCompletion({
 	model: "${model.id}",
@@ -113,12 +113,12 @@ console.log(chatCompletion.choices[0].message);`,
 		},
 		{
 			client: "openai",
-			content: `import { OpenAI } from "openai"
+			content: `import { OpenAI } from "openai";
 
 const client = new OpenAI({
 	baseURL: "https://api-inference.huggingface.co/v1/",
 	apiKey: "${accessToken || `{API_TOKEN}`}"
-})
+});
 
 const chatCompletion = await client.chat.completions.create({
 	model: "${model.id}",
@@ -11,9 +11,9 @@ const js_js_1 = require("./js.js");
 			inference: "",
 		};
 		const snippet = (0, js_js_1.getJsInferenceSnippet)(model, "api_token");
-		(0, vitest_1.expect)(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+		(0, vitest_1.expect)(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("api_token")
+const client = new HfInference("api_token");
 
 let out = "";
 
@@ -44,9 +44,9 @@ for await (const chunk of stream) {
 			inference: "",
 		};
 		const snippet = (0, js_js_1.getJsInferenceSnippet)(model, "api_token", { streaming: false });
-		(0, vitest_1.expect)(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+		(0, vitest_1.expect)(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("api_token")
+const client = new HfInference("api_token");
 
 const chatCompletion = await client.chatCompletion({
 	model: "meta-llama/Llama-3.1-8B-Instruct",
@@ -69,9 +69,9 @@ console.log(chatCompletion.choices[0].message);`);
 			inference: "",
 		};
 		const snippet = (0, js_js_1.getJsInferenceSnippet)(model, "api_token");
-		(0, vitest_1.expect)(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+		(0, vitest_1.expect)(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("api_token")
+const client = new HfInference("api_token");
 
 let out = "";
 
@@ -113,9 +113,9 @@ for await (const chunk of stream) {
 			inference: "",
 		};
 		const snippet = (0, js_js_1.getJsInferenceSnippet)(model, "api_token");
-		(0, vitest_1.expect)(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+		(0, vitest_1.expect)(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("api_token")
+const client = new HfInference("api_token");
 
 let out = "";
 
@@ -41,9 +41,9 @@ export const snippetTextGeneration = (model, accessToken, opts) => {
 	return [
 		{
 			client: "huggingface.js",
-			content: `import { HfInference } from "@huggingface/inference"
+			content: `import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("${accessToken || `{API_TOKEN}`}")
+const client = new HfInference("${accessToken || `{API_TOKEN}`}");
 
 let out = "";
 
@@ -63,12 +63,12 @@ for await (const chunk of stream) {
 		},
 		{
 			client: "openai",
-			content: `import { OpenAI } from "openai"
+			content: `import { OpenAI } from "openai";
 
 const client = new OpenAI({
 	baseURL: "https://api-inference.huggingface.co/v1/",
 	apiKey: "${accessToken || `{API_TOKEN}`}"
-})
+});
 
 let out = "";
 
@@ -93,9 +93,9 @@ for await (const chunk of stream) {
 	return [
 		{
 			client: "huggingface.js",
-			content: `import { HfInference } from "@huggingface/inference"
+			content: `import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("${accessToken || `{API_TOKEN}`}")
+const client = new HfInference("${accessToken || `{API_TOKEN}`}");
 
 const chatCompletion = await client.chatCompletion({
 	model: "${model.id}",
@@ -107,12 +107,12 @@ console.log(chatCompletion.choices[0].message);`,
 		},
 		{
 			client: "openai",
-			content: `import { OpenAI } from "openai"
+			content: `import { OpenAI } from "openai";
 
 const client = new OpenAI({
 	baseURL: "https://api-inference.huggingface.co/v1/",
 	apiKey: "${accessToken || `{API_TOKEN}`}"
-})
+});
 
 const chatCompletion = await client.chat.completions.create({
 	model: "${model.id}",
@@ -9,9 +9,9 @@ describe("inference API snippets", () => {
 			inference: "",
 		};
 		const snippet = getJsInferenceSnippet(model, "api_token");
-		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("api_token")
+const client = new HfInference("api_token");
 
 let out = "";
 
@@ -42,9 +42,9 @@ for await (const chunk of stream) {
 			inference: "",
 		};
 		const snippet = getJsInferenceSnippet(model, "api_token", { streaming: false });
-		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("api_token")
+const client = new HfInference("api_token");
 
 const chatCompletion = await client.chatCompletion({
 	model: "meta-llama/Llama-3.1-8B-Instruct",
@@ -67,9 +67,9 @@ console.log(chatCompletion.choices[0].message);`);
 			inference: "",
 		};
 		const snippet = getJsInferenceSnippet(model, "api_token");
-		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("api_token")
+const client = new HfInference("api_token");
 
 let out = "";
 
@@ -111,9 +111,9 @@ for await (const chunk of stream) {
 			inference: "",
 		};
 		const snippet = getJsInferenceSnippet(model, "api_token");
-		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("api_token")
+const client = new HfInference("api_token");
 
 let out = "";
 
package/package.json CHANGED
@@ -1,16 +1,12 @@
 {
 	"name": "@huggingface/tasks",
 	"packageManager": "pnpm@8.10.5",
-	"version": "0.13.1-test2",
+	"version": "0.13.2",
 	"description": "List of ML tasks for huggingface.co/tasks",
 	"repository": "https://github.com/huggingface/huggingface.js.git",
 	"publishConfig": {
 		"access": "public"
 	},
-	"sources": "./src/index.ts",
-	"main": "./dist/commonjs/index.js",
-	"module": "./dist/esm/index.js",
-	"types": "./dist/commonjs/index.d.ts",
 	"exports": {
 		"./package.json": "./package.json",
 		".": {
@@ -24,10 +20,12 @@
 			}
 		}
 	},
+	"source": "./src/index.ts",
 	"type": "module",
 	"files": [
 		"dist",
-		"src"
+		"src",
+		"tsconfig.json"
 	],
 	"keywords": [
 		"huggingface",
@@ -42,6 +40,9 @@
 			".": "./src/index.ts"
 		}
 	},
+	"main": "./dist/commonjs/index.js",
+	"types": "./dist/commonjs/index.d.ts",
+	"module": "./dist/esm/index.js",
 	"scripts": {
 		"lint": "eslint --quiet --fix --ext .cjs,.ts .",
 		"lint:check": "eslint --ext .cjs,.ts .",
@@ -12,9 +12,9 @@ describe("inference API snippets", () => {
 		};
 		const snippet = getJsInferenceSnippet(model, "api_token") as InferenceSnippet[];
 
-		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("api_token")
+const client = new HfInference("api_token");
 
 let out = "";
 
@@ -47,9 +47,9 @@ for await (const chunk of stream) {
 		};
 		const snippet = getJsInferenceSnippet(model, "api_token", { streaming: false }) as InferenceSnippet[];
 
-		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("api_token")
+const client = new HfInference("api_token");
 
 const chatCompletion = await client.chatCompletion({
 	model: "meta-llama/Llama-3.1-8B-Instruct",
@@ -74,9 +74,9 @@ console.log(chatCompletion.choices[0].message);`);
 		};
 		const snippet = getJsInferenceSnippet(model, "api_token") as InferenceSnippet[];
 
-		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("api_token")
+const client = new HfInference("api_token");
 
 let out = "";
 
@@ -120,9 +120,9 @@ for await (const chunk of stream) {
 		};
 		const snippet = getJsInferenceSnippet(model, "api_token") as InferenceSnippet[];
 
-		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("api_token")
+const client = new HfInference("api_token");
 
 let out = "";
 
@@ -58,9 +58,9 @@ export const snippetTextGeneration = (
 	return [
 		{
 			client: "huggingface.js",
-			content: `import { HfInference } from "@huggingface/inference"
+			content: `import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("${accessToken || `{API_TOKEN}`}")
+const client = new HfInference("${accessToken || `{API_TOKEN}`}");
 
 let out = "";
 
@@ -80,12 +80,12 @@ for await (const chunk of stream) {
 		},
 		{
 			client: "openai",
-			content: `import { OpenAI } from "openai"
+			content: `import { OpenAI } from "openai";
 
 const client = new OpenAI({
 	baseURL: "https://api-inference.huggingface.co/v1/",
 	apiKey: "${accessToken || `{API_TOKEN}`}"
-})
+});
 
 let out = "";
 
@@ -109,9 +109,9 @@ for await (const chunk of stream) {
 	return [
 		{
 			client: "huggingface.js",
-			content: `import { HfInference } from "@huggingface/inference"
+			content: `import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("${accessToken || `{API_TOKEN}`}")
+const client = new HfInference("${accessToken || `{API_TOKEN}`}");
 
 const chatCompletion = await client.chatCompletion({
 	model: "${model.id}",
@@ -123,12 +123,12 @@ console.log(chatCompletion.choices[0].message);`,
 		},
 		{
 			client: "openai",
-			content: `import { OpenAI } from "openai"
+			content: `import { OpenAI } from "openai";
 
 const client = new OpenAI({
 	baseURL: "https://api-inference.huggingface.co/v1/",
 	apiKey: "${accessToken || `{API_TOKEN}`}"
-})
+});
 
 const chatCompletion = await client.chat.completions.create({
 	model: "${model.id}",
package/tsconfig.json ADDED
@@ -0,0 +1,20 @@
+{
+	"compilerOptions": {
+		"allowSyntheticDefaultImports": true,
+		"lib": ["ES2022", "DOM"],
+		"module": "NodeNext",
+		"target": "ESNext",
+		"moduleResolution": "nodenext",
+		"forceConsistentCasingInFileNames": true,
+		"strict": true,
+		"noImplicitAny": true,
+		"strictNullChecks": true,
+		"skipLibCheck": true,
+		"noImplicitOverride": true,
+		"outDir": "./dist",
+		"declaration": true,
+		"declarationMap": true
+	},
+	"include": ["src"],
+	"exclude": ["dist"]
+}
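Since `tsconfig.json` now ships alongside `src` in the published package, a consumer that compiles the bundled TypeScript source could, hypothetically, extend it. TypeScript resolves `extends` through node module resolution, and relative paths such as `include` and `outDir` resolve against the config file they originate in, so overriding them is advisable:

```jsonc
// Hypothetical consumer tsconfig.json, not part of this release.
{
	"extends": "@huggingface/tasks/tsconfig.json",
	"compilerOptions": {
		"outDir": "./build"
	},
	"include": ["./src"]
}
```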