@huggingface/tasks 0.13.0 → 0.13.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -5938,7 +5938,7 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
  prettyLabel: "open-oasis",
  repoName: "open-oasis",
  repoUrl: "https://github.com/etched-ai/open-oasis",
- countDownloads: `path:"oasis500m.pt"`
+ countDownloads: `path:"oasis500m.safetensors"`
  },
  open_clip: {
  prettyLabel: "OpenCLIP",
@@ -6524,8 +6524,8 @@ var curlSnippets = {
  "object-detection": snippetFile,
  "image-segmentation": snippetFile
  };
- function getCurlInferenceSnippet(model, accessToken) {
- return model.pipeline_tag && model.pipeline_tag in curlSnippets ? curlSnippets[model.pipeline_tag]?.(model, accessToken) ?? { content: "" } : { content: "" };
+ function getCurlInferenceSnippet(model, accessToken, opts) {
+ return model.pipeline_tag && model.pipeline_tag in curlSnippets ? curlSnippets[model.pipeline_tag]?.(model, accessToken, opts) ?? { content: "" } : { content: "" };
  }
  function hasCurlInferenceSnippet(model) {
  return !!model.pipeline_tag && model.pipeline_tag in curlSnippets;
@@ -6905,7 +6905,7 @@ for await (const chunk of stream) {
  return [
  {
  client: "huggingface.js",
- content: `import { HfInference } from '@huggingface/inference'
+ content: `import { HfInference } from "@huggingface/inference"

  const client = new HfInference("${accessToken || `{API_TOKEN}`}")

@@ -7067,8 +7067,8 @@ var jsSnippets = {
  "object-detection": snippetFile3,
  "image-segmentation": snippetFile3
  };
- function getJsInferenceSnippet(model, accessToken) {
- return model.pipeline_tag && model.pipeline_tag in jsSnippets ? jsSnippets[model.pipeline_tag]?.(model, accessToken) ?? { content: "" } : { content: "" };
+ function getJsInferenceSnippet(model, accessToken, opts) {
+ return model.pipeline_tag && model.pipeline_tag in jsSnippets ? jsSnippets[model.pipeline_tag]?.(model, accessToken, opts) ?? { content: "" } : { content: "" };
  }
  function hasJsInferenceSnippet(model) {
  return !!model.pipeline_tag && model.pipeline_tag in jsSnippets;
package/dist/index.js CHANGED
@@ -5896,7 +5896,7 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
  prettyLabel: "open-oasis",
  repoName: "open-oasis",
  repoUrl: "https://github.com/etched-ai/open-oasis",
- countDownloads: `path:"oasis500m.pt"`
+ countDownloads: `path:"oasis500m.safetensors"`
  },
  open_clip: {
  prettyLabel: "OpenCLIP",
@@ -6482,8 +6482,8 @@ var curlSnippets = {
  "object-detection": snippetFile,
  "image-segmentation": snippetFile
  };
- function getCurlInferenceSnippet(model, accessToken) {
- return model.pipeline_tag && model.pipeline_tag in curlSnippets ? curlSnippets[model.pipeline_tag]?.(model, accessToken) ?? { content: "" } : { content: "" };
+ function getCurlInferenceSnippet(model, accessToken, opts) {
+ return model.pipeline_tag && model.pipeline_tag in curlSnippets ? curlSnippets[model.pipeline_tag]?.(model, accessToken, opts) ?? { content: "" } : { content: "" };
  }
  function hasCurlInferenceSnippet(model) {
  return !!model.pipeline_tag && model.pipeline_tag in curlSnippets;
@@ -6863,7 +6863,7 @@ for await (const chunk of stream) {
  return [
  {
  client: "huggingface.js",
- content: `import { HfInference } from '@huggingface/inference'
+ content: `import { HfInference } from "@huggingface/inference"

  const client = new HfInference("${accessToken || `{API_TOKEN}`}")

@@ -7025,8 +7025,8 @@ var jsSnippets = {
  "object-detection": snippetFile3,
  "image-segmentation": snippetFile3
  };
- function getJsInferenceSnippet(model, accessToken) {
- return model.pipeline_tag && model.pipeline_tag in jsSnippets ? jsSnippets[model.pipeline_tag]?.(model, accessToken) ?? { content: "" } : { content: "" };
+ function getJsInferenceSnippet(model, accessToken, opts) {
+ return model.pipeline_tag && model.pipeline_tag in jsSnippets ? jsSnippets[model.pipeline_tag]?.(model, accessToken, opts) ?? { content: "" } : { content: "" };
  }
  function hasJsInferenceSnippet(model) {
  return !!model.pipeline_tag && model.pipeline_tag in jsSnippets;
@@ -12,6 +12,6 @@ export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToke
  export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
  export declare const snippetFile: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
  export declare const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, opts?: Record<string, unknown>) => InferenceSnippet>>;
- export declare function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string): InferenceSnippet;
+ export declare function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string, opts?: Record<string, unknown>): InferenceSnippet;
  export declare function hasCurlInferenceSnippet(model: Pick<ModelDataMinimal, "pipeline_tag">): boolean;
  //# sourceMappingURL=curl.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAM1E,CAAC;AAEH,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAkCF,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAM3F,CAAC;AAEH,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAKzE,CAAC;AAEH,eAAO,MAAM,YAAY,EAAE,OAAO,CACjC,MAAM,CACL,YAAY,EACZ,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,EAAE,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAAK,gBAAgB,CAClG,CA0BD,CAAC;AAEF,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,gBAAgB,CAItG;AAED,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,IAAI,CAAC,gBAAgB,EAAE,cAAc,CAAC,GAAG,OAAO,CAE9F"}
+ {"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAM1E,CAAC;AAEH,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAkCF,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAM3F,CAAC;AAEH,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAKzE,CAAC;AAEH,eAAO,MAAM,YAAY,EAAE,OAAO,CACjC,MAAM,CACL,YAAY,EACZ,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,EAAE,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAAK,gBAAgB,CAClG,CA0BD,CAAC;AAEF,wBAAgB,uBAAuB,CACtC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,CAIlB;AAED,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,IAAI,CAAC,gBAAgB,EAAE,cAAc,CAAC,GAAG,OAAO,CAE9F"}
@@ -14,6 +14,6 @@ export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken:
  export declare const snippetTextToAudio: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
  export declare const snippetFile: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
  export declare const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, opts?: Record<string, unknown>) => InferenceSnippet | InferenceSnippet[]>>;
- export declare function getJsInferenceSnippet(model: ModelDataMinimal, accessToken: string): InferenceSnippet | InferenceSnippet[];
+ export declare function getJsInferenceSnippet(model: ModelDataMinimal, accessToken: string, opts?: Record<string, unknown>): InferenceSnippet | InferenceSnippet[];
  export declare function hasJsInferenceSnippet(model: ModelDataMinimal): boolean;
  //# sourceMappingURL=js.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAoB1E,CAAC;AAEH,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,GAAG,gBAAgB,EA2GrC,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAsB3F,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAmBhF,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAuCjF,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAqBzE,CAAC;AAEH,eAAO,MAAM,UAAU,EAAE,OAAO,CAC/B,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,GAAG,gBAAgB,EAAE,CAC1C,CA0BD,CAAC;AAEF,wBAAgB,qBAAqB,CACpC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,GACjB,gBAAgB,GAAG,gBAAgB,EAAE,CAIvC;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAEtE"}
+ {"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAoB1E,CAAC;AAEH,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,GAAG,gBAAgB,EA2GrC,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAsB3F,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAmBhF,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAuCjF,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAqBzE,CAAC;AAEH,eAAO,MAAM,UAAU,EAAE,OAAO,CAC/B,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,GAAG,gBAAgB,EAAE,CAC1C,CA0BD,CAAC;AAEF,wBAAgB,qBAAqB,CACpC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,GAAG,gBAAgB,EAAE,CAIvC;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAEtE"}
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "@huggingface/tasks",
  "packageManager": "pnpm@8.10.5",
- "version": "0.13.0",
+ "version": "0.13.1",
  "description": "List of ML tasks for huggingface.co/tasks",
  "repository": "https://github.com/huggingface/huggingface.js.git",
  "publishConfig": {
@@ -480,7 +480,7 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
  prettyLabel: "open-oasis",
  repoName: "open-oasis",
  repoUrl: "https://github.com/etched-ai/open-oasis",
- countDownloads: `path:"oasis500m.pt"`,
+ countDownloads: `path:"oasis500m.safetensors"`,
  },
  open_clip: {
  prettyLabel: "OpenCLIP",
@@ -1,6 +1,6 @@
  import type { ModelDataMinimal } from "./types";
  import { describe, expect, it } from "vitest";
- import { snippetTextGeneration } from "./curl";
+ import { getCurlInferenceSnippet } from "./curl";

  describe("inference API snippets", () => {
  it("conversational llm", async () => {
@@ -10,7 +10,7 @@ describe("inference API snippets", () => {
  tags: ["conversational"],
  inference: "",
  };
- const snippet = snippetTextGeneration(model, "api_token");
+ const snippet = getCurlInferenceSnippet(model, "api_token");

  expect(snippet.content)
  .toEqual(`curl 'https://api-inference.huggingface.co/models/meta-llama/Llama-3.1-8B-Instruct/v1/chat/completions' \\
@@ -29,6 +29,32 @@ describe("inference API snippets", () => {
  }'`);
  });

+ it("conversational llm non-streaming", async () => {
+ const model: ModelDataMinimal = {
+ id: "meta-llama/Llama-3.1-8B-Instruct",
+ pipeline_tag: "text-generation",
+ tags: ["conversational"],
+ inference: "",
+ };
+ const snippet = getCurlInferenceSnippet(model, "api_token", { streaming: false });
+
+ expect(snippet.content)
+ .toEqual(`curl 'https://api-inference.huggingface.co/models/meta-llama/Llama-3.1-8B-Instruct/v1/chat/completions' \\
+ -H "Authorization: Bearer api_token" \\
+ -H 'Content-Type: application/json' \\
+ --data '{
+ "model": "meta-llama/Llama-3.1-8B-Instruct",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is the capital of France?"
+ }
+ ],
+ "max_tokens": 500,
+ "stream": false
+ }'`);
+ });
+
  it("conversational vlm", async () => {
  const model: ModelDataMinimal = {
  id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
@@ -36,7 +62,7 @@ describe("inference API snippets", () => {
  tags: ["conversational"],
  inference: "",
  };
- const snippet = snippetTextGeneration(model, "api_token");
+ const snippet = getCurlInferenceSnippet(model, "api_token");

  expect(snippet.content)
  .toEqual(`curl 'https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct/v1/chat/completions' \\
@@ -105,9 +105,13 @@ export const curlSnippets: Partial<
  "image-segmentation": snippetFile,
  };

- export function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string): InferenceSnippet {
+ export function getCurlInferenceSnippet(
+ model: ModelDataMinimal,
+ accessToken: string,
+ opts?: Record<string, unknown>
+ ): InferenceSnippet {
  return model.pipeline_tag && model.pipeline_tag in curlSnippets
- ? curlSnippets[model.pipeline_tag]?.(model, accessToken) ?? { content: "" }
+ ? curlSnippets[model.pipeline_tag]?.(model, accessToken, opts) ?? { content: "" }
  : { content: "" };
  }

@@ -1,6 +1,6 @@
  import type { InferenceSnippet, ModelDataMinimal } from "./types";
  import { describe, expect, it } from "vitest";
- import { snippetTextGeneration } from "./js";
+ import { getJsInferenceSnippet } from "./js";

  describe("inference API snippets", () => {
  it("conversational llm", async () => {
@@ -10,7 +10,7 @@ describe("inference API snippets", () => {
  tags: ["conversational"],
  inference: "",
  };
- const snippet = snippetTextGeneration(model, "api_token") as InferenceSnippet[];
+ const snippet = getJsInferenceSnippet(model, "api_token") as InferenceSnippet[];

  expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"

@@ -38,6 +38,33 @@ for await (const chunk of stream) {
  }`);
  });

+ it("conversational llm non-streaming", async () => {
+ const model: ModelDataMinimal = {
+ id: "meta-llama/Llama-3.1-8B-Instruct",
+ pipeline_tag: "text-generation",
+ tags: ["conversational"],
+ inference: "",
+ };
+ const snippet = getJsInferenceSnippet(model, "api_token", { streaming: false }) as InferenceSnippet[];
+
+ expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+
+ const client = new HfInference("api_token")
+
+ const chatCompletion = await client.chatCompletion({
+ model: "meta-llama/Llama-3.1-8B-Instruct",
+ messages: [
+ {
+ role: "user",
+ content: "What is the capital of France?"
+ }
+ ],
+ max_tokens: 500
+ });
+
+ console.log(chatCompletion.choices[0].message);`);
+ });
+
  it("conversational vlm", async () => {
  const model: ModelDataMinimal = {
  id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
@@ -45,7 +72,7 @@ for await (const chunk of stream) {
  tags: ["conversational"],
  inference: "",
  };
- const snippet = snippetTextGeneration(model, "api_token") as InferenceSnippet[];
+ const snippet = getJsInferenceSnippet(model, "api_token") as InferenceSnippet[];

  expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"

@@ -75,6 +102,41 @@ const stream = client.chatCompletionStream({
  max_tokens: 500
  });

+ for await (const chunk of stream) {
+ if (chunk.choices && chunk.choices.length > 0) {
+ const newContent = chunk.choices[0].delta.content;
+ out += newContent;
+ console.log(newContent);
+ }
+ }`);
+ });
+
+ it("conversational llm", async () => {
+ const model: ModelDataMinimal = {
+ id: "meta-llama/Llama-3.1-8B-Instruct",
+ pipeline_tag: "text-generation",
+ tags: ["conversational"],
+ inference: "",
+ };
+ const snippet = getJsInferenceSnippet(model, "api_token") as InferenceSnippet[];
+
+ expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+
+ const client = new HfInference("api_token")
+
+ let out = "";
+
+ const stream = client.chatCompletionStream({
+ model: "meta-llama/Llama-3.1-8B-Instruct",
+ messages: [
+ {
+ role: "user",
+ content: "What is the capital of France?"
+ }
+ ],
+ max_tokens: 500
+ });
+
  for await (const chunk of stream) {
  if (chunk.choices && chunk.choices.length > 0) {
  const newContent = chunk.choices[0].delta.content;
@@ -109,7 +109,7 @@ for await (const chunk of stream) {
  return [
  {
  client: "huggingface.js",
- content: `import { HfInference } from '@huggingface/inference'
+ content: `import { HfInference } from "@huggingface/inference"

  const client = new HfInference("${accessToken || `{API_TOKEN}`}")

@@ -292,10 +292,11 @@ export const jsSnippets: Partial<

  export function getJsInferenceSnippet(
  model: ModelDataMinimal,
- accessToken: string
+ accessToken: string,
+ opts?: Record<string, unknown>
  ): InferenceSnippet | InferenceSnippet[] {
  return model.pipeline_tag && model.pipeline_tag in jsSnippets
- ? jsSnippets[model.pipeline_tag]?.(model, accessToken) ?? { content: "" }
+ ? jsSnippets[model.pipeline_tag]?.(model, accessToken, opts) ?? { content: "" }
  : { content: "" };
  }

@@ -1,6 +1,6 @@
- import type { ModelDataMinimal } from "./types";
+ import type { InferenceSnippet, ModelDataMinimal } from "./types";
  import { describe, expect, it } from "vitest";
- import { snippetConversational } from "./python";
+ import { getPythonInferenceSnippet } from "./python";

  describe("inference API snippets", () => {
  it("conversational llm", async () => {
@@ -10,7 +10,7 @@ describe("inference API snippets", () => {
  tags: ["conversational"],
  inference: "",
  };
- const snippet = snippetConversational(model, "api_token");
+ const snippet = getPythonInferenceSnippet(model, "api_token") as InferenceSnippet[];

  expect(snippet[0].content).toEqual(`from huggingface_hub import InferenceClient

@@ -34,6 +34,35 @@ for chunk in stream:
  print(chunk.choices[0].delta.content, end="")`);
  });

+ it("conversational llm non-streaming", async () => {
+ const model: ModelDataMinimal = {
+ id: "meta-llama/Llama-3.1-8B-Instruct",
+ pipeline_tag: "text-generation",
+ tags: ["conversational"],
+ inference: "",
+ };
+ const snippet = getPythonInferenceSnippet(model, "api_token", { streaming: false }) as InferenceSnippet[];
+
+ expect(snippet[0].content).toEqual(`from huggingface_hub import InferenceClient
+
+ client = InferenceClient(api_key="api_token")
+
+ messages = [
+ {
+ "role": "user",
+ "content": "What is the capital of France?"
+ }
+ ]
+
+ completion = client.chat.completions.create(
+ model="meta-llama/Llama-3.1-8B-Instruct",
+ messages=messages,
+ max_tokens=500
+ )
+
+ print(completion.choices[0].message)`);
+ });
+
  it("conversational vlm", async () => {
  const model: ModelDataMinimal = {
  id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
@@ -41,7 +70,7 @@ for chunk in stream:
  tags: ["conversational"],
  inference: "",
  };
- const snippet = snippetConversational(model, "api_token");
+ const snippet = getPythonInferenceSnippet(model, "api_token") as InferenceSnippet[];

  expect(snippet[0].content).toEqual(`from huggingface_hub import InferenceClient
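
The functional change running through the hunks above is the new optional `opts` argument on `getCurlInferenceSnippet` and `getJsInferenceSnippet` (the added specs exercise it for the Python helper as well), which is forwarded to the per-pipeline snippet generator so callers can request, for example, a non-streaming chat-completion snippet via `{ streaming: false }`. A minimal TypeScript usage sketch follows; the root import path and the inline object standing in for `ModelDataMinimal` are illustrative assumptions, not part of this diff:

// Sketch only: assumes the snippet helpers are re-exported from the package root,
// as suggested by dist/index.js above.
import { getCurlInferenceSnippet, getJsInferenceSnippet } from "@huggingface/tasks";

// Inline stand-in mirroring the ModelDataMinimal objects used in the specs.
const model = {
  id: "meta-llama/Llama-3.1-8B-Instruct",
  pipeline_tag: "text-generation" as const,
  tags: ["conversational"],
  inference: "",
};

// Unchanged default: a streaming chat-completion curl snippet.
const curlStreaming = getCurlInferenceSnippet(model, "api_token");

// New in 0.13.1: opts is passed through, e.g. { streaming: false } per the added specs.
const curlNonStreaming = getCurlInferenceSnippet(model, "api_token", { streaming: false });
const jsSnippet = getJsInferenceSnippet(model, "api_token", { streaming: false });

console.log(curlStreaming.content);
console.log(curlNonStreaming.content);
console.log(Array.isArray(jsSnippet) ? jsSnippet[0].content : jsSnippet.content);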