@huggingface/tasks 0.15.9 → 0.16.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
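The substantive change in 0.16.0 is that every snippet generator (curl, JS, and Python) gains a new optional providerModelId parameter, inserted before opts and falling back to model.id when omitted, so generated snippets can reference an inference provider's own model identifier instead of the Hugging Face Hub id. A minimal TypeScript sketch of the new call shape, assuming the package's snippets re-export; the model data and token below are placeholders:

// Sketch only, not verified against the published typings;
// ModelDataMinimal may require more fields than shown here.
import { snippets } from "@huggingface/tasks";

const model = {
  id: "deepseek-ai/DeepSeek-R1", // Hub id (hypothetical example)
  pipeline_tag: "text-generation",
  tags: ["conversational"],
} as Parameters<typeof snippets.js.getJsInferenceSnippet>[0];

// The fourth positional argument is the new providerModelId; omitting it
// preserves the pre-0.16.0 behavior of using model.id everywhere.
const generated = snippets.js.getJsInferenceSnippet(
  model,
  "hf_xxx", // access token placeholder
  "together", // provider
  "deepseek-ai/DeepSeek-R1" // provider-side id (hypothetical)
);
console.log(generated.map((s) => s.client));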
@@ -3,7 +3,7 @@ import type { PipelineType } from "../pipelines.js";
  import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
  import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
  export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
- export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: {
+ export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: {
  streaming?: boolean;
  messages?: ChatCompletionInputMessage[];
  temperature?: GenerationParameters["temperature"];
@@ -12,6 +12,6 @@ export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToke
  }) => InferenceSnippet[];
  export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
  export declare const snippetFile: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
- export declare const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
- export declare function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
+ export declare const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
+ export declare function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>): InferenceSnippet[];
  //# sourceMappingURL=curl.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,EAAmC,KAAK,wBAAwB,EAAE,MAAM,2BAA2B,CAAC;AAC3G,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UACjB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAelB,CAAC;AAEF,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,YACT,wBAAwB,SAC3B;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EA0ClB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAClC,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAclB,CAAC;AAEF,eAAO,MAAM,WAAW,UAChB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAalB,CAAC;AAEF,eAAO,MAAM,YAAY,EAAE,OAAO,CACjC,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,EAAE,CACvB,CA0BD,CAAC;AAEF,wBAAgB,uBAAuB,CACtC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
+ {"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,EAAmC,KAAK,wBAAwB,EAAE,MAAM,2BAA2B,CAAC;AAC3G,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UACjB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAelB,CAAC;AAEF,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,YACT,wBAAwB,oBAChB,MAAM,SACjB;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EA2ClB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAClC,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAclB,CAAC;AAEF,eAAO,MAAM,WAAW,UAChB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAalB,CAAC;AAEF,eAAO,MAAM,YAAY,EAAE,OAAO,CACjC,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,EAAE,CACvB,CA0BD,CAAC;AAEF,wBAAgB,uBAAuB,CACtC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
@@ -22,11 +22,12 @@ curl https://router.huggingface.co/hf-inference/models/${model.id} \\
  ];
  };
  exports.snippetBasic = snippetBasic;
- const snippetTextGeneration = (model, accessToken, provider, opts) => {
+ const snippetTextGeneration = (model, accessToken, provider, providerModelId, opts) => {
  if (model.tags.includes("conversational")) {
  const baseUrl = provider === "hf-inference"
  ? `https://router.huggingface.co/hf-inference/models/${model.id}/v1/chat/completions`
  : inference_providers_js_1.HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider) + "/v1/chat/completions";
+ const modelId = providerModelId ?? model.id;
  // Conversational model detected, so we display a code snippet that features the Messages API
  const streaming = opts?.streaming ?? true;
  const exampleMessages = (0, inputs_js_1.getModelInputSnippet)(model);
@@ -43,7 +44,7 @@ const snippetTextGeneration = (model, accessToken, provider, opts) => {
  -H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}' \\
  -H 'Content-Type: application/json' \\
  --data '{
- "model": "${model.id}",
+ "model": "${modelId}",
  "messages": ${(0, common_js_1.stringifyMessages)(messages, {
  indent: "\t",
  attributeKeyQuotes: true,
@@ -121,8 +122,8 @@ exports.curlSnippets = {
  "object-detection": exports.snippetFile,
  "image-segmentation": exports.snippetFile,
  };
- function getCurlInferenceSnippet(model, accessToken, provider, opts) {
+ function getCurlInferenceSnippet(model, accessToken, provider, providerModelId, opts) {
  return model.pipeline_tag && model.pipeline_tag in exports.curlSnippets
- ? exports.curlSnippets[model.pipeline_tag]?.(model, accessToken, provider, opts) ?? []
+ ? exports.curlSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId, opts) ?? []
  : [];
  }
@@ -3,7 +3,7 @@ import type { PipelineType } from "../pipelines.js";
  import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
  import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
  export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
- export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: {
+ export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: {
  streaming?: boolean;
  messages?: ChatCompletionInputMessage[];
  temperature?: GenerationParameters["temperature"];
@@ -15,6 +15,6 @@ export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken:
  export declare const snippetTextToAudio: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
  export declare const snippetAutomaticSpeechRecognition: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
  export declare const snippetFile: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
- export declare const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
- export declare function getJsInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
+ export declare const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
+ export declare function getJsInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>): InferenceSnippet[];
  //# sourceMappingURL=js.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,EAAiB,KAAK,wBAAwB,EAAE,MAAM,2BAA2B,CAAC;AACzF,OAAO,KAAK,EAAE,YAAY,EAAc,MAAM,iBAAiB,CAAC;AAChE,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAgBrE,eAAO,MAAM,YAAY,UACjB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EA8ClB,CAAC;AAEF,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,YACT,wBAAwB,SAC3B;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EA+GlB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAAgB,EA2B5G,CAAC;AAEF,eAAO,MAAM,kBAAkB,UACvB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EA4ClB,CAAC;AAEF,eAAO,MAAM,kBAAkB,UACvB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAgDlB,CAAC;AAEF,eAAO,MAAM,iCAAiC,UACtC,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAsBlB,CAAC;AAEF,eAAO,MAAM,WAAW,UAChB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EA6BlB,CAAC;AAEF,eAAO,MAAM,UAAU,EAAE,OAAO,CAC/B,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,EAAE,CACvB,CA0BD,CAAC;AAEF,wBAAgB,qBAAqB,CACpC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
+ {"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,EAAiB,KAAK,wBAAwB,EAAE,MAAM,2BAA2B,CAAC;AACzF,OAAO,KAAK,EAAE,YAAY,EAAc,MAAM,iBAAiB,CAAC;AAChE,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAgBrE,eAAO,MAAM,YAAY,UACjB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EA8ClB,CAAC;AAEF,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,YACT,wBAAwB,oBAChB,MAAM,SACjB;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EA+GlB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAAgB,EA2B5G,CAAC;AAEF,eAAO,MAAM,kBAAkB,UACvB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EA4ClB,CAAC;AAEF,eAAO,MAAM,kBAAkB,UACvB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAgDlB,CAAC;AAEF,eAAO,MAAM,iCAAiC,UACtC,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAsBlB,CAAC;AAEF,eAAO,MAAM,WAAW,UAChB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EA6BlB,CAAC;AAEF,eAAO,MAAM,UAAU,EAAE,OAAO,CAC/B,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,EAAE,CACvB,CA0BD,CAAC;AAEF,wBAAgB,qBAAqB,CACpC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
@@ -66,7 +66,7 @@ query({"inputs": ${(0, inputs_js_1.getModelInputSnippet)(model)}}).then((respons
  ];
  };
  exports.snippetBasic = snippetBasic;
- const snippetTextGeneration = (model, accessToken, provider, opts) => {
+ const snippetTextGeneration = (model, accessToken, provider, providerModelId, opts) => {
  if (model.tags.includes("conversational")) {
  // Conversational model detected, so we display a code snippet that features the Messages API
  const streaming = opts?.streaming ?? true;
@@ -119,7 +119,7 @@ const client = new OpenAI({
  let out = "";

  const stream = await client.chat.completions.create({
- model: "${model.id}",
+ model: "${providerModelId ?? model.id}",
  messages: ${messagesStr},
  ${configStr}
  stream: true,
@@ -163,7 +163,7 @@ const client = new OpenAI({
  });

  const chatCompletion = await client.chat.completions.create({
- model: "${model.id}",
+ model: "${providerModelId ?? model.id}",
  messages: ${messagesStr},
  ${configStr}
  });
@@ -382,8 +382,8 @@ exports.jsSnippets = {
  "object-detection": exports.snippetFile,
  "image-segmentation": exports.snippetFile,
  };
- function getJsInferenceSnippet(model, accessToken, provider, opts) {
+ function getJsInferenceSnippet(model, accessToken, provider, providerModelId, opts) {
  return model.pipeline_tag && model.pipeline_tag in exports.jsSnippets
- ? exports.jsSnippets[model.pipeline_tag]?.(model, accessToken, provider, opts) ?? []
+ ? exports.jsSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId, opts) ?? []
  : [];
  }
@@ -2,7 +2,7 @@ import { type SnippetInferenceProvider } from "../inference-providers.js";
  import type { PipelineType } from "../pipelines.js";
  import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
  import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
- export declare const snippetConversational: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: {
+ export declare const snippetConversational: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: {
  streaming?: boolean;
  messages?: ChatCompletionInputMessage[];
  temperature?: GenerationParameters["temperature"];
@@ -13,10 +13,10 @@ export declare const snippetZeroShotClassification: (model: ModelDataMinimal) =>
  export declare const snippetZeroShotImageClassification: (model: ModelDataMinimal) => InferenceSnippet[];
  export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
  export declare const snippetFile: (model: ModelDataMinimal) => InferenceSnippet[];
- export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
+ export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string) => InferenceSnippet[];
  export declare const snippetTabular: (model: ModelDataMinimal) => InferenceSnippet[];
  export declare const snippetTextToAudio: (model: ModelDataMinimal) => InferenceSnippet[];
  export declare const snippetDocumentQuestionAnswering: (model: ModelDataMinimal) => InferenceSnippet[];
- export declare const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
- export declare function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
+ export declare const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
+ export declare function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>): InferenceSnippet[];
  //# sourceMappingURL=python.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"python.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.ts"],"names":[],"mappings":"AAAA,OAAO,EAGN,KAAK,wBAAwB,EAC7B,MAAM,2BAA2B,CAAC;AACnC,OAAO,KAAK,EAAE,YAAY,EAAc,MAAM,iBAAiB,CAAC;AAChE,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAyCrE,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,YACT,wBAAwB,SAC3B;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EAiGlB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,KAAG,gBAAgB,EAevF,CAAC;AAEF,eAAO,MAAM,kCAAkC,UAAW,gBAAgB,KAAG,gBAAgB,EAqB5F,CAAC;AAEF,eAAO,MAAM,YAAY,UACjB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAgClB,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,KAAG,gBAAgB,EAcrE,CAAC;AAEF,eAAO,MAAM,kBAAkB,UACvB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAqDlB,CAAC;AAEF,eAAO,MAAM,cAAc,UAAW,gBAAgB,KAAG,gBAAgB,EAcxE,CAAC;AAEF,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,gBAAgB,EAuC5E,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,KAAG,gBAAgB,EAiB1F,CAAC;AAEF,eAAO,MAAM,cAAc,EAAE,OAAO,CACnC,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,EAAE,CACvB,CA8BD,CAAC;AAEF,wBAAgB,yBAAyB,CACxC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CA0BpB"}
+ {"version":3,"file":"python.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.ts"],"names":[],"mappings":"AAAA,OAAO,EAEN,KAAK,wBAAwB,EAC7B,MAAM,2BAA2B,CAAC;AACnC,OAAO,KAAK,EAAE,YAAY,EAAc,MAAM,iBAAiB,CAAC;AAChE,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAyCrE,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,YACT,wBAAwB,oBAChB,MAAM,SACjB;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EAiGlB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,KAAG,gBAAgB,EAevF,CAAC;AAEF,eAAO,MAAM,kCAAkC,UAAW,gBAAgB,KAAG,gBAAgB,EAqB5F,CAAC;AAEF,eAAO,MAAM,YAAY,UACjB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAgClB,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,KAAG,gBAAgB,EAcrE,CAAC;AAEF,eAAO,MAAM,kBAAkB,UACvB,gBAAgB,eACV,MAAM,YACT,wBAAwB,oBAChB,MAAM,KACtB,gBAAgB,EAoDlB,CAAC;AAEF,eAAO,MAAM,cAAc,UAAW,gBAAgB,KAAG,gBAAgB,EAcxE,CAAC;AAEF,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,gBAAgB,EAuC5E,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,KAAG,gBAAgB,EAiB1F,CAAC;AAEF,eAAO,MAAM,cAAc,EAAE,OAAO,CACnC,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,EAAE,CACvB,CA8BD,CAAC;AAEF,wBAAgB,yBAAyB,CACxC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CA0BpB"}
@@ -41,7 +41,7 @@ client = InferenceClient(
  provider="${provider}",
  api_key="${accessToken || "{API_TOKEN}"}"
  )`;
- const snippetConversational = (model, accessToken, provider, opts) => {
+ const snippetConversational = (model, accessToken, provider, providerModelId, opts) => {
  const streaming = opts?.streaming ?? true;
  const exampleMessages = (0, inputs_js_1.getModelInputSnippet)(model);
  const messages = opts?.messages ?? exampleMessages;
@@ -87,7 +87,7 @@ client = OpenAI(
  messages = ${messagesStr}

  stream = client.chat.completions.create(
- model="${model.id}",
+ model="${providerModelId ?? model.id}",
  messages=messages,
  ${configStr}
  stream=True
@@ -128,7 +128,7 @@ client = OpenAI(
  messages = ${messagesStr}

  completion = client.chat.completions.create(
- model="${model.id}",
+ model="${providerModelId ?? model.id}",
  messages=messages,
  ${configStr}
  )
@@ -229,7 +229,7 @@ output = query(${(0, inputs_js_1.getModelInputSnippet)(model)})`,
  ];
  };
  exports.snippetFile = snippetFile;
- const snippetTextToImage = (model, accessToken, provider) => {
+ const snippetTextToImage = (model, accessToken, provider, providerModelId) => {
  return [
  {
  client: "huggingface_hub",
@@ -250,8 +250,7 @@ image = client.text_to_image(
  import fal_client

  result = fal_client.subscribe(
- # replace with correct id from fal.ai
- "fal-ai/${model.id}",
+ "${providerModelId ?? model.id}",
  arguments={
  "prompt": ${(0, inputs_js_1.getModelInputSnippet)(model)},
  },
@@ -391,14 +390,14 @@ exports.pythonSnippets = {
  "image-to-text": exports.snippetFile,
  "zero-shot-image-classification": exports.snippetZeroShotImageClassification,
  };
- function getPythonInferenceSnippet(model, accessToken, provider, opts) {
+ function getPythonInferenceSnippet(model, accessToken, provider, providerModelId, opts) {
  if (model.tags.includes("conversational")) {
  // Conversational model detected, so we display a code snippet that features the Messages API
- return (0, exports.snippetConversational)(model, accessToken, provider, opts);
+ return (0, exports.snippetConversational)(model, accessToken, provider, providerModelId, opts);
  }
  else {
  const snippets = model.pipeline_tag && model.pipeline_tag in exports.pythonSnippets
- ? exports.pythonSnippets[model.pipeline_tag]?.(model, accessToken, provider) ?? []
+ ? exports.pythonSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId) ?? []
  : [];
  return snippets.map((snippet) => {
  return {
@@ -3,7 +3,7 @@ import type { PipelineType } from "../pipelines.js";
  import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
  import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
  export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
- export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: {
+ export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: {
  streaming?: boolean;
  messages?: ChatCompletionInputMessage[];
  temperature?: GenerationParameters["temperature"];
@@ -12,6 +12,6 @@ export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToke
  }) => InferenceSnippet[];
  export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
  export declare const snippetFile: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
- export declare const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
- export declare function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
+ export declare const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
+ export declare function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>): InferenceSnippet[];
  //# sourceMappingURL=curl.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,EAAmC,KAAK,wBAAwB,EAAE,MAAM,2BAA2B,CAAC;AAC3G,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UACjB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAelB,CAAC;AAEF,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,YACT,wBAAwB,SAC3B;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EA0ClB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAClC,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAclB,CAAC;AAEF,eAAO,MAAM,WAAW,UAChB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAalB,CAAC;AAEF,eAAO,MAAM,YAAY,EAAE,OAAO,CACjC,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,EAAE,CACvB,CA0BD,CAAC;AAEF,wBAAgB,uBAAuB,CACtC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
+ {"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,EAAmC,KAAK,wBAAwB,EAAE,MAAM,2BAA2B,CAAC;AAC3G,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UACjB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAelB,CAAC;AAEF,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,YACT,wBAAwB,oBAChB,MAAM,SACjB;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EA2ClB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAClC,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAclB,CAAC;AAEF,eAAO,MAAM,WAAW,UAChB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAalB,CAAC;AAEF,eAAO,MAAM,YAAY,EAAE,OAAO,CACjC,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,EAAE,CACvB,CA0BD,CAAC;AAEF,wBAAgB,uBAAuB,CACtC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
@@ -17,11 +17,12 @@ curl https://router.huggingface.co/hf-inference/models/${model.id} \\
  },
  ];
  };
- export const snippetTextGeneration = (model, accessToken, provider, opts) => {
+ export const snippetTextGeneration = (model, accessToken, provider, providerModelId, opts) => {
  if (model.tags.includes("conversational")) {
  const baseUrl = provider === "hf-inference"
  ? `https://router.huggingface.co/hf-inference/models/${model.id}/v1/chat/completions`
  : HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider) + "/v1/chat/completions";
+ const modelId = providerModelId ?? model.id;
  // Conversational model detected, so we display a code snippet that features the Messages API
  const streaming = opts?.streaming ?? true;
  const exampleMessages = getModelInputSnippet(model);
@@ -38,7 +39,7 @@ export const snippetTextGeneration = (model, accessToken, provider, opts) => {
  -H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}' \\
  -H 'Content-Type: application/json' \\
  --data '{
- "model": "${model.id}",
+ "model": "${modelId}",
  "messages": ${stringifyMessages(messages, {
  indent: "\t",
  attributeKeyQuotes: true,
@@ -113,8 +114,8 @@ export const curlSnippets = {
  "object-detection": snippetFile,
  "image-segmentation": snippetFile,
  };
- export function getCurlInferenceSnippet(model, accessToken, provider, opts) {
+ export function getCurlInferenceSnippet(model, accessToken, provider, providerModelId, opts) {
  return model.pipeline_tag && model.pipeline_tag in curlSnippets
- ? curlSnippets[model.pipeline_tag]?.(model, accessToken, provider, opts) ?? []
+ ? curlSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId, opts) ?? []
  : [];
  }
@@ -3,7 +3,7 @@ import type { PipelineType } from "../pipelines.js";
  import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
  import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
  export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
- export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: {
+ export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: {
  streaming?: boolean;
  messages?: ChatCompletionInputMessage[];
  temperature?: GenerationParameters["temperature"];
@@ -15,6 +15,6 @@ export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken:
  export declare const snippetTextToAudio: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
  export declare const snippetAutomaticSpeechRecognition: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
  export declare const snippetFile: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
- export declare const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
- export declare function getJsInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
+ export declare const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
+ export declare function getJsInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>): InferenceSnippet[];
  //# sourceMappingURL=js.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,EAAiB,KAAK,wBAAwB,EAAE,MAAM,2BAA2B,CAAC;AACzF,OAAO,KAAK,EAAE,YAAY,EAAc,MAAM,iBAAiB,CAAC;AAChE,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAgBrE,eAAO,MAAM,YAAY,UACjB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EA8ClB,CAAC;AAEF,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,YACT,wBAAwB,SAC3B;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EA+GlB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAAgB,EA2B5G,CAAC;AAEF,eAAO,MAAM,kBAAkB,UACvB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EA4ClB,CAAC;AAEF,eAAO,MAAM,kBAAkB,UACvB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAgDlB,CAAC;AAEF,eAAO,MAAM,iCAAiC,UACtC,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAsBlB,CAAC;AAEF,eAAO,MAAM,WAAW,UAChB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EA6BlB,CAAC;AAEF,eAAO,MAAM,UAAU,EAAE,OAAO,CAC/B,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,EAAE,CACvB,CA0BD,CAAC;AAEF,wBAAgB,qBAAqB,CACpC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
+ {"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,EAAiB,KAAK,wBAAwB,EAAE,MAAM,2BAA2B,CAAC;AACzF,OAAO,KAAK,EAAE,YAAY,EAAc,MAAM,iBAAiB,CAAC;AAChE,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAgBrE,eAAO,MAAM,YAAY,UACjB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EA8ClB,CAAC;AAEF,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,YACT,wBAAwB,oBAChB,MAAM,SACjB;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EA+GlB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAAgB,EA2B5G,CAAC;AAEF,eAAO,MAAM,kBAAkB,UACvB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EA4ClB,CAAC;AAEF,eAAO,MAAM,kBAAkB,UACvB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAgDlB,CAAC;AAEF,eAAO,MAAM,iCAAiC,UACtC,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAsBlB,CAAC;AAEF,eAAO,MAAM,WAAW,UAChB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EA6BlB,CAAC;AAEF,eAAO,MAAM,UAAU,EAAE,OAAO,CAC/B,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,EAAE,CACvB,CA0BD,CAAC;AAEF,wBAAgB,qBAAqB,CACpC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
@@ -61,7 +61,7 @@ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
  },
  ];
  };
- export const snippetTextGeneration = (model, accessToken, provider, opts) => {
+ export const snippetTextGeneration = (model, accessToken, provider, providerModelId, opts) => {
  if (model.tags.includes("conversational")) {
  // Conversational model detected, so we display a code snippet that features the Messages API
  const streaming = opts?.streaming ?? true;
@@ -114,7 +114,7 @@ const client = new OpenAI({
  let out = "";

  const stream = await client.chat.completions.create({
- model: "${model.id}",
+ model: "${providerModelId ?? model.id}",
  messages: ${messagesStr},
  ${configStr}
  stream: true,
@@ -158,7 +158,7 @@ const client = new OpenAI({
  });

  const chatCompletion = await client.chat.completions.create({
- model: "${model.id}",
+ model: "${providerModelId ?? model.id}",
  messages: ${messagesStr},
  ${configStr}
  });
@@ -371,8 +371,8 @@ export const jsSnippets = {
  "object-detection": snippetFile,
  "image-segmentation": snippetFile,
  };
- export function getJsInferenceSnippet(model, accessToken, provider, opts) {
+ export function getJsInferenceSnippet(model, accessToken, provider, providerModelId, opts) {
  return model.pipeline_tag && model.pipeline_tag in jsSnippets
- ? jsSnippets[model.pipeline_tag]?.(model, accessToken, provider, opts) ?? []
+ ? jsSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId, opts) ?? []
  : [];
  }
@@ -2,7 +2,7 @@ import { type SnippetInferenceProvider } from "../inference-providers.js";
  import type { PipelineType } from "../pipelines.js";
  import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
  import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
- export declare const snippetConversational: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: {
+ export declare const snippetConversational: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: {
  streaming?: boolean;
  messages?: ChatCompletionInputMessage[];
  temperature?: GenerationParameters["temperature"];
@@ -13,10 +13,10 @@ export declare const snippetZeroShotClassification: (model: ModelDataMinimal) =>
  export declare const snippetZeroShotImageClassification: (model: ModelDataMinimal) => InferenceSnippet[];
  export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
  export declare const snippetFile: (model: ModelDataMinimal) => InferenceSnippet[];
- export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
+ export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string) => InferenceSnippet[];
  export declare const snippetTabular: (model: ModelDataMinimal) => InferenceSnippet[];
  export declare const snippetTextToAudio: (model: ModelDataMinimal) => InferenceSnippet[];
  export declare const snippetDocumentQuestionAnswering: (model: ModelDataMinimal) => InferenceSnippet[];
- export declare const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
- export declare function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
+ export declare const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
+ export declare function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>): InferenceSnippet[];
  //# sourceMappingURL=python.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"python.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.ts"],"names":[],"mappings":"AAAA,OAAO,EAGN,KAAK,wBAAwB,EAC7B,MAAM,2BAA2B,CAAC;AACnC,OAAO,KAAK,EAAE,YAAY,EAAc,MAAM,iBAAiB,CAAC;AAChE,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAyCrE,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,YACT,wBAAwB,SAC3B;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EAiGlB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,KAAG,gBAAgB,EAevF,CAAC;AAEF,eAAO,MAAM,kCAAkC,UAAW,gBAAgB,KAAG,gBAAgB,EAqB5F,CAAC;AAEF,eAAO,MAAM,YAAY,UACjB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAgClB,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,KAAG,gBAAgB,EAcrE,CAAC;AAEF,eAAO,MAAM,kBAAkB,UACvB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAqDlB,CAAC;AAEF,eAAO,MAAM,cAAc,UAAW,gBAAgB,KAAG,gBAAgB,EAcxE,CAAC;AAEF,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,gBAAgB,EAuC5E,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,KAAG,gBAAgB,EAiB1F,CAAC;AAEF,eAAO,MAAM,cAAc,EAAE,OAAO,CACnC,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,EAAE,CACvB,CA8BD,CAAC;AAEF,wBAAgB,yBAAyB,CACxC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CA0BpB"}
+ {"version":3,"file":"python.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.ts"],"names":[],"mappings":"AAAA,OAAO,EAEN,KAAK,wBAAwB,EAC7B,MAAM,2BAA2B,CAAC;AACnC,OAAO,KAAK,EAAE,YAAY,EAAc,MAAM,iBAAiB,CAAC;AAChE,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAyCrE,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,YACT,wBAAwB,oBAChB,MAAM,SACjB;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EAiGlB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,KAAG,gBAAgB,EAevF,CAAC;AAEF,eAAO,MAAM,kCAAkC,UAAW,gBAAgB,KAAG,gBAAgB,EAqB5F,CAAC;AAEF,eAAO,MAAM,YAAY,UACjB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAgClB,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,KAAG,gBAAgB,EAcrE,CAAC;AAEF,eAAO,MAAM,kBAAkB,UACvB,gBAAgB,eACV,MAAM,YACT,wBAAwB,oBAChB,MAAM,KACtB,gBAAgB,EAoDlB,CAAC;AAEF,eAAO,MAAM,cAAc,UAAW,gBAAgB,KAAG,gBAAgB,EAcxE,CAAC;AAEF,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,gBAAgB,EAuC5E,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,KAAG,gBAAgB,EAiB1F,CAAC;AAEF,eAAO,MAAM,cAAc,EAAE,OAAO,CACnC,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,EAAE,CACvB,CA8BD,CAAC;AAEF,wBAAgB,yBAAyB,CACxC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CA0BpB"}
@@ -37,7 +37,7 @@ client = InferenceClient(
  provider="${provider}",
  api_key="${accessToken || "{API_TOKEN}"}"
  )`;
- export const snippetConversational = (model, accessToken, provider, opts) => {
+ export const snippetConversational = (model, accessToken, provider, providerModelId, opts) => {
  const streaming = opts?.streaming ?? true;
  const exampleMessages = getModelInputSnippet(model);
  const messages = opts?.messages ?? exampleMessages;
@@ -83,7 +83,7 @@ client = OpenAI(
  messages = ${messagesStr}

  stream = client.chat.completions.create(
- model="${model.id}",
+ model="${providerModelId ?? model.id}",
  messages=messages,
  ${configStr}
  stream=True
@@ -124,7 +124,7 @@ client = OpenAI(
  messages = ${messagesStr}

  completion = client.chat.completions.create(
- model="${model.id}",
+ model="${providerModelId ?? model.id}",
  messages=messages,
  ${configStr}
  )
@@ -220,7 +220,7 @@ output = query(${getModelInputSnippet(model)})`,
  },
  ];
  };
- export const snippetTextToImage = (model, accessToken, provider) => {
+ export const snippetTextToImage = (model, accessToken, provider, providerModelId) => {
  return [
  {
  client: "huggingface_hub",
@@ -241,8 +241,7 @@ image = client.text_to_image(
  import fal_client

  result = fal_client.subscribe(
- # replace with correct id from fal.ai
- "fal-ai/${model.id}",
+ "${providerModelId ?? model.id}",
  arguments={
  "prompt": ${getModelInputSnippet(model)},
  },
@@ -378,14 +377,14 @@ export const pythonSnippets = {
  "image-to-text": snippetFile,
  "zero-shot-image-classification": snippetZeroShotImageClassification,
  };
- export function getPythonInferenceSnippet(model, accessToken, provider, opts) {
+ export function getPythonInferenceSnippet(model, accessToken, provider, providerModelId, opts) {
  if (model.tags.includes("conversational")) {
  // Conversational model detected, so we display a code snippet that features the Messages API
- return snippetConversational(model, accessToken, provider, opts);
+ return snippetConversational(model, accessToken, provider, providerModelId, opts);
  }
  else {
  const snippets = model.pipeline_tag && model.pipeline_tag in pythonSnippets
- ? pythonSnippets[model.pipeline_tag]?.(model, accessToken, provider) ?? []
+ ? pythonSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId) ?? []
  : [];
  return snippets.map((snippet) => {
  return {
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "@huggingface/tasks",
  "packageManager": "pnpm@8.10.5",
- "version": "0.15.9",
+ "version": "0.16.0",
  "description": "List of ML tasks for huggingface.co/tasks",
  "repository": "https://github.com/huggingface/huggingface.js.git",
  "publishConfig": {
@@ -30,6 +30,7 @@ export const snippetTextGeneration = (
  model: ModelDataMinimal,
  accessToken: string,
  provider: SnippetInferenceProvider,
+ providerModelId?: string,
  opts?: {
  streaming?: boolean;
  messages?: ChatCompletionInputMessage[];
@@ -43,6 +44,7 @@ export const snippetTextGeneration = (
  provider === "hf-inference"
  ? `https://router.huggingface.co/hf-inference/models/${model.id}/v1/chat/completions`
  : HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider) + "/v1/chat/completions";
+ const modelId = providerModelId ?? model.id;

  // Conversational model detected, so we display a code snippet that features the Messages API
  const streaming = opts?.streaming ?? true;
@@ -61,17 +63,17 @@ export const snippetTextGeneration = (
  -H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}' \\
  -H 'Content-Type: application/json' \\
  --data '{
- "model": "${model.id}",
+ "model": "${modelId}",
  "messages": ${stringifyMessages(messages, {
- indent: "\t",
- attributeKeyQuotes: true,
- customContentEscaper: (str) => str.replace(/'/g, "'\\''"),
- })},
+ indent: "\t",
+ attributeKeyQuotes: true,
+ customContentEscaper: (str) => str.replace(/'/g, "'\\''"),
+ })},
  ${stringifyGenerationConfig(config, {
- indent: "\n ",
- attributeKeyQuotes: true,
- attributeValueConnector: ": ",
- })}
+ indent: "\n ",
+ attributeKeyQuotes: true,
+ attributeValueConnector: ": ",
+ })}
  "stream": ${!!streaming}
  }'`,
  },
@@ -127,6 +129,7 @@ export const curlSnippets: Partial<
  model: ModelDataMinimal,
  accessToken: string,
  provider: SnippetInferenceProvider,
+ providerModelId?: string,
  opts?: Record<string, unknown>
  ) => InferenceSnippet[]
  >
@@ -161,9 +164,10 @@ export function getCurlInferenceSnippet(
  model: ModelDataMinimal,
  accessToken: string,
  provider: SnippetInferenceProvider,
+ providerModelId?: string,
  opts?: Record<string, unknown>
  ): InferenceSnippet[] {
  return model.pipeline_tag && model.pipeline_tag in curlSnippets
- ? curlSnippets[model.pipeline_tag]?.(model, accessToken, provider, opts) ?? []
+ ? curlSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId, opts) ?? []
  : [];
  }
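For the curl generator, the new argument only takes effect for conversational models: the hunk above resolves const modelId = providerModelId ?? model.id and emits that value in the --data payload's "model" field. The fallback in isolation, as an illustrative TypeScript sketch (the helper name is ours; the package inlines the expression):

// Illustrative only; mirrors the `providerModelId ?? model.id` fallback above.
function resolveModelId(hubId: string, providerModelId?: string): string {
  return providerModelId ?? hubId;
}

resolveModelId("black-forest-labs/FLUX.1-dev");
// -> "black-forest-labs/FLUX.1-dev" (no provider id: fall back to the Hub id)
resolveModelId("black-forest-labs/FLUX.1-dev", "fal-ai/flux/dev");
// -> "fal-ai/flux/dev" (hypothetical provider-side id takes precedence)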
@@ -27,9 +27,9 @@ export const snippetBasic = (
  return [
  ...(model.pipeline_tag && model.pipeline_tag in HFJS_METHODS
  ? [
- {
- client: "huggingface.js",
- content: `\
+ {
+ client: "huggingface.js",
+ content: `\
  import { HfInference } from "@huggingface/inference";

  const client = new HfInference("${accessToken || `{API_TOKEN}`}");
@@ -42,8 +42,8 @@ const output = await client.${HFJS_METHODS[model.pipeline_tag]}({

  console.log(output);
  `,
- },
- ]
+ },
+ ]
  : []),
  {
  client: "fetch",
@@ -75,6 +75,7 @@ export const snippetTextGeneration = (
  model: ModelDataMinimal,
  accessToken: string,
  provider: SnippetInferenceProvider,
+ providerModelId?: string,
  opts?: {
  streaming?: boolean;
  messages?: ChatCompletionInputMessage[];
@@ -137,7 +138,7 @@ const client = new OpenAI({
  let out = "";

  const stream = await client.chat.completions.create({
- model: "${model.id}",
+ model: "${providerModelId ?? model.id}",
  messages: ${messagesStr},
  ${configStr}
  stream: true,
@@ -180,7 +181,7 @@ const client = new OpenAI({
  });

  const chatCompletion = await client.chat.completions.create({
- model: "${model.id}",
+ model: "${providerModelId ?? model.id}",
  messages: ${messagesStr},
  ${configStr}
  });
@@ -216,8 +217,8 @@ export const snippetZeroShotClassification = (model: ModelDataMinimal, accessTok
  }

  query({"inputs": ${getModelInputSnippet(
- model
- )}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}).then((response) => {
+ model
+ )}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}).then((response) => {
  console.log(JSON.stringify(response));
  });`,
  },
@@ -248,9 +249,9 @@ const image = await client.textToImage({
  },
  ...(provider === "hf-inference"
  ? [
- {
- client: "fetch",
- content: `async function query(data) {
+ {
+ client: "fetch",
+ content: `async function query(data) {
  const response = await fetch(
  "https://router.huggingface.co/hf-inference/models/${model.id}",
  {
@@ -268,8 +269,8 @@ const image = await client.textToImage({
  query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
  // Use image
  });`,
- },
- ]
+ },
+ ]
  : []),
  ];
  };
@@ -398,6 +399,7 @@ export const jsSnippets: Partial<
  model: ModelDataMinimal,
  accessToken: string,
  provider: SnippetInferenceProvider,
+ providerModelId?: string,
  opts?: Record<string, unknown>
  ) => InferenceSnippet[]
  >
@@ -432,9 +434,10 @@ export function getJsInferenceSnippet(
  model: ModelDataMinimal,
  accessToken: string,
  provider: SnippetInferenceProvider,
+ providerModelId?: string,
  opts?: Record<string, unknown>
  ): InferenceSnippet[] {
  return model.pipeline_tag && model.pipeline_tag in jsSnippets
- ? jsSnippets[model.pipeline_tag]?.(model, accessToken, provider, opts) ?? []
+ ? jsSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId, opts) ?? []
  : [];
  }
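getJsInferenceSnippet dispatches through the jsSnippets registry keyed by pipeline_tag, so providerModelId is threaded positionally; generators that ignore it remain assignable because the parameter is optional. A reduced TypeScript sketch of that dispatch shape (types trimmed to the fields the lookup touches, not the package's actual definitions):

// Simplified mirror of the registry dispatch above.
type Snippet = { client: string; content: string };
type Generator = (
  model: { id: string; pipeline_tag?: string; tags: string[] },
  accessToken: string,
  provider: string,
  providerModelId?: string,
  opts?: Record<string, unknown>
) => Snippet[];

const registry: Partial<Record<string, Generator>> = {
  // "text-generation": snippetTextGeneration, ... (entries omitted here)
};

function getSnippet(
  model: { id: string; pipeline_tag?: string; tags: string[] },
  accessToken: string,
  provider: string,
  providerModelId?: string
): Snippet[] {
  return model.pipeline_tag && model.pipeline_tag in registry
    ? registry[model.pipeline_tag]?.(model, accessToken, provider, providerModelId) ?? []
    : [];
}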
@@ -1,5 +1,4 @@
  import {
- HF_HUB_INFERENCE_PROXY_TEMPLATE,
  openAIbaseUrl,
  type SnippetInferenceProvider,
  } from "../inference-providers.js";
@@ -52,6 +51,7 @@ export const snippetConversational = (
  model: ModelDataMinimal,
  accessToken: string,
  provider: SnippetInferenceProvider,
+ providerModelId?: string,
  opts?: {
  streaming?: boolean;
  messages?: ChatCompletionInputMessage[];
@@ -107,7 +107,7 @@ client = OpenAI(
  messages = ${messagesStr}

  stream = client.chat.completions.create(
- model="${model.id}",
+ model="${providerModelId ?? model.id}",
  messages=messages,
  ${configStr}
  stream=True
@@ -147,7 +147,7 @@ client = OpenAI(
  messages = ${messagesStr}

  completion = client.chat.completions.create(
- model="${model.id}",
+ model="${providerModelId ?? model.id}",
  messages=messages,
  ${configStr}
  )
@@ -206,9 +206,9 @@ export const snippetBasic = (
  return [
  ...(model.pipeline_tag && model.pipeline_tag in HFH_INFERENCE_CLIENT_METHODS
  ? [
- {
- client: "huggingface_hub",
- content: `\
+ {
+ client: "huggingface_hub",
+ content: `\
  ${snippetImportInferenceClient(accessToken, provider)}

  result = client.${HFH_INFERENCE_CLIENT_METHODS[model.pipeline_tag]}(
@@ -219,8 +219,8 @@ result = client.${HFH_INFERENCE_CLIENT_METHODS[model.pipeline_tag]}(

  print(result)
  `,
- },
- ]
+ },
+ ]
  : []),
  {
  client: "requests",
@@ -255,7 +255,8 @@ output = query(${getModelInputSnippet(model)})`,
  export const snippetTextToImage = (
  model: ModelDataMinimal,
  accessToken: string,
- provider: SnippetInferenceProvider
+ provider: SnippetInferenceProvider,
+ providerModelId?: string,
  ): InferenceSnippet[] => {
  return [
  {
@@ -271,28 +272,27 @@ image = client.text_to_image(
  },
  ...(provider === "fal-ai"
  ? [
- {
- client: "fal-client",
- content: `\
+ {
+ client: "fal-client",
+ content: `\
  import fal_client

  result = fal_client.subscribe(
- # replace with correct id from fal.ai
- "fal-ai/${model.id}",
+ "${providerModelId ?? model.id}",
  arguments={
  "prompt": ${getModelInputSnippet(model)},
  },
  )
  print(result)
  `,
- },
- ]
+ },
+ ]
  : []),
  ...(provider === "hf-inference"
  ? [
- {
- client: "requests",
- content: `\
+ {
+ client: "requests",
+ content: `\
  def query(payload):
  response = requests.post(API_URL, headers=headers, json=payload)
  return response.content
@@ -305,8 +305,8 @@ image_bytes = query({
  import io
  from PIL import Image
  image = Image.open(io.BytesIO(image_bytes))`,
- },
- ]
+ },
+ ]
  : []),
  ];
  };
@@ -394,6 +394,7 @@ export const pythonSnippets: Partial<
  model: ModelDataMinimal,
  accessToken: string,
  provider: SnippetInferenceProvider,
+ providerModelId?: string,
  opts?: Record<string, unknown>
  ) => InferenceSnippet[]
  >
@@ -432,15 +433,16 @@ export function getPythonInferenceSnippet(
  model: ModelDataMinimal,
  accessToken: string,
  provider: SnippetInferenceProvider,
+ providerModelId?: string,
  opts?: Record<string, unknown>
  ): InferenceSnippet[] {
  if (model.tags.includes("conversational")) {
  // Conversational model detected, so we display a code snippet that features the Messages API
- return snippetConversational(model, accessToken, provider, opts);
+ return snippetConversational(model, accessToken, provider, providerModelId, opts);
  } else {
  const snippets =
  model.pipeline_tag && model.pipeline_tag in pythonSnippets
- ? pythonSnippets[model.pipeline_tag]?.(model, accessToken, provider) ?? []
+ ? pythonSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId) ?? []
  : [];

  return snippets.map((snippet) => {
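The most visible effect of the release is in the Python text-to-image snippet: earlier versions guessed the fal.ai route as "fal-ai/" + model.id and left a "# replace with correct id from fal.ai" comment in the generated code, while 0.16.0 emits the caller-supplied providerModelId verbatim. A usage sketch, again assuming the package's snippets re-export, with placeholder ids and token:

// Sketch only; the ids and token below are placeholders, not real routes.
import { snippets } from "@huggingface/tasks";

const ttiModel = {
  id: "black-forest-labs/FLUX.1-dev",
  pipeline_tag: "text-to-image",
  tags: [],
} as Parameters<typeof snippets.python.getPythonInferenceSnippet>[0];

// The fal-client snippet now subscribes to this provider id directly
// instead of the old "fal-ai/" + model.id guess.
const pySnippets = snippets.python.getPythonInferenceSnippet(
  ttiModel,
  "hf_xxx",
  "fal-ai",
  "fal-ai/flux/dev" // hypothetical fal.ai route
);
console.log(pySnippets.map((s) => s.client)); // e.g. ["huggingface_hub", "fal-client"]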