@huggingface/tasks 0.12.29 → 0.12.30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -6358,6 +6358,7 @@ var modelInputSnippets = {
  "tabular-classification": inputsTabularPrediction,
  "text-classification": inputsTextClassification,
  "text-generation": inputsTextGeneration,
+ "image-text-to-text": inputsTextGeneration,
  "text-to-image": inputsTextToImage,
  "text-to-speech": inputsTextToSpeech,
  "text-to-audio": inputsTextToAudio,
@@ -6402,34 +6403,22 @@ __export(curl_exports, {

  // src/snippets/common.ts
  function stringifyMessages(messages, opts) {
- const keyRole = opts.attributeKeyQuotes ? `"role"` : "role";
- const keyContent = opts.attributeKeyQuotes ? `"content"` : "content";
- const messagesStringified = messages.map(({ role, content }) => {
- if (typeof content === "string") {
- content = JSON.stringify(content).slice(1, -1);
- if (opts.customContentEscaper) {
- content = opts.customContentEscaper(content);
- }
- return `{ ${keyRole}: "${role}", ${keyContent}: "${content}" }`;
- } else {
- 2;
- content = content.map(({ image_url, text, type }) => ({
- type,
- image_url,
- ...text ? { text: JSON.stringify(text).slice(1, -1) } : void 0
- }));
- content = JSON.stringify(content).slice(1, -1);
- if (opts.customContentEscaper) {
- content = opts.customContentEscaper(content);
- }
- return `{ ${keyRole}: "${role}", ${keyContent}: [${content}] }`;
- }
- });
- return opts.start + messagesStringified.join(opts.sep) + opts.end;
+ let messagesStr = JSON.stringify(messages, null, " ");
+ if (opts?.indent) {
+ messagesStr = messagesStr.replaceAll("\n", `
+ ${opts.indent}`);
+ }
+ if (!opts?.attributeKeyQuotes) {
+ messagesStr = messagesStr.replace(/"([^"]+)":/g, "$1:");
+ }
+ if (opts?.customContentEscaper) {
+ messagesStr = opts.customContentEscaper(messagesStr);
+ }
+ return messagesStr;
  }
  function stringifyGenerationConfig(config, opts) {
  const quote = opts.attributeKeyQuotes ? `"` : "";
- return opts.start + Object.entries(config).map(([key, val]) => `${quote}${key}${quote}${opts.attributeValueConnector}${val}`).join(opts.sep) + opts.end;
+ return Object.entries(config).map(([key, val]) => `${quote}${key}${quote}${opts.attributeValueConnector}${val}`).join(`,${opts.indent}`);
  }

  // src/snippets/curl.ts
@@ -6457,18 +6446,12 @@ var snippetTextGeneration = (model, accessToken, opts) => {
  --data '{
  "model": "${model.id}",
  "messages": ${stringifyMessages(messages, {
- sep: ",\n ",
- start: `[
- `,
- end: `
- ]`,
+ indent: " ",
  attributeKeyQuotes: true,
  customContentEscaper: (str) => str.replace(/'/g, "'\\''")
  })},
  ${stringifyGenerationConfig(config, {
- sep: ",\n ",
- start: "",
- end: "",
+ indent: "\n ",
  attributeKeyQuotes: true,
  attributeValueConnector: ": "
  })},
@@ -6545,23 +6528,14 @@ var snippetConversational = (model, accessToken, opts) => {
  const streaming = opts?.streaming ?? true;
  const exampleMessages = getModelInputSnippet(model);
  const messages = opts?.messages ?? exampleMessages;
- const messagesStr = stringifyMessages(messages, {
- sep: ",\n ",
- start: `[
- `,
- end: `
- ]`,
- attributeKeyQuotes: true
- });
+ const messagesStr = stringifyMessages(messages, { attributeKeyQuotes: true });
  const config = {
  ...opts?.temperature ? { temperature: opts.temperature } : void 0,
  max_tokens: opts?.max_tokens ?? 500,
  ...opts?.top_p ? { top_p: opts.top_p } : void 0
  };
  const configStr = stringifyGenerationConfig(config, {
- sep: ",\n ",
- start: "",
- end: "",
+ indent: "\n ",
  attributeValueConnector: "="
  });
  if (streaming) {
@@ -6582,7 +6556,7 @@ stream = client.chat.completions.create(
  )

  for chunk in stream:
- print(chunk.choices[0].delta.content)`
+ print(chunk.choices[0].delta.content, end="")`
  },
  {
  client: "openai",
@@ -6603,7 +6577,7 @@ stream = client.chat.completions.create(
  )

  for chunk in stream:
- print(chunk.choices[0].delta.content)`
+ print(chunk.choices[0].delta.content, end="")`
  }
  ];
  } else {
@@ -6843,16 +6817,14 @@ var snippetTextGeneration2 = (model, accessToken, opts) => {
  const streaming = opts?.streaming ?? true;
  const exampleMessages = getModelInputSnippet(model);
  const messages = opts?.messages ?? exampleMessages;
- const messagesStr = stringifyMessages(messages, { sep: ",\n ", start: "[\n ", end: "\n ]" });
+ const messagesStr = stringifyMessages(messages, { indent: " " });
  const config = {
  ...opts?.temperature ? { temperature: opts.temperature } : void 0,
  max_tokens: opts?.max_tokens ?? 500,
  ...opts?.top_p ? { top_p: opts.top_p } : void 0
  };
  const configStr = stringifyGenerationConfig(config, {
- sep: ",\n ",
- start: "",
- end: "",
+ indent: "\n ",
  attributeValueConnector: ": "
  });
  if (streaming) {
package/dist/index.js CHANGED
@@ -6316,6 +6316,7 @@ var modelInputSnippets = {
  "tabular-classification": inputsTabularPrediction,
  "text-classification": inputsTextClassification,
  "text-generation": inputsTextGeneration,
+ "image-text-to-text": inputsTextGeneration,
  "text-to-image": inputsTextToImage,
  "text-to-speech": inputsTextToSpeech,
  "text-to-audio": inputsTextToAudio,
@@ -6360,34 +6361,22 @@ __export(curl_exports, {

  // src/snippets/common.ts
  function stringifyMessages(messages, opts) {
- const keyRole = opts.attributeKeyQuotes ? `"role"` : "role";
- const keyContent = opts.attributeKeyQuotes ? `"content"` : "content";
- const messagesStringified = messages.map(({ role, content }) => {
- if (typeof content === "string") {
- content = JSON.stringify(content).slice(1, -1);
- if (opts.customContentEscaper) {
- content = opts.customContentEscaper(content);
- }
- return `{ ${keyRole}: "${role}", ${keyContent}: "${content}" }`;
- } else {
- 2;
- content = content.map(({ image_url, text, type }) => ({
- type,
- image_url,
- ...text ? { text: JSON.stringify(text).slice(1, -1) } : void 0
- }));
- content = JSON.stringify(content).slice(1, -1);
- if (opts.customContentEscaper) {
- content = opts.customContentEscaper(content);
- }
- return `{ ${keyRole}: "${role}", ${keyContent}: [${content}] }`;
- }
- });
- return opts.start + messagesStringified.join(opts.sep) + opts.end;
+ let messagesStr = JSON.stringify(messages, null, " ");
+ if (opts?.indent) {
+ messagesStr = messagesStr.replaceAll("\n", `
+ ${opts.indent}`);
+ }
+ if (!opts?.attributeKeyQuotes) {
+ messagesStr = messagesStr.replace(/"([^"]+)":/g, "$1:");
+ }
+ if (opts?.customContentEscaper) {
+ messagesStr = opts.customContentEscaper(messagesStr);
+ }
+ return messagesStr;
  }
  function stringifyGenerationConfig(config, opts) {
  const quote = opts.attributeKeyQuotes ? `"` : "";
- return opts.start + Object.entries(config).map(([key, val]) => `${quote}${key}${quote}${opts.attributeValueConnector}${val}`).join(opts.sep) + opts.end;
+ return Object.entries(config).map(([key, val]) => `${quote}${key}${quote}${opts.attributeValueConnector}${val}`).join(`,${opts.indent}`);
  }

  // src/snippets/curl.ts
@@ -6415,18 +6404,12 @@ var snippetTextGeneration = (model, accessToken, opts) => {
  --data '{
  "model": "${model.id}",
  "messages": ${stringifyMessages(messages, {
- sep: ",\n ",
- start: `[
- `,
- end: `
- ]`,
+ indent: " ",
  attributeKeyQuotes: true,
  customContentEscaper: (str) => str.replace(/'/g, "'\\''")
  })},
  ${stringifyGenerationConfig(config, {
- sep: ",\n ",
- start: "",
- end: "",
+ indent: "\n ",
  attributeKeyQuotes: true,
  attributeValueConnector: ": "
  })},
@@ -6503,23 +6486,14 @@ var snippetConversational = (model, accessToken, opts) => {
  const streaming = opts?.streaming ?? true;
  const exampleMessages = getModelInputSnippet(model);
  const messages = opts?.messages ?? exampleMessages;
- const messagesStr = stringifyMessages(messages, {
- sep: ",\n ",
- start: `[
- `,
- end: `
- ]`,
- attributeKeyQuotes: true
- });
+ const messagesStr = stringifyMessages(messages, { attributeKeyQuotes: true });
  const config = {
  ...opts?.temperature ? { temperature: opts.temperature } : void 0,
  max_tokens: opts?.max_tokens ?? 500,
  ...opts?.top_p ? { top_p: opts.top_p } : void 0
  };
  const configStr = stringifyGenerationConfig(config, {
- sep: ",\n ",
- start: "",
- end: "",
+ indent: "\n ",
  attributeValueConnector: "="
  });
  if (streaming) {
@@ -6540,7 +6514,7 @@ stream = client.chat.completions.create(
  )

  for chunk in stream:
- print(chunk.choices[0].delta.content)`
+ print(chunk.choices[0].delta.content, end="")`
  },
  {
  client: "openai",
@@ -6561,7 +6535,7 @@ stream = client.chat.completions.create(
  )

  for chunk in stream:
- print(chunk.choices[0].delta.content)`
+ print(chunk.choices[0].delta.content, end="")`
  }
  ];
  } else {
@@ -6801,16 +6775,14 @@ var snippetTextGeneration2 = (model, accessToken, opts) => {
  const streaming = opts?.streaming ?? true;
  const exampleMessages = getModelInputSnippet(model);
  const messages = opts?.messages ?? exampleMessages;
- const messagesStr = stringifyMessages(messages, { sep: ",\n ", start: "[\n ", end: "\n ]" });
+ const messagesStr = stringifyMessages(messages, { indent: " " });
  const config = {
  ...opts?.temperature ? { temperature: opts.temperature } : void 0,
  max_tokens: opts?.max_tokens ?? 500,
  ...opts?.top_p ? { top_p: opts.top_p } : void 0
  };
  const configStr = stringifyGenerationConfig(config, {
- sep: ",\n ",
- start: "",
- end: "",
+ indent: "\n ",
  attributeValueConnector: ": "
  });
  if (streaming) {
package/dist/src/snippets/common.d.ts CHANGED
@@ -1,20 +1,14 @@
  import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks";
- export interface StringifyMessagesOptions {
- sep: string;
- start: string;
- end: string;
+ export declare function stringifyMessages(messages: ChatCompletionInputMessage[], opts?: {
+ indent?: string;
  attributeKeyQuotes?: boolean;
  customContentEscaper?: (str: string) => string;
- }
- export declare function stringifyMessages(messages: ChatCompletionInputMessage[], opts: StringifyMessagesOptions): string;
+ }): string;
  type PartialGenerationParameters = Partial<Pick<GenerationParameters, "temperature" | "max_tokens" | "top_p">>;
- export interface StringifyGenerationConfigOptions {
- sep: string;
- start: string;
- end: string;
+ export declare function stringifyGenerationConfig(config: PartialGenerationParameters, opts: {
+ indent: string;
  attributeValueConnector: string;
  attributeKeyQuotes?: boolean;
- }
- export declare function stringifyGenerationConfig(config: PartialGenerationParameters, opts: StringifyGenerationConfigOptions): string;
+ }): string;
  export {};
  //# sourceMappingURL=common.d.ts.map
package/dist/src/snippets/common.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"common.d.ts","sourceRoot":"","sources":["../../../src/snippets/common.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,UAAU,CAAC;AAEjF,MAAM,WAAW,wBAAwB;IACxC,GAAG,EAAE,MAAM,CAAC;IACZ,KAAK,EAAE,MAAM,CAAC;IACd,GAAG,EAAE,MAAM,CAAC;IACZ,kBAAkB,CAAC,EAAE,OAAO,CAAC;IAC7B,oBAAoB,CAAC,EAAE,CAAC,GAAG,EAAE,MAAM,KAAK,MAAM,CAAC;CAC/C;AAED,wBAAgB,iBAAiB,CAAC,QAAQ,EAAE,0BAA0B,EAAE,EAAE,IAAI,EAAE,wBAAwB,GAAG,MAAM,CA2BhH;AAED,KAAK,2BAA2B,GAAG,OAAO,CAAC,IAAI,CAAC,oBAAoB,EAAE,aAAa,GAAG,YAAY,GAAG,OAAO,CAAC,CAAC,CAAC;AAE/G,MAAM,WAAW,gCAAgC;IAChD,GAAG,EAAE,MAAM,CAAC;IACZ,KAAK,EAAE,MAAM,CAAC;IACd,GAAG,EAAE,MAAM,CAAC;IACZ,uBAAuB,EAAE,MAAM,CAAC;IAChC,kBAAkB,CAAC,EAAE,OAAO,CAAC;CAC7B;AAED,wBAAgB,yBAAyB,CACxC,MAAM,EAAE,2BAA2B,EACnC,IAAI,EAAE,gCAAgC,GACpC,MAAM,CAUR"}
+ {"version":3,"file":"common.d.ts","sourceRoot":"","sources":["../../../src/snippets/common.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,UAAU,CAAC;AAEjF,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,0BAA0B,EAAE,EACtC,IAAI,CAAC,EAAE;IACN,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,kBAAkB,CAAC,EAAE,OAAO,CAAC;IAC7B,oBAAoB,CAAC,EAAE,CAAC,GAAG,EAAE,MAAM,KAAK,MAAM,CAAC;CAC/C,GACC,MAAM,CAYR;AAED,KAAK,2BAA2B,GAAG,OAAO,CAAC,IAAI,CAAC,oBAAoB,EAAE,aAAa,GAAG,YAAY,GAAG,OAAO,CAAC,CAAC,CAAC;AAE/G,wBAAgB,yBAAyB,CACxC,MAAM,EAAE,2BAA2B,EACnC,IAAI,EAAE;IACL,MAAM,EAAE,MAAM,CAAC;IACf,uBAAuB,EAAE,MAAM,CAAC;IAChC,kBAAkB,CAAC,EAAE,OAAO,CAAC;CAC7B,GACC,MAAM,CAMR"}
package/dist/src/snippets/curl.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAM1E,CAAC;AAEH,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAsCF,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAM3F,CAAC;AAEH,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAKzE,CAAC;AAEH,eAAO,MAAM,YAAY,EAAE,OAAO,CACjC,MAAM,CACL,YAAY,EACZ,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,EAAE,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAAK,gBAAgB,CAClG,CA0BD,CAAC;AAEF,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,gBAAgB,CAItG;AAED,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,IAAI,CAAC,gBAAgB,EAAE,cAAc,CAAC,GAAG,OAAO,CAE9F"}
+ {"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAM1E,CAAC;AAEH,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAkCF,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAM3F,CAAC;AAEH,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAKzE,CAAC;AAEH,eAAO,MAAM,YAAY,EAAE,OAAO,CACjC,MAAM,CACL,YAAY,EACZ,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,EAAE,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAAK,gBAAgB,CAClG,CA0BD,CAAC;AAEF,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,gBAAgB,CAItG;AAED,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,IAAI,CAAC,gBAAgB,EAAE,cAAc,CAAC,GAAG,OAAO,CAE9F"}
package/dist/src/snippets/curl.spec.d.ts ADDED
@@ -0,0 +1,2 @@
+ export {};
+ //# sourceMappingURL=curl.spec.d.ts.map
package/dist/src/snippets/curl.spec.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"curl.spec.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.spec.ts"],"names":[],"mappings":""}
package/dist/src/snippets/inputs.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"inputs.d.ts","sourceRoot":"","sources":["../../../src/snippets/inputs.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,0BAA0B,EAAE,MAAM,UAAU,CAAC;AAC3D,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,SAAS,CAAC;AA4IhD,wBAAgB,oBAAoB,CACnC,KAAK,EAAE,gBAAgB,EACvB,MAAM,UAAQ,EACd,QAAQ,UAAQ,GACd,MAAM,GAAG,0BAA0B,EAAE,CAmBvC"}
+ {"version":3,"file":"inputs.d.ts","sourceRoot":"","sources":["../../../src/snippets/inputs.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,0BAA0B,EAAE,MAAM,UAAU,CAAC;AAC3D,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,SAAS,CAAC;AA6IhD,wBAAgB,oBAAoB,CACnC,KAAK,EAAE,gBAAgB,EACvB,MAAM,UAAQ,EACd,QAAQ,UAAQ,GACd,MAAM,GAAG,0BAA0B,EAAE,CAmBvC"}
package/dist/src/snippets/js.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAoB1E,CAAC;AAEH,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,GAAG,gBAAgB,EA6GrC,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAsB3F,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAmBhF,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAuCjF,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAqBzE,CAAC;AAEH,eAAO,MAAM,UAAU,EAAE,OAAO,CAC/B,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,GAAG,gBAAgB,EAAE,CAC1C,CA0BD,CAAC;AAEF,wBAAgB,qBAAqB,CACpC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,GACjB,gBAAgB,GAAG,gBAAgB,EAAE,CAIvC;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAEtE"}
+ {"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAoB1E,CAAC;AAEH,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,GAAG,gBAAgB,EA2GrC,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAsB3F,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAmBhF,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAuCjF,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAqBzE,CAAC;AAEH,eAAO,MAAM,UAAU,EAAE,OAAO,CAC/B,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,GAAG,gBAAgB,EAAE,CAC1C,CA0BD,CAAC;AAEF,wBAAgB,qBAAqB,CACpC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,GACjB,gBAAgB,GAAG,gBAAgB,EAAE,CAIvC;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAEtE"}
package/dist/src/snippets/js.spec.d.ts ADDED
@@ -0,0 +1,2 @@
+ export {};
+ //# sourceMappingURL=js.spec.d.ts.map
package/dist/src/snippets/js.spec.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"js.spec.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.spec.ts"],"names":[],"mappings":""}
package/dist/src/snippets/python.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"python.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EAwGlB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,KAAG,gBAStE,CAAC;AAEH,eAAO,MAAM,kCAAkC,UAAW,gBAAgB,KAAG,gBAe3E,CAAC;AAEH,eAAO,MAAM,YAAY,UAAW,gBAAgB,KAAG,gBAQrD,CAAC;AAEH,eAAO,MAAM,WAAW,UAAW,gBAAgB,KAAG,gBAQpD,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,gBAW3D,CAAC;AAEH,eAAO,MAAM,cAAc,UAAW,gBAAgB,KAAG,gBAOvD,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,gBA+B5D,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,KAAG,gBAWzE,CAAC;AAEH,eAAO,MAAM,cAAc,EAAE,OAAO,CACnC,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,GAAG,gBAAgB,EAAE,CAC1C,CA8BD,CAAC;AAEF,wBAAgB,yBAAyB,CACxC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,GAAG,gBAAgB,EAAE,CAwBvC;AAED,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAE1E"}
+ {"version":3,"file":"python.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EAiGlB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,KAAG,gBAStE,CAAC;AAEH,eAAO,MAAM,kCAAkC,UAAW,gBAAgB,KAAG,gBAe3E,CAAC;AAEH,eAAO,MAAM,YAAY,UAAW,gBAAgB,KAAG,gBAQrD,CAAC;AAEH,eAAO,MAAM,WAAW,UAAW,gBAAgB,KAAG,gBAQpD,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,gBAW3D,CAAC;AAEH,eAAO,MAAM,cAAc,UAAW,gBAAgB,KAAG,gBAOvD,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,gBA+B5D,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,KAAG,gBAWzE,CAAC;AAEH,eAAO,MAAM,cAAc,EAAE,OAAO,CACnC,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,GAAG,gBAAgB,EAAE,CAC1C,CA8BD,CAAC;AAEF,wBAAgB,yBAAyB,CACxC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,GAAG,gBAAgB,EAAE,CAwBvC;AAED,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAE1E"}
package/dist/src/snippets/python.spec.d.ts ADDED
@@ -0,0 +1,2 @@
+ export {};
+ //# sourceMappingURL=python.spec.d.ts.map
package/dist/src/snippets/python.spec.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"python.spec.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.spec.ts"],"names":[],"mappings":""}
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "@huggingface/tasks",
  "packageManager": "pnpm@8.10.5",
- "version": "0.12.29",
+ "version": "0.12.30",
  "description": "List of ML tasks for huggingface.co/tasks",
  "repository": "https://github.com/huggingface/huggingface.js.git",
  "publishConfig": {
@@ -46,6 +46,7 @@
  "watch:types": "tsc --emitDeclarationOnly --declaration --watch",
  "watch": "npm-run-all --parallel watch:export watch:types",
  "check": "tsc",
+ "test": "vitest run",
  "inference-codegen": "tsx scripts/inference-codegen.ts && prettier --write src/tasks/*/inference.ts",
  "inference-tgi-import": "tsx scripts/inference-tgi-import.ts && prettier --write src/tasks/text-generation/spec/*.json && prettier --write src/tasks/chat-completion/spec/*.json",
  "inference-tei-import": "tsx scripts/inference-tei-import.ts && prettier --write src/tasks/feature-extraction/spec/*.json"
package/src/snippets/common.ts CHANGED
@@ -1,63 +1,39 @@
  import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks";

- export interface StringifyMessagesOptions {
- sep: string;
- start: string;
- end: string;
- attributeKeyQuotes?: boolean;
- customContentEscaper?: (str: string) => string;
- }
-
- export function stringifyMessages(messages: ChatCompletionInputMessage[], opts: StringifyMessagesOptions): string {
- const keyRole = opts.attributeKeyQuotes ? `"role"` : "role";
- const keyContent = opts.attributeKeyQuotes ? `"content"` : "content";
-
- const messagesStringified = messages.map(({ role, content }) => {
- if (typeof content === "string") {
- content = JSON.stringify(content).slice(1, -1);
- if (opts.customContentEscaper) {
- content = opts.customContentEscaper(content);
- }
- return `{ ${keyRole}: "${role}", ${keyContent}: "${content}" }`;
- } else {
- 2;
- content = content.map(({ image_url, text, type }) => ({
- type,
- image_url,
- ...(text ? { text: JSON.stringify(text).slice(1, -1) } : undefined),
- }));
- content = JSON.stringify(content).slice(1, -1);
- if (opts.customContentEscaper) {
- content = opts.customContentEscaper(content);
- }
- return `{ ${keyRole}: "${role}", ${keyContent}: [${content}] }`;
- }
- });
-
- return opts.start + messagesStringified.join(opts.sep) + opts.end;
+ export function stringifyMessages(
+ messages: ChatCompletionInputMessage[],
+ opts?: {
+ indent?: string;
+ attributeKeyQuotes?: boolean;
+ customContentEscaper?: (str: string) => string;
+ }
+ ): string {
+ let messagesStr = JSON.stringify(messages, null, "\t");
+ if (opts?.indent) {
+ messagesStr = messagesStr.replaceAll("\n", `\n${opts.indent}`);
+ }
+ if (!opts?.attributeKeyQuotes) {
+ messagesStr = messagesStr.replace(/"([^"]+)":/g, "$1:");
+ }
+ if (opts?.customContentEscaper) {
+ messagesStr = opts.customContentEscaper(messagesStr);
+ }
+ return messagesStr;
  }

  type PartialGenerationParameters = Partial<Pick<GenerationParameters, "temperature" | "max_tokens" | "top_p">>;

- export interface StringifyGenerationConfigOptions {
- sep: string;
- start: string;
- end: string;
- attributeValueConnector: string;
- attributeKeyQuotes?: boolean;
- }
-
  export function stringifyGenerationConfig(
  config: PartialGenerationParameters,
- opts: StringifyGenerationConfigOptions
+ opts: {
+ indent: string;
+ attributeValueConnector: string;
+ attributeKeyQuotes?: boolean;
+ }
  ): string {
  const quote = opts.attributeKeyQuotes ? `"` : "";

- return (
- opts.start +
- Object.entries(config)
- .map(([key, val]) => `${quote}${key}${quote}${opts.attributeValueConnector}${val}`)
- .join(opts.sep) +
- opts.end
- );
+ return Object.entries(config)
+ .map(([key, val]) => `${quote}${key}${quote}${opts.attributeValueConnector}${val}`)
+ .join(`,${opts.indent}`);
  }
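
Not part of the diff, but useful context: a minimal sketch of what the refactored helpers emit, assuming `stringifyMessages` and `stringifyGenerationConfig` are imported from the `common.ts` shown above and that messages follow the `ChatCompletionInputMessage` shape.

    // Sketch only: exercising the refactored helpers defined above.
    import { stringifyMessages, stringifyGenerationConfig } from "./common";

    const messages = [{ role: "user", content: "What is the capital of France?" }];

    // attributeKeyQuotes keeps the JSON-quoted keys (used by the curl and Python generators):
    console.log(stringifyMessages(messages, { attributeKeyQuotes: true }));
    // => '[\n\t{\n\t\t"role": "user",\n\t\t"content": "What is the capital of France?"\n\t}\n]'

    // Without it, the `"key":` -> `key:` regex yields JS-style object literals, and
    // `indent` is appended after every newline (used by the JS generator):
    console.log(stringifyMessages(messages, { indent: "\t" }));
    // => '[\n\t\t{\n\t\t\trole: "user",\n\t\t\tcontent: "What is the capital of France?"\n\t\t}\n\t]'

    // Config entries are joined with "," plus the indent string, so one option now
    // controls both the separator newline and the indentation:
    console.log(stringifyGenerationConfig({ max_tokens: 500, temperature: 0.5 }, { indent: "\n\t", attributeValueConnector: "=" }));
    // => 'max_tokens=500,\n\ttemperature=0.5'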
package/src/snippets/curl.spec.ts ADDED
@@ -0,0 +1,68 @@
+ import type { ModelDataMinimal } from "./types";
+ import { describe, expect, it } from "vitest";
+ import { snippetTextGeneration } from "./curl";
+
+ describe("inference API snippets", () => {
+ it("conversational llm", async () => {
+ const model: ModelDataMinimal = {
+ id: "meta-llama/Llama-3.1-8B-Instruct",
+ pipeline_tag: "text-generation",
+ tags: ["conversational"],
+ inference: "",
+ };
+ const snippet = snippetTextGeneration(model, "api_token");
+
+ expect(snippet.content)
+ .toEqual(`curl 'https://api-inference.huggingface.co/models/meta-llama/Llama-3.1-8B-Instruct/v1/chat/completions' \\
+ -H "Authorization: Bearer api_token" \\
+ -H 'Content-Type: application/json' \\
+ --data '{
+ "model": "meta-llama/Llama-3.1-8B-Instruct",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is the capital of France?"
+ }
+ ],
+ "max_tokens": 500,
+ "stream": true
+ }'`);
+ });
+
+ it("conversational vlm", async () => {
+ const model: ModelDataMinimal = {
+ id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
+ pipeline_tag: "image-text-to-text",
+ tags: ["conversational"],
+ inference: "",
+ };
+ const snippet = snippetTextGeneration(model, "api_token");
+
+ expect(snippet.content)
+ .toEqual(`curl 'https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct/v1/chat/completions' \\
+ -H "Authorization: Bearer api_token" \\
+ -H 'Content-Type: application/json' \\
+ --data '{
+ "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
+ "messages": [
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "Describe this image in one sentence."
+ },
+ {
+ "type": "image_url",
+ "image_url": {
+ "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
+ }
+ }
+ ]
+ }
+ ],
+ "max_tokens": 500,
+ "stream": true
+ }'`);
+ });
+ });
package/src/snippets/curl.ts CHANGED
@@ -41,16 +41,12 @@ export const snippetTextGeneration = (
  --data '{
  "model": "${model.id}",
  "messages": ${stringifyMessages(messages, {
- sep: ",\n\t\t",
- start: `[\n\t\t`,
- end: `\n\t]`,
+ indent: "\t",
  attributeKeyQuotes: true,
  customContentEscaper: (str) => str.replace(/'/g, "'\\''"),
  })},
  ${stringifyGenerationConfig(config, {
- sep: ",\n ",
- start: "",
- end: "",
+ indent: "\n ",
  attributeKeyQuotes: true,
  attributeValueConnector: ": ",
  })},
package/src/snippets/inputs.ts CHANGED
@@ -128,6 +128,7 @@ const modelInputSnippets: {
  "tabular-classification": inputsTabularPrediction,
  "text-classification": inputsTextClassification,
  "text-generation": inputsTextGeneration,
+ "image-text-to-text": inputsTextGeneration,
  "text-to-image": inputsTextToImage,
  "text-to-speech": inputsTextToSpeech,
  "text-to-audio": inputsTextToAudio,
package/src/snippets/js.spec.ts ADDED
@@ -0,0 +1,86 @@
+ import type { InferenceSnippet, ModelDataMinimal } from "./types";
+ import { describe, expect, it } from "vitest";
+ import { snippetTextGeneration } from "./js";
+
+ describe("inference API snippets", () => {
+ it("conversational llm", async () => {
+ const model: ModelDataMinimal = {
+ id: "meta-llama/Llama-3.1-8B-Instruct",
+ pipeline_tag: "text-generation",
+ tags: ["conversational"],
+ inference: "",
+ };
+ const snippet = snippetTextGeneration(model, "api_token") as InferenceSnippet[];
+
+ expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+
+ const client = new HfInference("api_token")
+
+ let out = "";
+
+ const stream = client.chatCompletionStream({
+ model: "meta-llama/Llama-3.1-8B-Instruct",
+ messages: [
+ {
+ role: "user",
+ content: "What is the capital of France?"
+ }
+ ],
+ max_tokens: 500
+ });
+
+ for await (const chunk of stream) {
+ if (chunk.choices && chunk.choices.length > 0) {
+ const newContent = chunk.choices[0].delta.content;
+ out += newContent;
+ console.log(newContent);
+ }
+ }`);
+ });
+
+ it("conversational vlm", async () => {
+ const model: ModelDataMinimal = {
+ id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
+ pipeline_tag: "image-text-to-text",
+ tags: ["conversational"],
+ inference: "",
+ };
+ const snippet = snippetTextGeneration(model, "api_token") as InferenceSnippet[];
+
+ expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+
+ const client = new HfInference("api_token")
+
+ let out = "";
+
+ const stream = client.chatCompletionStream({
+ model: "meta-llama/Llama-3.2-11B-Vision-Instruct",
+ messages: [
+ {
+ role: "user",
+ content: [
+ {
+ type: "text",
+ text: "Describe this image in one sentence."
+ },
+ {
+ type: "image_url",
+ image_url: {
+ url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
+ }
+ }
+ ]
+ }
+ ],
+ max_tokens: 500
+ });
+
+ for await (const chunk of stream) {
+ if (chunk.choices && chunk.choices.length > 0) {
+ const newContent = chunk.choices[0].delta.content;
+ out += newContent;
+ console.log(newContent);
+ }
+ }`);
+ });
+ });
package/src/snippets/js.ts CHANGED
@@ -42,7 +42,7 @@ export const snippetTextGeneration = (
  const streaming = opts?.streaming ?? true;
  const exampleMessages = getModelInputSnippet(model) as ChatCompletionInputMessage[];
  const messages = opts?.messages ?? exampleMessages;
- const messagesStr = stringifyMessages(messages, { sep: ",\n\t\t", start: "[\n\t\t", end: "\n\t]" });
+ const messagesStr = stringifyMessages(messages, { indent: "\t" });

  const config = {
  ...(opts?.temperature ? { temperature: opts.temperature } : undefined),
@@ -50,9 +50,7 @@ export const snippetTextGeneration = (
  ...(opts?.top_p ? { top_p: opts.top_p } : undefined),
  };
  const configStr = stringifyGenerationConfig(config, {
- sep: ",\n\t",
- start: "",
- end: "",
+ indent: "\n\t",
  attributeValueConnector: ": ",
  });

package/src/snippets/python.spec.ts ADDED
@@ -0,0 +1,78 @@
+ import type { ModelDataMinimal } from "./types";
+ import { describe, expect, it } from "vitest";
+ import { snippetConversational } from "./python";
+
+ describe("inference API snippets", () => {
+ it("conversational llm", async () => {
+ const model: ModelDataMinimal = {
+ id: "meta-llama/Llama-3.1-8B-Instruct",
+ pipeline_tag: "text-generation",
+ tags: ["conversational"],
+ inference: "",
+ };
+ const snippet = snippetConversational(model, "api_token");
+
+ expect(snippet[0].content).toEqual(`from huggingface_hub import InferenceClient
+
+ client = InferenceClient(api_key="api_token")
+
+ messages = [
+ {
+ "role": "user",
+ "content": "What is the capital of France?"
+ }
+ ]
+
+ stream = client.chat.completions.create(
+ model="meta-llama/Llama-3.1-8B-Instruct",
+ messages=messages,
+ max_tokens=500,
+ stream=True
+ )
+
+ for chunk in stream:
+ print(chunk.choices[0].delta.content, end="")`);
+ });
+
+ it("conversational vlm", async () => {
+ const model: ModelDataMinimal = {
+ id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
+ pipeline_tag: "image-text-to-text",
+ tags: ["conversational"],
+ inference: "",
+ };
+ const snippet = snippetConversational(model, "api_token");
+
+ expect(snippet[0].content).toEqual(`from huggingface_hub import InferenceClient
+
+ client = InferenceClient(api_key="api_token")
+
+ messages = [
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "Describe this image in one sentence."
+ },
+ {
+ "type": "image_url",
+ "image_url": {
+ "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
+ }
+ }
+ ]
+ }
+ ]
+
+ stream = client.chat.completions.create(
+ model="meta-llama/Llama-3.2-11B-Vision-Instruct",
+ messages=messages,
+ max_tokens=500,
+ stream=True
+ )
+
+ for chunk in stream:
+ print(chunk.choices[0].delta.content, end="")`);
+ });
+ });
package/src/snippets/python.ts CHANGED
@@ -18,12 +18,7 @@ export const snippetConversational = (
  const streaming = opts?.streaming ?? true;
  const exampleMessages = getModelInputSnippet(model) as ChatCompletionInputMessage[];
  const messages = opts?.messages ?? exampleMessages;
- const messagesStr = stringifyMessages(messages, {
- sep: ",\n\t",
- start: `[\n\t`,
- end: `\n]`,
- attributeKeyQuotes: true,
- });
+ const messagesStr = stringifyMessages(messages, { attributeKeyQuotes: true });

  const config = {
  ...(opts?.temperature ? { temperature: opts.temperature } : undefined),
@@ -31,9 +26,7 @@ export const snippetConversational = (
  ...(opts?.top_p ? { top_p: opts.top_p } : undefined),
  };
  const configStr = stringifyGenerationConfig(config, {
- sep: ",\n\t",
- start: "",
- end: "",
+ indent: "\n\t",
  attributeValueConnector: "=",
  });

@@ -55,7 +48,7 @@ stream = client.chat.completions.create(
  )

  for chunk in stream:
- print(chunk.choices[0].delta.content)`,
+ print(chunk.choices[0].delta.content, end="")`,
  },
  {
  client: "openai",
@@ -76,7 +69,7 @@ stream = client.chat.completions.create(
  )

  for chunk in stream:
- print(chunk.choices[0].delta.content)`,
+ print(chunk.choices[0].delta.content, end="")`,
  },
  ];
  } else {
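
A closing note, not part of the diff: the repeated `end=""` change matters because chat-completion deltas arrive as token fragments, and Python's `print` would otherwise insert a newline after each one. A sketch of driving this generator, mirroring the new `python.spec.ts` above:

    // Sketch only: generate the Python snippet the same way the spec file does.
    import type { ModelDataMinimal } from "./types";
    import { snippetConversational } from "./python";

    const model: ModelDataMinimal = {
        id: "meta-llama/Llama-3.1-8B-Instruct",
        pipeline_tag: "text-generation",
        tags: ["conversational"],
        inference: "",
    };

    // snippet[0] is the huggingface_hub variant; its streaming loop now ends with
    // `print(chunk.choices[0].delta.content, end="")`, concatenating fragments
    // instead of printing one per line.
    const snippet = snippetConversational(model, "api_token");
    console.log(snippet[0].content);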