@huggingface/inference 3.5.2 → 3.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. package/dist/browser/index.cjs +1652 -0
  2. package/dist/browser/index.js +1652 -0
  3. package/dist/index.cjs +277 -971
  4. package/dist/index.js +268 -982
  5. package/dist/src/index.d.ts.map +1 -1
  6. package/dist/src/lib/makeRequestOptions.d.ts +16 -1
  7. package/dist/src/lib/makeRequestOptions.d.ts.map +1 -1
  8. package/dist/src/providers/novita.d.ts.map +1 -1
  9. package/dist/src/snippets/getInferenceSnippets.d.ts +4 -0
  10. package/dist/src/snippets/getInferenceSnippets.d.ts.map +1 -0
  11. package/dist/src/snippets/index.d.ts +1 -4
  12. package/dist/src/snippets/index.d.ts.map +1 -1
  13. package/dist/src/tasks/cv/textToVideo.d.ts.map +1 -1
  14. package/package.json +15 -6
  15. package/src/index.ts +1 -1
  16. package/src/lib/makeRequestOptions.ts +37 -10
  17. package/src/providers/fireworks-ai.ts +1 -1
  18. package/src/providers/hf-inference.ts +1 -1
  19. package/src/providers/nebius.ts +3 -3
  20. package/src/providers/novita.ts +7 -6
  21. package/src/providers/sambanova.ts +1 -1
  22. package/src/providers/together.ts +3 -3
  23. package/src/snippets/getInferenceSnippets.ts +398 -0
  24. package/src/snippets/index.ts +1 -5
  25. package/src/snippets/templates/js/fetch/basic.jinja +19 -0
  26. package/src/snippets/templates/js/fetch/basicAudio.jinja +19 -0
  27. package/src/snippets/templates/js/fetch/basicImage.jinja +19 -0
  28. package/src/snippets/templates/js/fetch/textToAudio.jinja +41 -0
  29. package/src/snippets/templates/js/fetch/textToImage.jinja +19 -0
  30. package/src/snippets/templates/js/fetch/zeroShotClassification.jinja +22 -0
  31. package/src/snippets/templates/js/huggingface.js/basic.jinja +11 -0
  32. package/src/snippets/templates/js/huggingface.js/basicAudio.jinja +13 -0
  33. package/src/snippets/templates/js/huggingface.js/basicImage.jinja +13 -0
  34. package/src/snippets/templates/js/huggingface.js/conversational.jinja +11 -0
  35. package/src/snippets/templates/js/huggingface.js/conversationalStream.jinja +19 -0
  36. package/src/snippets/templates/js/huggingface.js/textToImage.jinja +11 -0
  37. package/src/snippets/templates/js/huggingface.js/textToVideo.jinja +10 -0
  38. package/src/snippets/templates/js/openai/conversational.jinja +13 -0
  39. package/src/snippets/templates/js/openai/conversationalStream.jinja +22 -0
  40. package/src/snippets/templates/python/fal_client/textToImage.jinja +11 -0
  41. package/src/snippets/templates/python/huggingface_hub/basic.jinja +4 -0
  42. package/src/snippets/templates/python/huggingface_hub/basicAudio.jinja +1 -0
  43. package/src/snippets/templates/python/huggingface_hub/basicImage.jinja +1 -0
  44. package/src/snippets/templates/python/huggingface_hub/conversational.jinja +6 -0
  45. package/src/snippets/templates/python/huggingface_hub/conversationalStream.jinja +8 -0
  46. package/src/snippets/templates/python/huggingface_hub/documentQuestionAnswering.jinja +5 -0
  47. package/src/snippets/templates/python/huggingface_hub/imageToImage.jinja +6 -0
  48. package/src/snippets/templates/python/huggingface_hub/importInferenceClient.jinja +6 -0
  49. package/src/snippets/templates/python/huggingface_hub/textToImage.jinja +5 -0
  50. package/src/snippets/templates/python/huggingface_hub/textToVideo.jinja +4 -0
  51. package/src/snippets/templates/python/openai/conversational.jinja +13 -0
  52. package/src/snippets/templates/python/openai/conversationalStream.jinja +15 -0
  53. package/src/snippets/templates/python/requests/basic.jinja +7 -0
  54. package/src/snippets/templates/python/requests/basicAudio.jinja +7 -0
  55. package/src/snippets/templates/python/requests/basicImage.jinja +7 -0
  56. package/src/snippets/templates/python/requests/conversational.jinja +9 -0
  57. package/src/snippets/templates/python/requests/conversationalStream.jinja +16 -0
  58. package/src/snippets/templates/python/requests/documentQuestionAnswering.jinja +13 -0
  59. package/src/snippets/templates/python/requests/imageToImage.jinja +15 -0
  60. package/src/snippets/templates/python/requests/importRequests.jinja +10 -0
  61. package/src/snippets/templates/python/requests/tabular.jinja +9 -0
  62. package/src/snippets/templates/python/requests/textToAudio.jinja +23 -0
  63. package/src/snippets/templates/python/requests/textToImage.jinja +14 -0
  64. package/src/snippets/templates/python/requests/zeroShotClassification.jinja +8 -0
  65. package/src/snippets/templates/python/requests/zeroShotImageClassification.jinja +14 -0
  66. package/src/snippets/templates/sh/curl/basic.jinja +7 -0
  67. package/src/snippets/templates/sh/curl/basicAudio.jinja +5 -0
  68. package/src/snippets/templates/sh/curl/basicImage.jinja +5 -0
  69. package/src/snippets/templates/sh/curl/conversational.jinja +7 -0
  70. package/src/snippets/templates/sh/curl/conversationalStream.jinja +7 -0
  71. package/src/snippets/templates/sh/curl/zeroShotClassification.jinja +5 -0
  72. package/src/tasks/cv/textToVideo.ts +25 -5
  73. package/src/vendor/fetch-event-source/LICENSE +21 -0
  74. package/dist/src/snippets/curl.d.ts +0 -17
  75. package/dist/src/snippets/curl.d.ts.map +0 -1
  76. package/dist/src/snippets/js.d.ts +0 -21
  77. package/dist/src/snippets/js.d.ts.map +0 -1
  78. package/dist/src/snippets/python.d.ts +0 -4
  79. package/dist/src/snippets/python.d.ts.map +0 -1
  80. package/src/snippets/curl.ts +0 -177
  81. package/src/snippets/js.ts +0 -475
  82. package/src/snippets/python.ts +0 -563
@@ -1,563 +0,0 @@
1
- import { openAIbaseUrl, type SnippetInferenceProvider } from "@huggingface/tasks";
2
- import type { PipelineType, WidgetType } from "@huggingface/tasks/src/pipelines.js";
3
- import type { ChatCompletionInputMessage, GenerationParameters } from "@huggingface/tasks/src/tasks/index.js";
4
- import {
5
- type InferenceSnippet,
6
- type ModelDataMinimal,
7
- getModelInputSnippet,
8
- stringifyGenerationConfig,
9
- stringifyMessages,
10
- } from "@huggingface/tasks";
11
-
12
- const HFH_INFERENCE_CLIENT_METHODS: Partial<Record<WidgetType, string>> = {
13
- "audio-classification": "audio_classification",
14
- "audio-to-audio": "audio_to_audio",
15
- "automatic-speech-recognition": "automatic_speech_recognition",
16
- "text-to-speech": "text_to_speech",
17
- "image-classification": "image_classification",
18
- "image-segmentation": "image_segmentation",
19
- "image-to-image": "image_to_image",
20
- "image-to-text": "image_to_text",
21
- "object-detection": "object_detection",
22
- "text-to-image": "text_to_image",
23
- "text-to-video": "text_to_video",
24
- "zero-shot-image-classification": "zero_shot_image_classification",
25
- "document-question-answering": "document_question_answering",
26
- "visual-question-answering": "visual_question_answering",
27
- "feature-extraction": "feature_extraction",
28
- "fill-mask": "fill_mask",
29
- "question-answering": "question_answering",
30
- "sentence-similarity": "sentence_similarity",
31
- summarization: "summarization",
32
- "table-question-answering": "table_question_answering",
33
- "text-classification": "text_classification",
34
- "text-generation": "text_generation",
35
- "token-classification": "token_classification",
36
- translation: "translation",
37
- "zero-shot-classification": "zero_shot_classification",
38
- "tabular-classification": "tabular_classification",
39
- "tabular-regression": "tabular_regression",
40
- };
41
-
42
- const snippetImportInferenceClient = (accessToken: string, provider: SnippetInferenceProvider): string =>
43
- `\
44
- from huggingface_hub import InferenceClient
45
-
46
- client = InferenceClient(
47
- provider="${provider}",
48
- api_key="${accessToken || "{API_TOKEN}"}",
49
- )`;
50
-
51
- const snippetConversational = (
52
- model: ModelDataMinimal,
53
- accessToken: string,
54
- provider: SnippetInferenceProvider,
55
- providerModelId?: string,
56
- opts?: {
57
- streaming?: boolean;
58
- messages?: ChatCompletionInputMessage[];
59
- temperature?: GenerationParameters["temperature"];
60
- max_tokens?: GenerationParameters["max_tokens"];
61
- top_p?: GenerationParameters["top_p"];
62
- }
63
- ): InferenceSnippet[] => {
64
- const streaming = opts?.streaming ?? true;
65
- const exampleMessages = getModelInputSnippet(model) as ChatCompletionInputMessage[];
66
- const messages = opts?.messages ?? exampleMessages;
67
- const messagesStr = stringifyMessages(messages, { attributeKeyQuotes: true });
68
-
69
- const config = {
70
- ...(opts?.temperature ? { temperature: opts.temperature } : undefined),
71
- max_tokens: opts?.max_tokens ?? 500,
72
- ...(opts?.top_p ? { top_p: opts.top_p } : undefined),
73
- };
74
- const configStr = stringifyGenerationConfig(config, {
75
- indent: "\n\t",
76
- attributeValueConnector: "=",
77
- });
78
-
79
- if (streaming) {
80
- return [
81
- {
82
- client: "huggingface_hub",
83
- content: `\
84
- ${snippetImportInferenceClient(accessToken, provider)}
85
-
86
- messages = ${messagesStr}
87
-
88
- stream = client.chat.completions.create(
89
- model="${model.id}",
90
- messages=messages,
91
- ${configStr}
92
- stream=True,
93
- )
94
-
95
- for chunk in stream:
96
- print(chunk.choices[0].delta.content, end="")`,
97
- },
98
- {
99
- client: "openai",
100
- content: `\
101
- from openai import OpenAI
102
-
103
- client = OpenAI(
104
- base_url="${openAIbaseUrl(provider)}",
105
- api_key="${accessToken || "{API_TOKEN}"}"
106
- )
107
-
108
- messages = ${messagesStr}
109
-
110
- stream = client.chat.completions.create(
111
- model="${providerModelId ?? model.id}",
112
- messages=messages,
113
- ${configStr}
114
- stream=True
115
- )
116
-
117
- for chunk in stream:
118
- print(chunk.choices[0].delta.content, end="")`,
119
- },
120
- ];
121
- } else {
122
- return [
123
- {
124
- client: "huggingface_hub",
125
- content: `\
126
- ${snippetImportInferenceClient(accessToken, provider)}
127
-
128
- messages = ${messagesStr}
129
-
130
- completion = client.chat.completions.create(
131
- model="${model.id}",
132
- messages=messages,
133
- ${configStr}
134
- )
135
-
136
- print(completion.choices[0].message)`,
137
- },
138
- {
139
- client: "openai",
140
- content: `\
141
- from openai import OpenAI
142
-
143
- client = OpenAI(
144
- base_url="${openAIbaseUrl(provider)}",
145
- api_key="${accessToken || "{API_TOKEN}"}"
146
- )
147
-
148
- messages = ${messagesStr}
149
-
150
- completion = client.chat.completions.create(
151
- model="${providerModelId ?? model.id}",
152
- messages=messages,
153
- ${configStr}
154
- )
155
-
156
- print(completion.choices[0].message)`,
157
- },
158
- ];
159
- }
160
- };
161
-
162
- const snippetZeroShotClassification = (model: ModelDataMinimal): InferenceSnippet[] => {
163
- return [
164
- {
165
- client: "requests",
166
- content: `\
167
- def query(payload):
168
- response = requests.post(API_URL, headers=headers, json=payload)
169
- return response.json()
170
-
171
- output = query({
172
- "inputs": ${getModelInputSnippet(model)},
173
- "parameters": {"candidate_labels": ["refund", "legal", "faq"]},
174
- })`,
175
- },
176
- ];
177
- };
178
-
179
- const snippetZeroShotImageClassification = (model: ModelDataMinimal): InferenceSnippet[] => {
180
- return [
181
- {
182
- client: "requests",
183
- content: `def query(data):
184
- with open(data["image_path"], "rb") as f:
185
- img = f.read()
186
- payload={
187
- "parameters": data["parameters"],
188
- "inputs": base64.b64encode(img).decode("utf-8")
189
- }
190
- response = requests.post(API_URL, headers=headers, json=payload)
191
- return response.json()
192
-
193
- output = query({
194
- "image_path": ${getModelInputSnippet(model)},
195
- "parameters": {"candidate_labels": ["cat", "dog", "llama"]},
196
- })`,
197
- },
198
- ];
199
- };
200
-
201
- const snippetBasic = (
202
- model: ModelDataMinimal,
203
- accessToken: string,
204
- provider: SnippetInferenceProvider
205
- ): InferenceSnippet[] => {
206
- return [
207
- ...(model.pipeline_tag && model.pipeline_tag in HFH_INFERENCE_CLIENT_METHODS
208
- ? [
209
- {
210
- client: "huggingface_hub",
211
- content: `\
212
- ${snippetImportInferenceClient(accessToken, provider)}
213
-
214
- result = client.${HFH_INFERENCE_CLIENT_METHODS[model.pipeline_tag]}(
215
- inputs=${getModelInputSnippet(model)},
216
- model="${model.id}",
217
- )
218
-
219
- print(result)
220
- `,
221
- },
222
- ]
223
- : []),
224
- {
225
- client: "requests",
226
- content: `\
227
- def query(payload):
228
- response = requests.post(API_URL, headers=headers, json=payload)
229
- return response.json()
230
-
231
- output = query({
232
- "inputs": ${getModelInputSnippet(model)},
233
- })`,
234
- },
235
- ];
236
- };
237
-
238
- const snippetFile = (model: ModelDataMinimal): InferenceSnippet[] => {
239
- return [
240
- {
241
- client: "requests",
242
- content: `\
243
- def query(filename):
244
- with open(filename, "rb") as f:
245
- data = f.read()
246
- response = requests.post(API_URL, headers=headers, data=data)
247
- return response.json()
248
-
249
- output = query(${getModelInputSnippet(model)})`,
250
- },
251
- ];
252
- };
253
-
254
- const snippetTextToImage = (
255
- model: ModelDataMinimal,
256
- accessToken: string,
257
- provider: SnippetInferenceProvider,
258
- providerModelId?: string
259
- ): InferenceSnippet[] => {
260
- return [
261
- {
262
- client: "huggingface_hub",
263
- content: `\
264
- ${snippetImportInferenceClient(accessToken, provider)}
265
-
266
- # output is a PIL.Image object
267
- image = client.text_to_image(
268
- ${getModelInputSnippet(model)},
269
- model="${model.id}",
270
- )`,
271
- },
272
- ...(provider === "fal-ai"
273
- ? [
274
- {
275
- client: "fal-client",
276
- content: `\
277
- import fal_client
278
-
279
- result = fal_client.subscribe(
280
- "${providerModelId ?? model.id}",
281
- arguments={
282
- "prompt": ${getModelInputSnippet(model)},
283
- },
284
- )
285
- print(result)
286
- `,
287
- },
288
- ]
289
- : []),
290
- ...(provider === "hf-inference"
291
- ? [
292
- {
293
- client: "requests",
294
- content: `\
295
- def query(payload):
296
- response = requests.post(API_URL, headers=headers, json=payload)
297
- return response.content
298
-
299
- image_bytes = query({
300
- "inputs": ${getModelInputSnippet(model)},
301
- })
302
-
303
- # You can access the image with PIL.Image for example
304
- import io
305
- from PIL import Image
306
- image = Image.open(io.BytesIO(image_bytes))`,
307
- },
308
- ]
309
- : []),
310
- ];
311
- };
312
-
313
- const snippetTextToVideo = (
314
- model: ModelDataMinimal,
315
- accessToken: string,
316
- provider: SnippetInferenceProvider
317
- ): InferenceSnippet[] => {
318
- return ["fal-ai", "replicate"].includes(provider)
319
- ? [
320
- {
321
- client: "huggingface_hub",
322
- content: `\
323
- ${snippetImportInferenceClient(accessToken, provider)}
324
-
325
- video = client.text_to_video(
326
- ${getModelInputSnippet(model)},
327
- model="${model.id}",
328
- )`,
329
- },
330
- ]
331
- : [];
332
- };
333
-
334
- const snippetTabular = (model: ModelDataMinimal): InferenceSnippet[] => {
335
- return [
336
- {
337
- client: "requests",
338
- content: `\
339
- def query(payload):
340
- response = requests.post(API_URL, headers=headers, json=payload)
341
- return response.content
342
-
343
- response = query({
344
- "inputs": {"data": ${getModelInputSnippet(model)}},
345
- })`,
346
- },
347
- ];
348
- };
349
-
350
- const snippetTextToAudio = (model: ModelDataMinimal): InferenceSnippet[] => {
351
- // Transformers TTS pipeline and api-inference-community (AIC) pipeline outputs are diverged
352
- // with the latest update to inference-api (IA).
353
- // Transformers IA returns a byte object (wav file), whereas AIC returns wav and sampling_rate.
354
- if (model.library_name === "transformers") {
355
- return [
356
- {
357
- client: "requests",
358
- content: `\
359
- def query(payload):
360
- response = requests.post(API_URL, headers=headers, json=payload)
361
- return response.content
362
-
363
- audio_bytes = query({
364
- "inputs": ${getModelInputSnippet(model)},
365
- })
366
- # You can access the audio with IPython.display for example
367
- from IPython.display import Audio
368
- Audio(audio_bytes)`,
369
- },
370
- ];
371
- } else {
372
- return [
373
- {
374
- client: "requests",
375
- content: `def query(payload):
376
- response = requests.post(API_URL, headers=headers, json=payload)
377
- return response.json()
378
-
379
- audio, sampling_rate = query({
380
- "inputs": ${getModelInputSnippet(model)},
381
- })
382
- # You can access the audio with IPython.display for example
383
- from IPython.display import Audio
384
- Audio(audio, rate=sampling_rate)`,
385
- },
386
- ];
387
- }
388
- };
389
-
390
- const snippetAutomaticSpeechRecognition = (
391
- model: ModelDataMinimal,
392
- accessToken: string,
393
- provider: SnippetInferenceProvider
394
- ): InferenceSnippet[] => {
395
- return [
396
- {
397
- client: "huggingface_hub",
398
- content: `${snippetImportInferenceClient(accessToken, provider)}
399
- output = client.automatic_speech_recognition(${getModelInputSnippet(model)}, model="${model.id}")`,
400
- },
401
- snippetFile(model)[0],
402
- ];
403
- };
404
-
405
- const snippetDocumentQuestionAnswering = (
406
- model: ModelDataMinimal,
407
- accessToken: string,
408
- provider: SnippetInferenceProvider
409
- ): InferenceSnippet[] => {
410
- const inputsAsStr = getModelInputSnippet(model) as string;
411
- const inputsAsObj = JSON.parse(inputsAsStr);
412
-
413
- return [
414
- {
415
- client: "huggingface_hub",
416
- content: `${snippetImportInferenceClient(accessToken, provider)}
417
- output = client.document_question_answering(
418
- "${inputsAsObj.image}",
419
- question="${inputsAsObj.question}",
420
- model="${model.id}",
421
- )`,
422
- },
423
- {
424
- client: "requests",
425
- content: `def query(payload):
426
- with open(payload["image"], "rb") as f:
427
- img = f.read()
428
- payload["image"] = base64.b64encode(img).decode("utf-8")
429
- response = requests.post(API_URL, headers=headers, json=payload)
430
- return response.json()
431
-
432
- output = query({
433
- "inputs": ${inputsAsStr},
434
- })`,
435
- },
436
- ];
437
- };
438
-
439
- const snippetImageToImage = (
440
- model: ModelDataMinimal,
441
- accessToken: string,
442
- provider: SnippetInferenceProvider
443
- ): InferenceSnippet[] => {
444
- const inputsAsStr = getModelInputSnippet(model) as string;
445
- const inputsAsObj = JSON.parse(inputsAsStr);
446
-
447
- return [
448
- {
449
- client: "huggingface_hub",
450
- content: `${snippetImportInferenceClient(accessToken, provider)}
451
- # output is a PIL.Image object
452
- image = client.image_to_image(
453
- "${inputsAsObj.image}",
454
- prompt="${inputsAsObj.prompt}",
455
- model="${model.id}",
456
- )`,
457
- },
458
- {
459
- client: "requests",
460
- content: `def query(payload):
461
- with open(payload["inputs"], "rb") as f:
462
- img = f.read()
463
- payload["inputs"] = base64.b64encode(img).decode("utf-8")
464
- response = requests.post(API_URL, headers=headers, json=payload)
465
- return response.content
466
-
467
- image_bytes = query({
468
- "inputs": "${inputsAsObj.image}",
469
- "parameters": {"prompt": "${inputsAsObj.prompt}"},
470
- })
471
-
472
- # You can access the image with PIL.Image for example
473
- import io
474
- from PIL import Image
475
- image = Image.open(io.BytesIO(image_bytes))`,
476
- },
477
- ];
478
- };
479
-
480
- const pythonSnippets: Partial<
481
- Record<
482
- PipelineType,
483
- (
484
- model: ModelDataMinimal,
485
- accessToken: string,
486
- provider: SnippetInferenceProvider,
487
- providerModelId?: string,
488
- opts?: Record<string, unknown>
489
- ) => InferenceSnippet[]
490
- >
491
- > = {
492
- // Same order as in tasks/src/pipelines.ts
493
- "text-classification": snippetBasic,
494
- "token-classification": snippetBasic,
495
- "table-question-answering": snippetBasic,
496
- "question-answering": snippetBasic,
497
- "zero-shot-classification": snippetZeroShotClassification,
498
- translation: snippetBasic,
499
- summarization: snippetBasic,
500
- "feature-extraction": snippetBasic,
501
- "text-generation": snippetBasic,
502
- "text2text-generation": snippetBasic,
503
- "image-text-to-text": snippetConversational,
504
- "fill-mask": snippetBasic,
505
- "sentence-similarity": snippetBasic,
506
- "automatic-speech-recognition": snippetAutomaticSpeechRecognition,
507
- "text-to-image": snippetTextToImage,
508
- "text-to-video": snippetTextToVideo,
509
- "text-to-speech": snippetTextToAudio,
510
- "text-to-audio": snippetTextToAudio,
511
- "audio-to-audio": snippetFile,
512
- "audio-classification": snippetFile,
513
- "image-classification": snippetFile,
514
- "tabular-regression": snippetTabular,
515
- "tabular-classification": snippetTabular,
516
- "object-detection": snippetFile,
517
- "image-segmentation": snippetFile,
518
- "document-question-answering": snippetDocumentQuestionAnswering,
519
- "image-to-text": snippetFile,
520
- "image-to-image": snippetImageToImage,
521
- "zero-shot-image-classification": snippetZeroShotImageClassification,
522
- };
523
-
524
- export function getPythonInferenceSnippet(
525
- model: ModelDataMinimal,
526
- accessToken: string,
527
- provider: SnippetInferenceProvider,
528
- providerModelId?: string,
529
- opts?: Record<string, unknown>
530
- ): InferenceSnippet[] {
531
- if (model.tags.includes("conversational")) {
532
- // Conversational model detected, so we display a code snippet that features the Messages API
533
- return snippetConversational(model, accessToken, provider, providerModelId, opts);
534
- } else {
535
- const snippets =
536
- model.pipeline_tag && model.pipeline_tag in pythonSnippets
537
- ? pythonSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId) ?? []
538
- : [];
539
-
540
- return snippets.map((snippet) => {
541
- return {
542
- ...snippet,
543
- content: addImportsToSnippet(snippet.content, model, accessToken),
544
- };
545
- });
546
- }
547
- }
548
-
549
- const addImportsToSnippet = (snippet: string, model: ModelDataMinimal, accessToken: string): string => {
550
- if (snippet.includes("requests")) {
551
- snippet = `import requests
552
-
553
- API_URL = "https://router.huggingface.co/hf-inference/models/${model.id}"
554
- headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}}
555
-
556
- ${snippet}`;
557
- }
558
- if (snippet.includes("base64")) {
559
- snippet = `import base64
560
- ${snippet}`;
561
- }
562
- return snippet;
563
- };