opencode-skills-antigravity 1.0.39 → 1.0.41

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91) hide show
  1. package/bundled-skills/.antigravity-install-manifest.json +10 -1
  2. package/bundled-skills/docs/integrations/jetski-cortex.md +3 -3
  3. package/bundled-skills/docs/integrations/jetski-gemini-loader/README.md +1 -1
  4. package/bundled-skills/docs/maintainers/repo-growth-seo.md +3 -3
  5. package/bundled-skills/docs/maintainers/security-findings-triage-2026-03-29-refresh.csv +34 -0
  6. package/bundled-skills/docs/maintainers/security-findings-triage-2026-03-29-refresh.md +2 -0
  7. package/bundled-skills/docs/maintainers/skills-update-guide.md +1 -1
  8. package/bundled-skills/docs/sources/sources.md +2 -2
  9. package/bundled-skills/docs/users/bundles.md +1 -1
  10. package/bundled-skills/docs/users/claude-code-skills.md +1 -1
  11. package/bundled-skills/docs/users/gemini-cli-skills.md +1 -1
  12. package/bundled-skills/docs/users/getting-started.md +1 -1
  13. package/bundled-skills/docs/users/kiro-integration.md +1 -1
  14. package/bundled-skills/docs/users/usage.md +4 -4
  15. package/bundled-skills/docs/users/visual-guide.md +4 -4
  16. package/bundled-skills/hugging-face-cli/SKILL.md +192 -195
  17. package/bundled-skills/hugging-face-community-evals/SKILL.md +213 -0
  18. package/bundled-skills/hugging-face-community-evals/examples/.env.example +3 -0
  19. package/bundled-skills/hugging-face-community-evals/examples/USAGE_EXAMPLES.md +101 -0
  20. package/bundled-skills/hugging-face-community-evals/scripts/inspect_eval_uv.py +104 -0
  21. package/bundled-skills/hugging-face-community-evals/scripts/inspect_vllm_uv.py +306 -0
  22. package/bundled-skills/hugging-face-community-evals/scripts/lighteval_vllm_uv.py +297 -0
  23. package/bundled-skills/hugging-face-dataset-viewer/SKILL.md +120 -120
  24. package/bundled-skills/hugging-face-gradio/SKILL.md +304 -0
  25. package/bundled-skills/hugging-face-gradio/examples.md +613 -0
  26. package/bundled-skills/hugging-face-jobs/SKILL.md +25 -18
  27. package/bundled-skills/hugging-face-jobs/index.html +216 -0
  28. package/bundled-skills/hugging-face-jobs/references/hardware_guide.md +336 -0
  29. package/bundled-skills/hugging-face-jobs/references/hub_saving.md +352 -0
  30. package/bundled-skills/hugging-face-jobs/references/token_usage.md +570 -0
  31. package/bundled-skills/hugging-face-jobs/references/troubleshooting.md +475 -0
  32. package/bundled-skills/hugging-face-jobs/scripts/cot-self-instruct.py +718 -0
  33. package/bundled-skills/hugging-face-jobs/scripts/finepdfs-stats.py +546 -0
  34. package/bundled-skills/hugging-face-jobs/scripts/generate-responses.py +587 -0
  35. package/bundled-skills/hugging-face-model-trainer/SKILL.md +11 -12
  36. package/bundled-skills/hugging-face-model-trainer/references/gguf_conversion.md +296 -0
  37. package/bundled-skills/hugging-face-model-trainer/references/hardware_guide.md +283 -0
  38. package/bundled-skills/hugging-face-model-trainer/references/hub_saving.md +364 -0
  39. package/bundled-skills/hugging-face-model-trainer/references/local_training_macos.md +231 -0
  40. package/bundled-skills/hugging-face-model-trainer/references/reliability_principles.md +371 -0
  41. package/bundled-skills/hugging-face-model-trainer/references/trackio_guide.md +189 -0
  42. package/bundled-skills/hugging-face-model-trainer/references/training_methods.md +150 -0
  43. package/bundled-skills/hugging-face-model-trainer/references/training_patterns.md +203 -0
  44. package/bundled-skills/hugging-face-model-trainer/references/troubleshooting.md +282 -0
  45. package/bundled-skills/hugging-face-model-trainer/references/unsloth.md +313 -0
  46. package/bundled-skills/hugging-face-model-trainer/scripts/convert_to_gguf.py +424 -0
  47. package/bundled-skills/hugging-face-model-trainer/scripts/dataset_inspector.py +417 -0
  48. package/bundled-skills/hugging-face-model-trainer/scripts/estimate_cost.py +150 -0
  49. package/bundled-skills/hugging-face-model-trainer/scripts/train_dpo_example.py +106 -0
  50. package/bundled-skills/hugging-face-model-trainer/scripts/train_grpo_example.py +89 -0
  51. package/bundled-skills/hugging-face-model-trainer/scripts/train_sft_example.py +122 -0
  52. package/bundled-skills/hugging-face-model-trainer/scripts/unsloth_sft_example.py +512 -0
  53. package/bundled-skills/hugging-face-paper-publisher/SKILL.md +11 -4
  54. package/bundled-skills/hugging-face-paper-publisher/examples/example_usage.md +326 -0
  55. package/bundled-skills/hugging-face-paper-publisher/references/quick_reference.md +216 -0
  56. package/bundled-skills/hugging-face-paper-publisher/scripts/paper_manager.py +606 -0
  57. package/bundled-skills/hugging-face-paper-publisher/templates/arxiv.md +299 -0
  58. package/bundled-skills/hugging-face-paper-publisher/templates/ml-report.md +358 -0
  59. package/bundled-skills/hugging-face-paper-publisher/templates/modern.md +319 -0
  60. package/bundled-skills/hugging-face-paper-publisher/templates/standard.md +201 -0
  61. package/bundled-skills/hugging-face-papers/SKILL.md +241 -0
  62. package/bundled-skills/hugging-face-trackio/.claude-plugin/plugin.json +19 -0
  63. package/bundled-skills/hugging-face-trackio/SKILL.md +117 -0
  64. package/bundled-skills/hugging-face-trackio/references/alerts.md +196 -0
  65. package/bundled-skills/hugging-face-trackio/references/logging_metrics.md +206 -0
  66. package/bundled-skills/hugging-face-trackio/references/retrieving_metrics.md +251 -0
  67. package/bundled-skills/hugging-face-vision-trainer/SKILL.md +595 -0
  68. package/bundled-skills/hugging-face-vision-trainer/references/finetune_sam2_trainer.md +254 -0
  69. package/bundled-skills/hugging-face-vision-trainer/references/hub_saving.md +618 -0
  70. package/bundled-skills/hugging-face-vision-trainer/references/image_classification_training_notebook.md +279 -0
  71. package/bundled-skills/hugging-face-vision-trainer/references/object_detection_training_notebook.md +700 -0
  72. package/bundled-skills/hugging-face-vision-trainer/references/reliability_principles.md +310 -0
  73. package/bundled-skills/hugging-face-vision-trainer/references/timm_trainer.md +91 -0
  74. package/bundled-skills/hugging-face-vision-trainer/scripts/dataset_inspector.py +814 -0
  75. package/bundled-skills/hugging-face-vision-trainer/scripts/estimate_cost.py +217 -0
  76. package/bundled-skills/hugging-face-vision-trainer/scripts/image_classification_training.py +383 -0
  77. package/bundled-skills/hugging-face-vision-trainer/scripts/object_detection_training.py +710 -0
  78. package/bundled-skills/hugging-face-vision-trainer/scripts/sam_segmentation_training.py +382 -0
  79. package/bundled-skills/jq/SKILL.md +273 -0
  80. package/bundled-skills/odoo-edi-connector/SKILL.md +32 -10
  81. package/bundled-skills/odoo-woocommerce-bridge/SKILL.md +9 -5
  82. package/bundled-skills/tmux/SKILL.md +370 -0
  83. package/bundled-skills/transformers-js/SKILL.md +639 -0
  84. package/bundled-skills/transformers-js/references/CACHE.md +339 -0
  85. package/bundled-skills/transformers-js/references/CONFIGURATION.md +390 -0
  86. package/bundled-skills/transformers-js/references/EXAMPLES.md +605 -0
  87. package/bundled-skills/transformers-js/references/MODEL_ARCHITECTURES.md +167 -0
  88. package/bundled-skills/transformers-js/references/PIPELINE_OPTIONS.md +545 -0
  89. package/bundled-skills/transformers-js/references/TEXT_GENERATION.md +315 -0
  90. package/bundled-skills/viboscope/SKILL.md +64 -0
  91. package/package.json +1 -1
@@ -0,0 +1,605 @@
1
+ # Transformers.js Code Examples
2
+
3
+ Working examples showing how to use Transformers.js across different runtimes and frameworks.
4
+
5
+ All examples use the same task and model for consistency:
6
+ - **Task**: `feature-extraction`
7
+ - **Model**: `onnx-community/all-MiniLM-L6-v2-ONNX`
8
+
9
+ ## Table of Contents
10
+ 1. [Browser (Vanilla JS)](#browser-vanilla-js)
11
+ 2. [Node.js](#nodejs)
12
+ 3. [React](#react)
13
+ 4. [Express API](#express-api)
14
+
15
+ ## Browser (Vanilla JS)
16
+
17
+ ### Basic Usage
18
+
19
+ ```html
20
+ <!DOCTYPE html>
21
+ <html>
22
+ <head>
23
+ <title>Feature Extraction</title>
24
+ </head>
25
+ <body>
26
+ <h1>Text Embedding Generator</h1>
27
+ <textarea id="input" placeholder="Enter text to embed..."></textarea>
28
+ <button onclick="generateEmbedding()">Generate Embedding</button>
29
+ <div id="result"></div>
30
+ <div id="loading" style="display:none;">Loading model...</div>
31
+
32
+ <script type="module">
33
+ import { pipeline } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.1';
34
+
35
+ let extractor;
36
+
37
+ // Initialize model on page load
38
+ document.getElementById('loading').style.display = 'block';
39
+ extractor = await pipeline(
40
+ 'feature-extraction',
41
+ 'onnx-community/all-MiniLM-L6-v2-ONNX'
42
+ );
43
+ document.getElementById('loading').style.display = 'none';
44
+
45
+ window.generateEmbedding = async function() {
46
+ const text = document.getElementById('input').value;
47
+ const output = await extractor(text, { pooling: 'mean', normalize: true });
48
+
49
+ document.getElementById('result').innerHTML = `
50
+ <h3>Embedding Generated:</h3>
51
+ <p>Dimensions: ${output.data.length}</p>
52
+ <p>First 5 values: ${Array.from(output.data).slice(0, 5).join(', ')}</p>
53
+ `;
54
+ };
55
+
56
+ // Cleanup on page unload
57
+ window.addEventListener('beforeunload', () => {
58
+ if (extractor) extractor.dispose();
59
+ });
60
+ </script>
61
+ </body>
62
+ </html>
63
+ ```
64
+
65
+ ### With Progress Tracking
66
+
67
+ ```html
68
+ <!DOCTYPE html>
69
+ <html>
70
+ <head>
71
+ <title>Feature Extraction with Progress</title>
72
+ <style>
73
+ .file-progress {
74
+ margin: 10px 0;
75
+ }
76
+ .file-name {
77
+ font-size: 12px;
78
+ margin-bottom: 5px;
79
+ }
80
+ .progress-bar {
81
+ width: 100%;
82
+ height: 20px;
83
+ background: #f0f0f0;
84
+ border-radius: 5px;
85
+ overflow: hidden;
86
+ }
87
+ .progress-fill {
88
+ height: 100%;
89
+ background: #4CAF50;
90
+ transition: width 0.3s;
91
+ }
92
+ </style>
93
+ </head>
94
+ <body>
95
+ <h1>Text Embedding Generator</h1>
96
+ <div id="loading">
97
+ <p id="status">Loading model...</p>
98
+ <div id="progress-container"></div>
99
+ </div>
100
+ <div id="app" style="display:none;">
101
+ <textarea id="input" placeholder="Enter text..."></textarea>
102
+ <button onclick="generateEmbedding()">Generate</button>
103
+ <div id="result"></div>
104
+ </div>
105
+
106
+ <script type="module">
107
+ import { pipeline } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.1';
108
+
109
+ let extractor;
110
+ const fileProgressBars = {};
111
+ const progressContainer = document.getElementById('progress-container');
112
+
113
+ extractor = await pipeline(
114
+ 'feature-extraction',
115
+ 'onnx-community/all-MiniLM-L6-v2-ONNX',
116
+ {
117
+ progress_callback: (info) => {
118
+ document.getElementById('status').textContent = `${info.status}: ${info.file}`;
119
+
120
+ if (info.status === 'progress') {
121
+ // Create progress bar for each file
122
+ if (!fileProgressBars[info.file]) {
123
+ const fileDiv = document.createElement('div');
124
+ fileDiv.className = 'file-progress';
125
+ fileDiv.innerHTML = `
126
+ <div class="file-name">${info.file}</div>
127
+ <div class="progress-bar">
128
+ <div class="progress-fill"></div>
129
+ </div>
130
+ `;
131
+ progressContainer.appendChild(fileDiv);
132
+ fileProgressBars[info.file] = fileDiv.querySelector('.progress-fill');
133
+ }
134
+
135
+ // Update progress
136
+ fileProgressBars[info.file].style.width = `${info.progress}%`;
137
+ }
138
+
139
+ if (info.status === 'ready') {
140
+ document.getElementById('loading').style.display = 'none';
141
+ document.getElementById('app').style.display = 'block';
142
+ }
143
+ }
144
+ }
145
+ );
146
+
147
+ window.generateEmbedding = async function() {
148
+ const text = document.getElementById('input').value;
149
+ const output = await extractor(text, { pooling: 'mean', normalize: true });
150
+
151
+ document.getElementById('result').innerHTML = `
152
+ <p>Embedding: ${output.data.length} dimensions</p>
153
+ `;
154
+ };
155
+
156
+ // Cleanup on page unload
157
+ window.addEventListener('beforeunload', () => {
158
+ if (extractor) extractor.dispose();
159
+ });
160
+ </script>
161
+ </body>
162
+ </html>
163
+ ```
164
+
165
+ ## Node.js
166
+
167
+ ### Basic Script
168
+
169
+ ```javascript
170
+ // embed.js
171
+ import { pipeline } from '@huggingface/transformers';
172
+
173
+ async function generateEmbedding(text) {
174
+ const extractor = await pipeline(
175
+ 'feature-extraction',
176
+ 'onnx-community/all-MiniLM-L6-v2-ONNX'
177
+ );
178
+
179
+ const output = await extractor(text, { pooling: 'mean', normalize: true });
180
+
181
+ console.log('Text:', text);
182
+ console.log('Embedding dimensions:', output.data.length);
183
+ console.log('First 5 values:', Array.from(output.data).slice(0, 5));
184
+
185
+ await extractor.dispose();
186
+ }
187
+
188
+ generateEmbedding('Hello, world!');
189
+ ```
190
+
191
+ ### Batch Processing
192
+
193
+ ```javascript
194
+ // batch-embed.js
195
+ import { pipeline } from '@huggingface/transformers';
196
+ import fs from 'fs/promises';
197
+
198
+ async function embedDocuments(documents) {
199
+ const extractor = await pipeline(
200
+ 'feature-extraction',
201
+ 'onnx-community/all-MiniLM-L6-v2-ONNX'
202
+ );
203
+
204
+ console.log(`Processing ${documents.length} documents...`);
205
+
206
+ const embeddings = [];
207
+
208
+ for (let i = 0; i < documents.length; i++) {
209
+ const output = await extractor(documents[i], {
210
+ pooling: 'mean',
211
+ normalize: true
212
+ });
213
+
214
+ embeddings.push({
215
+ text: documents[i],
216
+ embedding: Array.from(output.data)
217
+ });
218
+
219
+ console.log(`Processed ${i + 1}/${documents.length}`);
220
+ }
221
+
222
+ await fs.writeFile(
223
+ 'embeddings.json',
224
+ JSON.stringify(embeddings, null, 2)
225
+ );
226
+
227
+ console.log('Saved to embeddings.json');
228
+
229
+ await extractor.dispose();
230
+ }
231
+
232
+ const documents = [
233
+ 'The cat sat on the mat',
234
+ 'A dog played in the park',
235
+ 'Machine learning is fascinating'
236
+ ];
237
+
238
+ embedDocuments(documents);
239
+ ```
240
+
241
+ ### CLI with Progress
242
+
243
+ ```javascript
244
+ // cli-embed.js
245
+ import { pipeline } from '@huggingface/transformers';
246
+
247
+ async function main() {
248
+ const text = process.argv[2] || 'Hello, world!';
249
+
250
+ console.log('Loading model...');
251
+
252
+ const fileProgress = {};
253
+
254
+ const extractor = await pipeline(
255
+ 'feature-extraction',
256
+ 'onnx-community/all-MiniLM-L6-v2-ONNX',
257
+ {
258
+ progress_callback: (info) => {
259
+ if (info.status === 'progress') {
260
+ fileProgress[info.file] = info.progress;
261
+
262
+ // Show all files progress
263
+ const progressLines = Object.entries(fileProgress)
264
+ .map(([file, progress]) => ` ${file}: ${progress.toFixed(1)}%`)
265
+ .join('\n');
266
+
267
+ process.stdout.write(`\r\x1b[K${progressLines}`);
268
+ }
269
+
270
+ if (info.status === 'done') {
271
+ console.log(`\n✓ ${info.file} complete`);
272
+ }
273
+
274
+ if (info.status === 'ready') {
275
+ console.log('\nModel ready!');
276
+ }
277
+ }
278
+ }
279
+ );
280
+
281
+ console.log('Generating embedding...');
282
+ const output = await extractor(text, { pooling: 'mean', normalize: true });
283
+
284
+ console.log(`\nText: "${text}"`);
285
+ console.log(`Dimensions: ${output.data.length}`);
286
+ console.log(`First 5 values: ${Array.from(output.data).slice(0, 5).join(', ')}`);
287
+
288
+ await extractor.dispose();
289
+ }
290
+
291
+ main();
292
+ ```
293
+
294
+ ## React
295
+
296
+ ### Basic Component
297
+
298
+ ```jsx
299
+ // EmbeddingGenerator.jsx
300
+ import { useState, useRef, useEffect } from 'react';
301
+ import { pipeline } from '@huggingface/transformers';
302
+
303
+ export function EmbeddingGenerator() {
304
+ const extractorRef = useRef(null);
305
+ const [text, setText] = useState('');
306
+ const [embedding, setEmbedding] = useState(null);
307
+ const [loading, setLoading] = useState(false);
308
+
309
+ const generate = async () => {
310
+ if (!text) return;
311
+
312
+ setLoading(true);
313
+
314
+ // Load model on first generate
315
+ if (!extractorRef.current) {
316
+ extractorRef.current = await pipeline(
317
+ 'feature-extraction',
318
+ 'onnx-community/all-MiniLM-L6-v2-ONNX'
319
+ );
320
+ }
321
+
322
+ const output = await extractorRef.current(text, {
323
+ pooling: 'mean',
324
+ normalize: true
325
+ });
326
+ setEmbedding(Array.from(output.data));
327
+ setLoading(false);
328
+ };
329
+
330
+ // Cleanup on unmount
331
+ useEffect(() => {
332
+ return () => {
333
+ if (extractorRef.current) {
334
+ extractorRef.current.dispose();
335
+ }
336
+ };
337
+ }, []);
338
+
339
+ return (
340
+ <div>
341
+ <h2>Text Embedding Generator</h2>
342
+
343
+ <textarea
344
+ value={text}
345
+ onChange={(e) => setText(e.target.value)}
346
+ placeholder="Enter text"
347
+ disabled={loading}
348
+ />
349
+
350
+ <button onClick={generate} disabled={loading || !text}>
351
+ {loading ? 'Processing...' : 'Generate Embedding'}
352
+ </button>
353
+
354
+ {embedding && (
355
+ <div>
356
+ <h3>Result:</h3>
357
+ <p>Dimensions: {embedding.length}</p>
358
+ <p>First 5 values: {embedding.slice(0, 5).join(', ')}</p>
359
+ </div>
360
+ )}
361
+ </div>
362
+ );
363
+ }
364
+ ```
365
+
366
+ ### With Progress Tracking
367
+
368
+ ```jsx
369
+ // EmbeddingGeneratorWithProgress.jsx
370
+ import { useState, useRef, useEffect } from 'react';
371
+ import { pipeline } from '@huggingface/transformers';
372
+
373
+ export function EmbeddingGeneratorWithProgress() {
374
+ const extractorRef = useRef(null);
375
+ const [text, setText] = useState('');
376
+ const [embedding, setEmbedding] = useState(null);
377
+ const [fileProgress, setFileProgress] = useState({});
378
+ const [status, setStatus] = useState('');
379
+ const [loading, setLoading] = useState(false);
380
+
381
+ const generate = async () => {
382
+ if (!text) return;
383
+
384
+ setLoading(true);
385
+
386
+ // Load model on first generate
387
+ if (!extractorRef.current) {
388
+ setStatus('Loading model...');
389
+
390
+ extractorRef.current = await pipeline(
391
+ 'feature-extraction',
392
+ 'onnx-community/all-MiniLM-L6-v2-ONNX',
393
+ {
394
+ progress_callback: (info) => {
395
+ setStatus(`${info.status}: ${info.file}`);
396
+
397
+ if (info.status === 'progress') {
398
+ setFileProgress(prev => ({
399
+ ...prev,
400
+ [info.file]: info.progress
401
+ }));
402
+ }
403
+
404
+ if (info.status === 'ready') {
405
+ setStatus('Model ready!');
406
+ }
407
+ }
408
+ }
409
+ );
410
+ }
411
+
412
+ setStatus('Generating embedding...');
413
+ const output = await extractorRef.current(text, {
414
+ pooling: 'mean',
415
+ normalize: true
416
+ });
417
+ setEmbedding(Array.from(output.data));
418
+ setStatus('Complete!');
419
+ setLoading(false);
420
+ };
421
+
422
+ // Cleanup on unmount
423
+ useEffect(() => {
424
+ return () => {
425
+ if (extractorRef.current) {
426
+ extractorRef.current.dispose();
427
+ }
428
+ };
429
+ }, []);
430
+
431
+ return (
432
+ <div>
433
+ <h2>Text Embedding Generator</h2>
434
+
435
+ {loading && Object.keys(fileProgress).length > 0 && (
436
+ <div>
437
+ <p>{status}</p>
438
+ {Object.entries(fileProgress).map(([file, progress]) => (
439
+ <div key={file} style={{ margin: '10px 0' }}>
440
+ <div style={{ fontSize: '12px', marginBottom: '5px' }}>{file}</div>
441
+ <div style={{ width: '100%', height: '20px', background: '#f0f0f0', borderRadius: '5px', overflow: 'hidden' }}>
442
+ <div
443
+ style={{
444
+ width: `${progress}%`,
445
+ height: '100%',
446
+ background: '#4CAF50',
447
+ transition: 'width 0.3s'
448
+ }}
449
+ />
450
+ </div>
451
+ </div>
452
+ ))}
453
+ </div>
454
+ )}
455
+
456
+ <textarea
457
+ value={text}
458
+ onChange={(e) => setText(e.target.value)}
459
+ placeholder="Enter text"
460
+ disabled={loading}
461
+ />
462
+
463
+ <button onClick={generate} disabled={loading || !text}>
464
+ {loading ? 'Processing...' : 'Generate Embedding'}
465
+ </button>
466
+
467
+ {embedding && (
468
+ <div>
469
+ <h3>Result:</h3>
470
+ <p>Dimensions: {embedding.length}</p>
471
+ <p>First 5 values: {embedding.slice(0, 5).join(', ')}</p>
472
+ </div>
473
+ )}
474
+ </div>
475
+ );
476
+ }
477
+ ```
478
+
479
+ ## Express API
480
+
481
+ ### Basic API Server
482
+
483
+ ```javascript
484
+ // server.js
485
+ import express from 'express';
486
+ import { pipeline } from '@huggingface/transformers';
487
+
488
+ const app = express();
489
+ app.use(express.json());
490
+
491
+ // Initialize model once at startup
492
+ let extractor;
493
+ (async () => {
494
+ console.log('Loading model...');
495
+ extractor = await pipeline(
496
+ 'feature-extraction',
497
+ 'onnx-community/all-MiniLM-L6-v2-ONNX'
498
+ );
499
+ console.log('Model ready!');
500
+ })();
501
+
502
+ app.post('/embed', async (req, res) => {
503
+ try {
504
+ const { text } = req.body;
505
+
506
+ if (!text) {
507
+ return res.status(400).json({ error: 'Text is required' });
508
+ }
509
+
510
+ const output = await extractor(text, {
511
+ pooling: 'mean',
512
+ normalize: true
513
+ });
514
+
515
+ res.json({
516
+ text,
517
+ embedding: Array.from(output.data),
518
+ dimensions: output.data.length
519
+ });
520
+ } catch (error) {
521
+ console.error('Error:', error);
522
+ res.status(500).json({ error: 'Failed to generate embedding' });
523
+ }
524
+ });
525
+
526
+ app.listen(3000, () => {
527
+ console.log('Server running on http://localhost:3000');
528
+ });
529
+ ```
530
+
531
+ ### API with Graceful Shutdown
532
+
533
+ ```javascript
534
+ // server-with-shutdown.js
535
+ import express from 'express';
536
+ import { pipeline } from '@huggingface/transformers';
537
+
538
+ const app = express();
539
+ app.use(express.json());
540
+
541
+ let extractor;
542
+ let server;
543
+
544
+ async function initialize() {
545
+ console.log('Loading model...');
546
+ extractor = await pipeline(
547
+ 'feature-extraction',
548
+ 'onnx-community/all-MiniLM-L6-v2-ONNX'
549
+ );
550
+ console.log('Model ready!');
551
+ }
552
+
553
+ app.post('/embed', async (req, res) => {
554
+ try {
555
+ const { text } = req.body;
556
+
557
+ if (!text) {
558
+ return res.status(400).json({ error: 'Text is required' });
559
+ }
560
+
561
+ const output = await extractor(text, {
562
+ pooling: 'mean',
563
+ normalize: true
564
+ });
565
+
566
+ res.json({
567
+ embedding: Array.from(output.data),
568
+ dimensions: output.data.length
569
+ });
570
+ } catch (error) {
571
+ res.status(500).json({ error: error.message });
572
+ }
573
+ });
574
+
575
+ async function shutdown(signal) {
576
+ console.log(`\n${signal} received. Shutting down...`);
577
+
578
+ if (server) {
579
+ server.close(() => {
580
+ console.log('HTTP server closed');
581
+ });
582
+ }
583
+
584
+ if (extractor) {
585
+ console.log('Disposing model...');
586
+ await extractor.dispose();
587
+ console.log('Model disposed');
588
+ }
589
+
590
+ process.exit(0);
591
+ }
592
+
593
+ process.on('SIGTERM', () => shutdown('SIGTERM'));
594
+ process.on('SIGINT', () => shutdown('SIGINT'));
595
+
596
+ initialize().then(() => {
597
+ server = app.listen(3000, () => {
598
+ console.log('Server running on http://localhost:3000');
599
+ });
600
+ });
601
+ ```
602
+
603
+ ---
604
+
605
+ These examples demonstrate the same functionality across different runtimes and frameworks, making it easy to adapt to your specific use case. Most examples include proper cleanup with `.dispose()` to free memory; the basic Express server intentionally keeps the model loaded for the lifetime of the process (see the graceful-shutdown variant for cleanup).