voyageai-cli 1.10.0 → 1.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/lib/api.js CHANGED
@@ -78,6 +78,8 @@ async function apiRequest(endpoint, body) {
78
78
  body: JSON.stringify(body),
79
79
  });
80
80
 
81
+ // 429 Too Many Requests: honor the server's rate limit — wait for the
82
+ // Retry-After interval when given, otherwise back off exponentially.
81
83
  if (response.status === 429 && attempt < MAX_RETRIES) {
82
84
  const retryAfter = response.headers.get('Retry-After');
83
85
  const waitMs = retryAfter ? parseInt(retryAfter, 10) * 1000 : Math.pow(2, attempt) * 1000;
@@ -127,6 +129,7 @@ async function apiRequest(endpoint, body) {
127
129
  * @param {string} [options.inputType] - Input type (query|document)
128
130
  * @param {number} [options.dimensions] - Output dimensions
129
131
  * @param {boolean} [options.truncation] - Enable/disable truncation
132
+ * @param {string} [options.outputDtype] - Output data type: float, int8, uint8, binary, ubinary
130
133
  * @returns {Promise<object>} API response with embeddings
131
134
  */
132
135
  async function generateEmbeddings(texts, options = {}) {
@@ -146,6 +149,9 @@ async function generateEmbeddings(texts, options = {}) {
146
149
  if (options.truncation !== undefined) {
147
150
  body.truncation = options.truncation;
148
151
  }
152
+ if (options.outputDtype && options.outputDtype !== 'float') {
153
+ body.output_dtype = options.outputDtype;
154
+ }
149
155
 
150
156
  return apiRequest('/embeddings', body);
151
157
  }
@@ -22,6 +22,8 @@ function getDefaultDimensions() {
22
22
  return getConfigValue('defaultDimensions') || DEFAULT_DIMENSIONS;
23
23
  }
24
24
 
25
+ // Static catalog of available models: name, type, context window, supported
26
+ // dimensions, pricing, and the use cases each model is best suited for.
25
27
  /** @type {Array<{name: string, type: string, context: string, dimensions: string, price: string, bestFor: string}>} */
26
28
  const MODEL_CATALOG = [
27
29
  { name: 'voyage-4-large', type: 'embedding', context: '32K', dimensions: '1024 (default), 256, 512, 2048', price: '$0.12/1M tokens', bestFor: 'Best quality, multilingual', shortFor: 'Best quality' },
@@ -406,6 +406,65 @@ const concepts = {
406
406
  'vai embed --file document.txt --input-type document',
407
407
  ],
408
408
  },
409
+ quantization: {
410
+ title: 'Quantization & Flexible Dimensions',
411
+ summary: 'Reduce storage costs with lower-precision embeddings',
412
+ content: [
413
+ `${pc.cyan('Quantization')} reduces embedding precision from 32-bit floats to smaller`,
414
+ `representations, dramatically cutting storage and search costs with minimal`,
415
+ `quality loss. Combined with ${pc.cyan('Matryoshka dimensions')}, you can shrink vectors`,
416
+ `by up to ${pc.bold('128×')} (32× from binary × 4× from fewer dimensions).`,
417
+ ``,
418
+ `${pc.bold('Output data types (--output-dtype):')}`,
419
+ ``,
420
+ ` ${pc.cyan('float')} 32 bits/dim 4 bytes/dim Baseline (default)`,
421
+ ` ${pc.cyan('int8')} 8 bits/dim 1 byte/dim ${pc.green('4× smaller')} Signed: -128 to 127`,
422
+ ` ${pc.cyan('uint8')} 8 bits/dim 1 byte/dim ${pc.green('4× smaller')} Unsigned: 0 to 255`,
423
+ ` ${pc.cyan('binary')} 1 bit/dim 1/8 byte/dim ${pc.green('32× smaller')} Bit-packed int8 (offset binary)`,
424
+ ` ${pc.cyan('ubinary')} 1 bit/dim 1/8 byte/dim ${pc.green('32× smaller')} Bit-packed uint8`,
425
+ ``,
426
+ `${pc.bold('Storage math for 1M documents at 1024 dims:')}`,
427
+ ` float: ${pc.dim('1M × 1024 × 4B')} = ${pc.cyan('4.0 GB')}`,
428
+ ` int8: ${pc.dim('1M × 1024 × 1B')} = ${pc.cyan('1.0 GB')} (4× savings)`,
429
+ ` binary: ${pc.dim('1M × 1024 / 8B')} = ${pc.cyan('128 MB')} (32× savings)`,
430
+ ` ${pc.dim('+ reduced dimensions:')} 256-dim binary = ${pc.cyan('32 MB')} (128× savings)`,
431
+ ``,
432
+ `${pc.bold('How binary quantization works:')} Each float value is converted to a single bit:`,
433
+ `positive values become 1, zero/negative become 0. Eight bits are packed into`,
434
+ `one byte. ${pc.cyan('binary')} uses offset binary (subtract 128) for signed int8 output;`,
435
+ `${pc.cyan('ubinary')} stores the raw unsigned uint8 value.`,
436
+ ``,
437
+ `${pc.bold('Quality impact:')} Quantization-aware training minimizes degradation:`,
438
+ ` ${pc.dim('•')} ${pc.cyan('int8/uint8')} — Typically <1% retrieval quality loss vs float`,
439
+ ` ${pc.dim('•')} ${pc.cyan('binary/ubinary')} — ~2-5% quality loss; best paired with a reranker`,
440
+ ` ${pc.dim('•')} Combining lower dimensions + quantization compounds the quality loss`,
441
+ ``,
442
+ `${pc.bold('Matryoshka dimensions:')} Voyage 4 models produce ${pc.cyan('nested embeddings')} — the`,
443
+ `first 256 entries of a 1024-dim vector are themselves a valid 256-dim embedding.`,
444
+ `You can embed once at full dimension and truncate later without re-embedding.`,
445
+ `Supported values: 256, 512, 1024 (default), 2048.`,
446
+ ``,
447
+ `${pc.bold('Which vector databases support quantized storage?')}`,
448
+ ` ${pc.dim('•')} MongoDB Atlas Vector Search — float and int8`,
449
+ ` ${pc.dim('•')} Milvus, Qdrant, Weaviate, Elasticsearch, Vespa — float, int8, binary`,
450
+ ``,
451
+ `${pc.bold('Decision framework:')}`,
452
+ ` 1. Start with ${pc.cyan('float')} at default dimensions — measure your baseline`,
453
+ ` 2. Try ${pc.cyan('int8')} — if quality holds, you get 4× storage savings for free`,
454
+ ` 3. If storage is critical, try ${pc.cyan('binary')} + reranker for 32× savings`,
455
+ ` 4. Reduce dimensions (1024→256) for another 4× on top of quantization`,
456
+ ` 5. Use ${pc.cyan('vai benchmark quantization')} to measure the tradeoffs on your data`,
457
+ ].join('\n'),
458
+ links: [
459
+ 'https://docs.voyageai.com/docs/flexible-dimensions-and-quantization',
460
+ 'https://www.mongodb.com/docs/voyageai/models/text-embeddings/',
461
+ ],
462
+ tryIt: [
463
+ 'vai embed "hello world" --output-dtype int8',
464
+ 'vai embed "hello world" --output-dtype binary --dimensions 256',
465
+ 'vai benchmark quantization --model voyage-4-large',
466
+ ],
467
+ },
409
468
  benchmarking: {
410
469
  title: 'Benchmarking & Model Selection',
411
470
  summary: 'How to choose the right model for your use case',
@@ -434,12 +493,18 @@ const concepts = {
434
493
  ` Measures throughput (texts/sec) at different batch sizes.`,
435
494
  ` ${pc.dim('vai benchmark batch --batch-sizes 1,5,10,25,50 --rounds 3')}`,
436
495
  ``,
496
+ `${pc.bold('vai benchmark quantization')} — Compare output dtypes for storage savings:`,
497
+ ` Embeds the same corpus with float, int8, and binary, measures ranking quality`,
498
+ ` degradation vs storage savings. Helps you decide if quantization works for your data.`,
499
+ ` ${pc.dim('vai benchmark quantization --model voyage-4-large --dtypes float,int8,ubinary')}`,
500
+ ``,
437
501
  `${pc.bold('Decision framework:')}`,
438
502
  ` 1. Run ${pc.cyan('benchmark cost')} to eliminate models outside your budget`,
439
503
  ` 2. Run ${pc.cyan('benchmark embed')} to compare latency of affordable models`,
440
504
  ` 3. Run ${pc.cyan('benchmark similarity')} with your actual data to compare quality`,
441
- ` 4. If quality is similar, pick the cheaper/faster model`,
442
- ` 5. Use ${pc.cyan('--save')} to track results over time as your data evolves`,
505
+ ` 4. Run ${pc.cyan('benchmark quantization')} to see if int8/binary preserves your ranking`,
506
+ ` 5. If quality is similar, pick the cheaper/faster model + smallest viable dtype`,
507
+ ` 6. Use ${pc.cyan('--save')} to track results over time as your data evolves`,
443
508
  ].join('\n'),
444
509
  links: ['https://www.mongodb.com/docs/voyageai/models/text-embeddings/'],
445
510
  tryIt: [
@@ -488,6 +553,15 @@ const aliases = {
488
553
  batch: 'batch-processing',
489
554
  'batch-processing': 'batch-processing',
490
555
  batching: 'batch-processing',
556
+ quantization: 'quantization',
557
+ quantize: 'quantization',
558
+ 'output-dtype': 'quantization',
559
+ dtype: 'quantization',
560
+ int8: 'quantization',
561
+ binary: 'quantization',
562
+ ubinary: 'quantization',
563
+ matryoshka: 'quantization',
564
+ 'flexible-dimensions': 'quantization',
491
565
  benchmark: 'benchmarking',
492
566
  benchmarking: 'benchmarking',
493
567
  'model-selection': 'benchmarking',
package/src/lib/math.js CHANGED
@@ -3,6 +3,11 @@
3
3
  /**
4
4
  * Compute cosine similarity between two vectors.
5
5
  * cosine_sim(a, b) = dot(a, b) / (||a|| * ||b||)
6
+ *
7
+ * Fun fact: this is basically asking "how much do these two vectors
8
+ * vibe?" — 1.0 means soulmates, 0.0 means strangers at a party,
9
+ * -1.0 means they're in a Twitter argument.
10
+ *
6
11
  * @param {number[]} a
7
12
  * @param {number[]} b
8
13
  * @returns {number} Similarity score in [-1, 1]