@promptbook/azure-openai 0.94.0-0 → 0.94.0-12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/README.md +1 -8
  2. package/esm/index.es.js +93 -98
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/cli.index.d.ts +4 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  6. package/esm/typings/src/_packages/ollama.index.d.ts +14 -0
  7. package/esm/typings/src/_packages/openai.index.d.ts +2 -0
  8. package/esm/typings/src/_packages/types.index.d.ts +2 -0
  9. package/esm/typings/src/_packages/wizzard.index.d.ts +4 -0
  10. package/esm/typings/src/execution/AvailableModel.d.ts +9 -1
  11. package/esm/typings/src/execution/ExecutionTask.d.ts +3 -1
  12. package/esm/typings/src/llm-providers/_common/filterModels.d.ts +2 -2
  13. package/esm/typings/src/llm-providers/{openai/computeUsage.d.ts → _common/utils/pricing.d.ts} +2 -2
  14. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +1 -1
  15. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionToolsOptions.d.ts +1 -1
  16. package/esm/typings/src/llm-providers/deepseek/DeepseekExecutionToolsOptions.d.ts +1 -1
  17. package/esm/typings/src/llm-providers/google/GoogleExecutionToolsOptions.d.ts +1 -1
  18. package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +44 -0
  19. package/esm/typings/src/llm-providers/ollama/OllamaExecutionToolsOptions.d.ts +23 -0
  20. package/esm/typings/src/llm-providers/ollama/createOllamaExecutionTools.d.ts +11 -0
  21. package/esm/typings/src/llm-providers/ollama/ollama-models.d.ts +14 -0
  22. package/esm/typings/src/llm-providers/ollama/playground/playground.d.ts +6 -0
  23. package/esm/typings/src/llm-providers/ollama/register-configuration.d.ts +14 -0
  24. package/esm/typings/src/llm-providers/ollama/register-constructor.d.ts +15 -0
  25. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +1 -1
  26. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +91 -0
  27. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +12 -53
  28. package/esm/typings/src/llm-providers/openai/OpenAiExecutionToolsOptions.d.ts +1 -1
  29. package/esm/typings/src/llm-providers/openai/createOpenAiExecutionTools.d.ts +2 -0
  30. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -7
  31. package/esm/typings/src/version.d.ts +1 -1
  32. package/package.json +25 -2
  33. package/umd/index.umd.js +93 -98
  34. package/umd/index.umd.js.map +1 -1
  35. /package/esm/typings/src/llm-providers/{openai/computeUsage.test.d.ts → _common/utils/pricing.test.d.ts} +0 -0
package/umd/index.umd.js CHANGED
@@ -24,7 +24,7 @@
24
24
  * @generated
25
25
  * @see https://github.com/webgptorg/promptbook
26
26
  */
27
- const PROMPTBOOK_ENGINE_VERSION = '0.94.0-0';
27
+ const PROMPTBOOK_ENGINE_VERSION = '0.94.0-12';
28
28
  /**
29
29
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
30
30
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1200,11 +1200,11 @@
1200
1200
  }
1201
1201
 
1202
1202
  /**
1203
- * Function computeUsage will create price per one token based on the string value found on openai page
1203
+ * Create price per one token based on the string value found on openai page
1204
1204
  *
1205
1205
  * @private within the repository, used only as internal helper for `OPENAI_MODELS`
1206
1206
  */
1207
- function computeUsage(value) {
1207
+ function pricing(value) {
1208
1208
  const [price, tokens] = value.split(' / ');
1209
1209
  return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
1210
1210
  }
@@ -1238,10 +1238,10 @@
1238
1238
  modelVariant: 'COMPLETION',
1239
1239
  modelTitle: 'davinci-002',
1240
1240
  modelName: 'davinci-002',
1241
- modelDescription: 'Legacy completion model with strong performance on text generation tasks. Optimized for complex instructions and longer outputs.',
1241
+ modelDescription: 'Legacy completion model with 4K token context window. Excels at complex text generation, creative writing, and detailed content creation with strong contextual understanding. Optimized for instructions requiring nuanced outputs and extended reasoning. Suitable for applications needing high-quality text generation without conversation management.',
1242
1242
  pricing: {
1243
- prompt: computeUsage(`$2.00 / 1M tokens`),
1244
- output: computeUsage(`$2.00 / 1M tokens`),
1243
+ prompt: pricing(`$2.00 / 1M tokens`),
1244
+ output: pricing(`$2.00 / 1M tokens`),
1245
1245
  },
1246
1246
  },
1247
1247
  /**/
@@ -1256,10 +1256,10 @@
1256
1256
  modelVariant: 'CHAT',
1257
1257
  modelTitle: 'gpt-3.5-turbo-16k',
1258
1258
  modelName: 'gpt-3.5-turbo-16k',
1259
- modelDescription: 'GPT-3.5 Turbo with extended 16k token context length for handling longer conversations and documents.',
1259
+ modelDescription: 'Extended context GPT-3.5 Turbo with 16K token window. Maintains core capabilities of standard 3.5 Turbo while supporting longer conversations and documents. Features good balance of performance and cost for applications requiring more context than standard 4K models. Effective for document analysis, extended conversations, and multi-step reasoning tasks.',
1260
1260
  pricing: {
1261
- prompt: computeUsage(`$3.00 / 1M tokens`),
1262
- output: computeUsage(`$4.00 / 1M tokens`),
1261
+ prompt: pricing(`$3.00 / 1M tokens`),
1262
+ output: pricing(`$4.00 / 1M tokens`),
1263
1263
  },
1264
1264
  },
1265
1265
  /**/
@@ -1280,10 +1280,10 @@
1280
1280
  modelVariant: 'CHAT',
1281
1281
  modelTitle: 'gpt-4',
1282
1282
  modelName: 'gpt-4',
1283
- modelDescription: 'GPT-4 is a powerful language model with enhanced reasoning, instruction-following capabilities, and 8K context window. Optimized for complex tasks requiring deep understanding.',
1283
+ modelDescription: 'Powerful language model with 8K context window featuring sophisticated reasoning, instruction-following, and knowledge capabilities. Demonstrates strong performance on complex tasks requiring deep understanding and multi-step reasoning. Excels at code generation, logical analysis, and nuanced content creation. Suitable for advanced applications requiring high-quality outputs.',
1284
1284
  pricing: {
1285
- prompt: computeUsage(`$30.00 / 1M tokens`),
1286
- output: computeUsage(`$60.00 / 1M tokens`),
1285
+ prompt: pricing(`$30.00 / 1M tokens`),
1286
+ output: pricing(`$60.00 / 1M tokens`),
1287
1287
  },
1288
1288
  },
1289
1289
  /**/
@@ -1292,10 +1292,10 @@
1292
1292
  modelVariant: 'CHAT',
1293
1293
  modelTitle: 'gpt-4-32k',
1294
1294
  modelName: 'gpt-4-32k',
1295
- modelDescription: 'Extended context version of GPT-4 with a 32K token window for processing very long inputs and generating comprehensive responses for complex tasks.',
1295
+ modelDescription: 'Extended context version of GPT-4 with 32K token window. Maintains all capabilities of standard GPT-4 while supporting analysis of very lengthy documents, code bases, and conversations. Features enhanced ability to maintain context over long interactions and process detailed information from large inputs. Ideal for document analysis, legal review, and complex problem-solving.',
1296
1296
  pricing: {
1297
- prompt: computeUsage(`$60.00 / 1M tokens`),
1298
- output: computeUsage(`$120.00 / 1M tokens`),
1297
+ prompt: pricing(`$60.00 / 1M tokens`),
1298
+ output: pricing(`$120.00 / 1M tokens`),
1299
1299
  },
1300
1300
  },
1301
1301
  /**/
@@ -1315,10 +1315,10 @@
1315
1315
  modelVariant: 'CHAT',
1316
1316
  modelTitle: 'gpt-4-turbo-2024-04-09',
1317
1317
  modelName: 'gpt-4-turbo-2024-04-09',
1318
- modelDescription: 'Latest stable GPT-4 Turbo model from April 2024 with enhanced reasoning and context handling capabilities. Offers 128K context window and improved performance.',
1318
+ modelDescription: 'Latest stable GPT-4 Turbo from April 2024 with 128K context window. Features enhanced reasoning chains, improved factual accuracy with 40% reduction in hallucinations, and better instruction following compared to earlier versions. Includes advanced function calling capabilities and knowledge up to April 2024. Provides optimal performance for enterprise applications requiring reliability.',
1319
1319
  pricing: {
1320
- prompt: computeUsage(`$10.00 / 1M tokens`),
1321
- output: computeUsage(`$30.00 / 1M tokens`),
1320
+ prompt: pricing(`$10.00 / 1M tokens`),
1321
+ output: pricing(`$30.00 / 1M tokens`),
1322
1322
  },
1323
1323
  },
1324
1324
  /**/
@@ -1327,10 +1327,10 @@
1327
1327
  modelVariant: 'CHAT',
1328
1328
  modelTitle: 'gpt-3.5-turbo-1106',
1329
1329
  modelName: 'gpt-3.5-turbo-1106',
1330
- modelDescription: 'November 2023 version of GPT-3.5 Turbo with improved instruction following and a 16K token context window.',
1330
+ modelDescription: 'November 2023 version of GPT-3.5 Turbo with 16K token context window. Features improved instruction following, more consistent output formatting, and enhanced function calling capabilities. Includes knowledge cutoff from April 2023. Suitable for applications requiring good performance at lower cost than GPT-4 models.',
1331
1331
  pricing: {
1332
- prompt: computeUsage(`$1.00 / 1M tokens`),
1333
- output: computeUsage(`$2.00 / 1M tokens`),
1332
+ prompt: pricing(`$1.00 / 1M tokens`),
1333
+ output: pricing(`$2.00 / 1M tokens`),
1334
1334
  },
1335
1335
  },
1336
1336
  /**/
@@ -1339,10 +1339,10 @@
1339
1339
  modelVariant: 'CHAT',
1340
1340
  modelTitle: 'gpt-4-turbo',
1341
1341
  modelName: 'gpt-4-turbo',
1342
- modelDescription: 'More capable model than GPT-4 with improved instruction following, function calling and a 128K token context window for handling very large documents.',
1342
+ modelDescription: 'More capable and cost-efficient version of GPT-4 with 128K token context window. Features improved instruction following, advanced function calling capabilities, and better performance on coding tasks. Maintains superior reasoning and knowledge while offering substantial cost reduction compared to base GPT-4. Ideal for complex applications requiring extensive context processing.',
1343
1343
  pricing: {
1344
- prompt: computeUsage(`$10.00 / 1M tokens`),
1345
- output: computeUsage(`$30.00 / 1M tokens`),
1344
+ prompt: pricing(`$10.00 / 1M tokens`),
1345
+ output: pricing(`$30.00 / 1M tokens`),
1346
1346
  },
1347
1347
  },
1348
1348
  /**/
@@ -1351,10 +1351,10 @@
1351
1351
  modelVariant: 'COMPLETION',
1352
1352
  modelTitle: 'gpt-3.5-turbo-instruct-0914',
1353
1353
  modelName: 'gpt-3.5-turbo-instruct-0914',
1354
- modelDescription: 'September 2023 version of GPT-3.5 Turbo optimized for completion-style instruction following with a 4K context window.',
1354
+ modelDescription: 'September 2023 version of GPT-3.5 Turbo Instruct with 4K context window. Optimized for completion-style instruction following with deterministic responses. Better suited than chat models for applications requiring specific formatted outputs without conversation management. Knowledge cutoff from September 2021.',
1355
1355
  pricing: {
1356
- prompt: computeUsage(`$1.50 / 1M tokens`),
1357
- output: computeUsage(`$2.00 / 1M tokens`), // <- For gpt-3.5-turbo-instruct
1356
+ prompt: pricing(`$1.50 / 1M tokens`),
1357
+ output: pricing(`$2.00 / 1M tokens`),
1358
1358
  },
1359
1359
  },
1360
1360
  /**/
@@ -1363,10 +1363,10 @@
1363
1363
  modelVariant: 'COMPLETION',
1364
1364
  modelTitle: 'gpt-3.5-turbo-instruct',
1365
1365
  modelName: 'gpt-3.5-turbo-instruct',
1366
- modelDescription: 'Optimized version of GPT-3.5 for completion-style API with good instruction following and a 4K token context window.',
1366
+ modelDescription: 'Optimized version of GPT-3.5 for completion-style API with 4K token context window. Features strong instruction following with single-turn design rather than multi-turn conversation. Provides more consistent, deterministic outputs compared to chat models. Well-suited for templated content generation and structured text transformation tasks.',
1367
1367
  pricing: {
1368
- prompt: computeUsage(`$1.50 / 1M tokens`),
1369
- output: computeUsage(`$2.00 / 1M tokens`),
1368
+ prompt: pricing(`$1.50 / 1M tokens`),
1369
+ output: pricing(`$2.00 / 1M tokens`),
1370
1370
  },
1371
1371
  },
1372
1372
  /**/
@@ -1381,10 +1381,10 @@
1381
1381
  modelVariant: 'CHAT',
1382
1382
  modelTitle: 'gpt-3.5-turbo',
1383
1383
  modelName: 'gpt-3.5-turbo',
1384
- modelDescription: 'Latest version of GPT-3.5 Turbo with improved performance and instruction following capabilities. Default 4K context window with options for 16K.',
1384
+ modelDescription: 'Latest version of GPT-3.5 Turbo with 4K token default context window (16K available). Features continually improved performance with enhanced instruction following and reduced hallucinations. Offers excellent balance between capability and cost efficiency. Suitable for most general-purpose applications requiring good AI capabilities at reasonable cost.',
1385
1385
  pricing: {
1386
- prompt: computeUsage(`$0.50 / 1M tokens`),
1387
- output: computeUsage(`$1.50 / 1M tokens`),
1386
+ prompt: pricing(`$0.50 / 1M tokens`),
1387
+ output: pricing(`$1.50 / 1M tokens`),
1388
1388
  },
1389
1389
  },
1390
1390
  /**/
@@ -1393,10 +1393,10 @@
1393
1393
  modelVariant: 'CHAT',
1394
1394
  modelTitle: 'gpt-3.5-turbo-0301',
1395
1395
  modelName: 'gpt-3.5-turbo-0301',
1396
- modelDescription: 'March 2023 version of GPT-3.5 Turbo with a 4K token context window. Legacy model maintained for backward compatibility.',
1396
+ modelDescription: 'March 2023 version of GPT-3.5 Turbo with 4K token context window. Legacy model maintained for backward compatibility with specific application behaviors. Features solid conversational abilities and basic instruction following. Knowledge cutoff from September 2021. Suitable for applications explicitly designed for this version.',
1397
1397
  pricing: {
1398
- prompt: computeUsage(`$1.50 / 1M tokens`),
1399
- output: computeUsage(`$2.00 / 1M tokens`),
1398
+ prompt: pricing(`$1.50 / 1M tokens`),
1399
+ output: pricing(`$2.00 / 1M tokens`),
1400
1400
  },
1401
1401
  },
1402
1402
  /**/
@@ -1405,10 +1405,10 @@
1405
1405
  modelVariant: 'COMPLETION',
1406
1406
  modelTitle: 'babbage-002',
1407
1407
  modelName: 'babbage-002',
1408
- modelDescription: 'Efficient legacy completion model with a good balance of performance and speed. Suitable for straightforward text generation tasks.',
1408
+ modelDescription: 'Efficient legacy completion model with 4K context window balancing performance and speed. Features moderate reasoning capabilities with focus on straightforward text generation tasks. Significantly more efficient than davinci models while maintaining adequate quality for many applications. Suitable for high-volume, cost-sensitive text generation needs.',
1409
1409
  pricing: {
1410
- prompt: computeUsage(`$0.40 / 1M tokens`),
1411
- output: computeUsage(`$0.40 / 1M tokens`),
1410
+ prompt: pricing(`$0.40 / 1M tokens`),
1411
+ output: pricing(`$0.40 / 1M tokens`),
1412
1412
  },
1413
1413
  },
1414
1414
  /**/
@@ -1417,10 +1417,10 @@
1417
1417
  modelVariant: 'CHAT',
1418
1418
  modelTitle: 'gpt-4-1106-preview',
1419
1419
  modelName: 'gpt-4-1106-preview',
1420
- modelDescription: 'November 2023 preview version of GPT-4 Turbo with improved instruction following and a 128K token context window.',
1420
+ modelDescription: 'November 2023 preview version of GPT-4 Turbo with 128K token context window. Features improved instruction following, better function calling capabilities, and enhanced reasoning. Includes knowledge cutoff from April 2023. Suitable for complex applications requiring extensive document understanding and sophisticated interactions.',
1421
1421
  pricing: {
1422
- prompt: computeUsage(`$10.00 / 1M tokens`),
1423
- output: computeUsage(`$30.00 / 1M tokens`),
1422
+ prompt: pricing(`$10.00 / 1M tokens`),
1423
+ output: pricing(`$30.00 / 1M tokens`),
1424
1424
  },
1425
1425
  },
1426
1426
  /**/
@@ -1429,10 +1429,10 @@
1429
1429
  modelVariant: 'CHAT',
1430
1430
  modelTitle: 'gpt-4-0125-preview',
1431
1431
  modelName: 'gpt-4-0125-preview',
1432
- modelDescription: 'January 2024 preview version of GPT-4 Turbo with improved reasoning capabilities and a 128K token context window.',
1432
+ modelDescription: 'January 2024 preview version of GPT-4 Turbo with 128K token context window. Features improved reasoning capabilities, enhanced tool use, and more reliable function calling. Includes knowledge cutoff from October 2023. Offers better performance on complex logical tasks and more consistent outputs than previous preview versions.',
1433
1433
  pricing: {
1434
- prompt: computeUsage(`$10.00 / 1M tokens`),
1435
- output: computeUsage(`$30.00 / 1M tokens`),
1434
+ prompt: pricing(`$10.00 / 1M tokens`),
1435
+ output: pricing(`$30.00 / 1M tokens`),
1436
1436
  },
1437
1437
  },
1438
1438
  /**/
@@ -1447,10 +1447,10 @@
1447
1447
  modelVariant: 'CHAT',
1448
1448
  modelTitle: 'gpt-3.5-turbo-0125',
1449
1449
  modelName: 'gpt-3.5-turbo-0125',
1450
- modelDescription: 'January 2024 version of GPT-3.5 Turbo with improved reasoning capabilities and a 16K token context window.',
1450
+ modelDescription: 'January 2024 version of GPT-3.5 Turbo with 16K token context window. Features improved reasoning capabilities, better instruction adherence, and reduced hallucinations compared to previous versions. Includes knowledge cutoff from September 2021. Provides good performance for most general applications at reasonable cost.',
1451
1451
  pricing: {
1452
- prompt: computeUsage(`$0.50 / 1M tokens`),
1453
- output: computeUsage(`$1.50 / 1M tokens`),
1452
+ prompt: pricing(`$0.50 / 1M tokens`),
1453
+ output: pricing(`$1.50 / 1M tokens`),
1454
1454
  },
1455
1455
  },
1456
1456
  /**/
@@ -1459,10 +1459,10 @@
1459
1459
  modelVariant: 'CHAT',
1460
1460
  modelTitle: 'gpt-4-turbo-preview',
1461
1461
  modelName: 'gpt-4-turbo-preview',
1462
- modelDescription: 'Preview version of GPT-4 Turbo that points to the latest model version. Features improved instruction following, 128K token context window and lower latency.',
1462
+ modelDescription: 'Preview version of GPT-4 Turbo with 128K token context window that points to the latest development model. Features cutting-edge improvements to instruction following, knowledge representation, and tool use capabilities. Provides access to newest features but may have occasional behavior changes. Best for non-critical applications wanting latest capabilities.',
1463
1463
  pricing: {
1464
- prompt: computeUsage(`$10.00 / 1M tokens`),
1465
- output: computeUsage(`$30.00 / 1M tokens`),
1464
+ prompt: pricing(`$10.00 / 1M tokens`),
1465
+ output: pricing(`$30.00 / 1M tokens`),
1466
1466
  },
1467
1467
  },
1468
1468
  /**/
@@ -1471,11 +1471,10 @@
1471
1471
  modelVariant: 'EMBEDDING',
1472
1472
  modelTitle: 'text-embedding-3-large',
1473
1473
  modelName: 'text-embedding-3-large',
1474
- modelDescription: "OpenAI's most capable text embedding model designed for high-quality embeddings for complex similarity tasks and information retrieval.",
1474
+ modelDescription: "OpenAI's most capable text embedding model generating 3072-dimensional vectors. Designed for high-quality embeddings for complex similarity tasks, clustering, and information retrieval. Features enhanced cross-lingual capabilities and significantly improved performance on retrieval and classification benchmarks. Ideal for sophisticated RAG systems and semantic search applications.",
1475
1475
  pricing: {
1476
- prompt: computeUsage(`$0.13 / 1M tokens`),
1477
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1478
- output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
1476
+ prompt: pricing(`$0.13 / 1M tokens`),
1477
+ output: 0,
1479
1478
  },
1480
1479
  },
1481
1480
  /**/
@@ -1484,11 +1483,10 @@
1484
1483
  modelVariant: 'EMBEDDING',
1485
1484
  modelTitle: 'text-embedding-3-small',
1486
1485
  modelName: 'text-embedding-3-small',
1487
- modelDescription: 'Cost-effective embedding model with good performance for simpler tasks like text similarity and retrieval. Good balance of quality and efficiency.',
1486
+ modelDescription: 'Cost-effective embedding model generating 1536-dimensional vectors. Balances quality and efficiency for simpler tasks while maintaining good performance on text similarity and retrieval applications. Offers 20% better quality than ada-002 at significantly lower cost. Ideal for production embedding applications with cost constraints.',
1488
1487
  pricing: {
1489
- prompt: computeUsage(`$0.02 / 1M tokens`),
1490
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1491
- output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
1488
+ prompt: pricing(`$0.02 / 1M tokens`),
1489
+ output: 0,
1492
1490
  },
1493
1491
  },
1494
1492
  /**/
@@ -1497,10 +1495,10 @@
1497
1495
  modelVariant: 'CHAT',
1498
1496
  modelTitle: 'gpt-3.5-turbo-0613',
1499
1497
  modelName: 'gpt-3.5-turbo-0613',
1500
- modelDescription: 'June 2023 version of GPT-3.5 Turbo with function calling capabilities and a 4K token context window.',
1498
+ modelDescription: "June 2023 version of GPT-3.5 Turbo with 4K token context window. Features function calling capabilities for structured data extraction and API interaction. Includes knowledge cutoff from September 2021. Maintained for applications specifically designed for this version's behaviors and capabilities.",
1501
1499
  pricing: {
1502
- prompt: computeUsage(`$1.50 / 1M tokens`),
1503
- output: computeUsage(`$2.00 / 1M tokens`),
1500
+ prompt: pricing(`$1.50 / 1M tokens`),
1501
+ output: pricing(`$2.00 / 1M tokens`),
1504
1502
  },
1505
1503
  },
1506
1504
  /**/
@@ -1509,11 +1507,10 @@
1509
1507
  modelVariant: 'EMBEDDING',
1510
1508
  modelTitle: 'text-embedding-ada-002',
1511
1509
  modelName: 'text-embedding-ada-002',
1512
- modelDescription: 'Legacy text embedding model suitable for text similarity and retrieval augmented generation use cases. Replaced by newer embedding-3 models.',
1510
+ modelDescription: 'Legacy text embedding model generating 1536-dimensional vectors suitable for text similarity and retrieval applications. Processes up to 8K tokens per request with consistent embedding quality. While superseded by newer embedding-3 models, still maintains adequate performance for many semantic search and classification tasks.',
1513
1511
  pricing: {
1514
- prompt: computeUsage(`$0.1 / 1M tokens`),
1515
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1516
- output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
1512
+ prompt: pricing(`$0.1 / 1M tokens`),
1513
+ output: 0,
1517
1514
  },
1518
1515
  },
1519
1516
  /**/
@@ -1540,10 +1537,10 @@
1540
1537
  modelVariant: 'CHAT',
1541
1538
  modelTitle: 'gpt-4o-2024-05-13',
1542
1539
  modelName: 'gpt-4o-2024-05-13',
1543
- modelDescription: 'May 2024 version of GPT-4o with enhanced multimodal capabilities, improved reasoning, and optimized for vision, audio and chat at lower latencies.',
1540
+ modelDescription: 'May 2024 version of GPT-4o with 128K context window. Features enhanced multimodal capabilities including superior image understanding (up to 20MP), audio processing, and improved reasoning. Optimized for 2x lower latency than GPT-4 Turbo while maintaining high performance. Includes knowledge up to October 2023. Ideal for production applications requiring reliable multimodal capabilities.',
1544
1541
  pricing: {
1545
- prompt: computeUsage(`$5.00 / 1M tokens`),
1546
- output: computeUsage(`$15.00 / 1M tokens`),
1542
+ prompt: pricing(`$5.00 / 1M tokens`),
1543
+ output: pricing(`$15.00 / 1M tokens`),
1547
1544
  },
1548
1545
  },
1549
1546
  /**/
@@ -1552,10 +1549,10 @@
1552
1549
  modelVariant: 'CHAT',
1553
1550
  modelTitle: 'gpt-4o',
1554
1551
  modelName: 'gpt-4o',
1555
- modelDescription: "OpenAI's most advanced multimodal model optimized for performance, speed, and cost. Capable of vision, reasoning, and high quality text generation.",
1552
+ modelDescription: "OpenAI's most advanced general-purpose multimodal model with 128K context window. Optimized for balanced performance, speed, and cost with 2x faster responses than GPT-4 Turbo. Features excellent vision processing, audio understanding, reasoning, and text generation quality. Represents optimal balance of capability and efficiency for most advanced applications.",
1556
1553
  pricing: {
1557
- prompt: computeUsage(`$5.00 / 1M tokens`),
1558
- output: computeUsage(`$15.00 / 1M tokens`),
1554
+ prompt: pricing(`$5.00 / 1M tokens`),
1555
+ output: pricing(`$15.00 / 1M tokens`),
1559
1556
  },
1560
1557
  },
1561
1558
  /**/
@@ -1564,10 +1561,10 @@
1564
1561
  modelVariant: 'CHAT',
1565
1562
  modelTitle: 'gpt-4o-mini',
1566
1563
  modelName: 'gpt-4o-mini',
1567
- modelDescription: 'Smaller, more cost-effective version of GPT-4o with good performance across text, vision, and audio tasks at reduced complexity.',
1564
+ modelDescription: 'Smaller, more cost-effective version of GPT-4o with 128K context window. Maintains impressive capabilities across text, vision, and audio tasks while operating at significantly lower cost. Features 3x faster inference than GPT-4o with good performance on general tasks. Excellent for applications requiring good quality multimodal capabilities at scale.',
1568
1565
  pricing: {
1569
- prompt: computeUsage(`$0.15 / 1M tokens`),
1570
- output: computeUsage(`$0.60 / 1M tokens`),
1566
+ prompt: pricing(`$0.15 / 1M tokens`),
1567
+ output: pricing(`$0.60 / 1M tokens`),
1571
1568
  },
1572
1569
  },
1573
1570
  /**/
@@ -1576,10 +1573,10 @@
1576
1573
  modelVariant: 'CHAT',
1577
1574
  modelTitle: 'o1-preview',
1578
1575
  modelName: 'o1-preview',
1579
- modelDescription: 'Advanced reasoning model with exceptional performance on complex logical, mathematical, and analytical tasks. Built for deep reasoning and specialized professional tasks.',
1576
+ modelDescription: 'Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Features exceptional step-by-step problem-solving capabilities, advanced mathematical and scientific reasoning, and superior performance on STEM-focused problems. Significantly outperforms GPT-4 on quantitative reasoning benchmarks. Ideal for professional and specialized applications.',
1580
1577
  pricing: {
1581
- prompt: computeUsage(`$15.00 / 1M tokens`),
1582
- output: computeUsage(`$60.00 / 1M tokens`),
1578
+ prompt: pricing(`$15.00 / 1M tokens`),
1579
+ output: pricing(`$60.00 / 1M tokens`),
1583
1580
  },
1584
1581
  },
1585
1582
  /**/
@@ -1588,11 +1585,10 @@
1588
1585
  modelVariant: 'CHAT',
1589
1586
  modelTitle: 'o1-preview-2024-09-12',
1590
1587
  modelName: 'o1-preview-2024-09-12',
1591
- modelDescription: 'September 2024 version of O1 preview with specialized reasoning capabilities for complex tasks requiring precise analytical thinking.',
1592
- // <- TODO: [💩] Some better system to organize these date suffixes and versions
1588
+ modelDescription: 'September 2024 version of O1 preview with 128K context window. Features specialized reasoning capabilities with 30% improvement on mathematical and scientific accuracy over previous versions. Includes enhanced support for formal logic, statistical analysis, and technical domains. Optimized for professional applications requiring precise analytical thinking and rigorous methodologies.',
1593
1589
  pricing: {
1594
- prompt: computeUsage(`$15.00 / 1M tokens`),
1595
- output: computeUsage(`$60.00 / 1M tokens`),
1590
+ prompt: pricing(`$15.00 / 1M tokens`),
1591
+ output: pricing(`$60.00 / 1M tokens`),
1596
1592
  },
1597
1593
  },
1598
1594
  /**/
@@ -1601,10 +1597,10 @@
1601
1597
  modelVariant: 'CHAT',
1602
1598
  modelTitle: 'o1-mini',
1603
1599
  modelName: 'o1-mini',
1604
- modelDescription: 'Smaller, cost-effective version of the O1 model with good performance on reasoning tasks while maintaining efficiency for everyday analytical use.',
1600
+ modelDescription: 'Smaller, cost-effective version of the O1 model with 128K context window. Maintains strong analytical reasoning abilities while reducing computational requirements by 70%. Features good performance on mathematical, logical, and scientific tasks at significantly lower cost than full O1. Excellent for everyday analytical applications that benefit from reasoning focus.',
1605
1601
  pricing: {
1606
- prompt: computeUsage(`$3.00 / 1M tokens`),
1607
- output: computeUsage(`$12.00 / 1M tokens`),
1602
+ prompt: pricing(`$3.00 / 1M tokens`),
1603
+ output: pricing(`$12.00 / 1M tokens`),
1608
1604
  },
1609
1605
  },
1610
1606
  /**/
@@ -1613,10 +1609,10 @@
1613
1609
  modelVariant: 'CHAT',
1614
1610
  modelTitle: 'o1',
1615
1611
  modelName: 'o1',
1616
- modelDescription: "OpenAI's advanced reasoning model focused on logic and problem-solving. Designed for complex analytical tasks with rigorous step-by-step reasoning. 128K context window.",
1612
+ modelDescription: "OpenAI's advanced reasoning model with 128K context window focusing on logical problem-solving and analytical thinking. Features exceptional performance on quantitative tasks, step-by-step deduction, and complex technical problems. Maintains 95%+ of o1-preview capabilities with production-ready stability. Ideal for scientific computing, financial analysis, and professional applications.",
1617
1613
  pricing: {
1618
- prompt: computeUsage(`$15.00 / 1M tokens`),
1619
- output: computeUsage(`$60.00 / 1M tokens`),
1614
+ prompt: pricing(`$15.00 / 1M tokens`),
1615
+ output: pricing(`$60.00 / 1M tokens`),
1620
1616
  },
1621
1617
  },
1622
1618
  /**/
@@ -1625,11 +1621,10 @@
1625
1621
  modelVariant: 'CHAT',
1626
1622
  modelTitle: 'o3-mini',
1627
1623
  modelName: 'o3-mini',
1628
- modelDescription: 'Cost-effective reasoning model optimized for academic and scientific problem-solving. Efficient performance on STEM tasks with deep mathematical and scientific knowledge. 128K context window.',
1624
+ modelDescription: 'Cost-effective reasoning model with 128K context window optimized for academic and scientific problem-solving. Features efficient performance on STEM tasks with specialized capabilities in mathematics, physics, chemistry, and computer science. Offers 80% of O1 performance on technical domains at significantly lower cost. Ideal for educational applications and research support.',
1629
1625
  pricing: {
1630
- prompt: computeUsage(`$3.00 / 1M tokens`),
1631
- output: computeUsage(`$12.00 / 1M tokens`),
1632
- // <- TODO: !! Unsure, check the pricing
1626
+ prompt: pricing(`$3.00 / 1M tokens`),
1627
+ output: pricing(`$12.00 / 1M tokens`),
1633
1628
  },
1634
1629
  },
1635
1630
  /**/
@@ -1638,10 +1633,10 @@
1638
1633
  modelVariant: 'CHAT',
1639
1634
  modelTitle: 'o1-mini-2024-09-12',
1640
1635
  modelName: 'o1-mini-2024-09-12',
1641
- modelDescription: "September 2024 version of O1-mini with balanced reasoning capabilities and cost-efficiency. Good for analytical tasks that don't require the full O1 model.",
1636
+ modelDescription: "September 2024 version of O1-mini with 128K context window featuring balanced reasoning capabilities and cost-efficiency. Includes 25% improvement in mathematical accuracy and enhanced performance on coding tasks compared to previous versions. Maintains efficient resource utilization while delivering improved results for analytical applications that don't require the full O1 model.",
1642
1637
  pricing: {
1643
- prompt: computeUsage(`$3.00 / 1M tokens`),
1644
- output: computeUsage(`$12.00 / 1M tokens`),
1638
+ prompt: pricing(`$3.00 / 1M tokens`),
1639
+ output: pricing(`$12.00 / 1M tokens`),
1645
1640
  },
1646
1641
  },
1647
1642
  /**/
@@ -1650,10 +1645,10 @@
1650
1645
  modelVariant: 'CHAT',
1651
1646
  modelTitle: 'gpt-3.5-turbo-16k-0613',
1652
1647
  modelName: 'gpt-3.5-turbo-16k-0613',
1653
- modelDescription: 'June 2023 version of GPT-3.5 Turbo with extended 16k token context window for processing longer conversations and documents.',
1648
+ modelDescription: "June 2023 version of GPT-3.5 Turbo with extended 16K token context window. Features good handling of longer conversations and documents with improved memory management across extended contexts. Includes knowledge cutoff from September 2021. Maintained for applications specifically designed for this version's behaviors and capabilities.",
1654
1649
  pricing: {
1655
- prompt: computeUsage(`$3.00 / 1M tokens`),
1656
- output: computeUsage(`$4.00 / 1M tokens`),
1650
+ prompt: pricing(`$3.00 / 1M tokens`),
1651
+ output: pricing(`$4.00 / 1M tokens`),
1657
1652
  },
1658
1653
  },
1659
1654
  /**/