@promptbook/openai 0.94.0-1 → 0.94.0-13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. package/README.md +6 -8
  2. package/esm/index.es.js +192 -159
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/ollama.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/openai.index.d.ts +2 -0
  6. package/esm/typings/src/execution/AvailableModel.d.ts +9 -1
  7. package/esm/typings/src/llm-providers/_common/filterModels.d.ts +2 -2
  8. package/esm/typings/src/llm-providers/{openai/computeUsage.d.ts → _common/utils/pricing.d.ts} +2 -2
  9. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +1 -1
  10. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionToolsOptions.d.ts +1 -1
  11. package/esm/typings/src/llm-providers/deepseek/DeepseekExecutionToolsOptions.d.ts +1 -1
  12. package/esm/typings/src/llm-providers/google/GoogleExecutionToolsOptions.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +36 -11
  14. package/esm/typings/src/llm-providers/ollama/OllamaExecutionToolsOptions.d.ts +23 -12
  15. package/esm/typings/src/llm-providers/ollama/createOllamaExecutionTools.d.ts +3 -3
  16. package/esm/typings/src/llm-providers/ollama/ollama-models.d.ts +14 -0
  17. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +1 -1
  18. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +91 -0
  19. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +12 -53
  20. package/esm/typings/src/llm-providers/openai/OpenAiExecutionToolsOptions.d.ts +1 -1
  21. package/esm/typings/src/llm-providers/openai/createOpenAiExecutionTools.d.ts +2 -0
  22. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -7
  23. package/esm/typings/src/version.d.ts +1 -1
  24. package/package.json +25 -2
  25. package/umd/index.umd.js +192 -158
  26. package/umd/index.umd.js.map +1 -1
  27. /package/esm/typings/src/llm-providers/{openai/computeUsage.test.d.ts → _common/utils/pricing.test.d.ts} +0 -0
package/umd/index.umd.js CHANGED
@@ -25,7 +25,7 @@
25
25
  * @generated
26
26
  * @see https://github.com/webgptorg/promptbook
27
27
  */
28
- const PROMPTBOOK_ENGINE_VERSION = '0.94.0-1';
28
+ const PROMPTBOOK_ENGINE_VERSION = '0.94.0-13';
29
29
  /**
30
30
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
31
31
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1264,11 +1264,11 @@
1264
1264
  }
1265
1265
 
1266
1266
  /**
1267
- * Function computeUsage will create price per one token based on the string value found on openai page
1267
+ * Create price per one token based on the string value found on openai page
1268
1268
  *
1269
1269
  * @private within the repository, used only as internal helper for `OPENAI_MODELS`
1270
1270
  */
1271
- function computeUsage(value) {
1271
+ function pricing(value) {
1272
1272
  const [price, tokens] = value.split(' / ');
1273
1273
  return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
1274
1274
  }
@@ -1302,10 +1302,10 @@
1302
1302
  modelVariant: 'COMPLETION',
1303
1303
  modelTitle: 'davinci-002',
1304
1304
  modelName: 'davinci-002',
1305
- modelDescription: 'Legacy completion model with strong performance on text generation tasks. Optimized for complex instructions and longer outputs.',
1305
+ modelDescription: 'Legacy completion model with 4K token context window. Excels at complex text generation, creative writing, and detailed content creation with strong contextual understanding. Optimized for instructions requiring nuanced outputs and extended reasoning. Suitable for applications needing high-quality text generation without conversation management.',
1306
1306
  pricing: {
1307
- prompt: computeUsage(`$2.00 / 1M tokens`),
1308
- output: computeUsage(`$2.00 / 1M tokens`),
1307
+ prompt: pricing(`$2.00 / 1M tokens`),
1308
+ output: pricing(`$2.00 / 1M tokens`),
1309
1309
  },
1310
1310
  },
1311
1311
  /**/
@@ -1320,10 +1320,10 @@
1320
1320
  modelVariant: 'CHAT',
1321
1321
  modelTitle: 'gpt-3.5-turbo-16k',
1322
1322
  modelName: 'gpt-3.5-turbo-16k',
1323
- modelDescription: 'GPT-3.5 Turbo with extended 16k token context length for handling longer conversations and documents.',
1323
+ modelDescription: 'Extended context GPT-3.5 Turbo with 16K token window. Maintains core capabilities of standard 3.5 Turbo while supporting longer conversations and documents. Features good balance of performance and cost for applications requiring more context than standard 4K models. Effective for document analysis, extended conversations, and multi-step reasoning tasks.',
1324
1324
  pricing: {
1325
- prompt: computeUsage(`$3.00 / 1M tokens`),
1326
- output: computeUsage(`$4.00 / 1M tokens`),
1325
+ prompt: pricing(`$3.00 / 1M tokens`),
1326
+ output: pricing(`$4.00 / 1M tokens`),
1327
1327
  },
1328
1328
  },
1329
1329
  /**/
@@ -1344,10 +1344,10 @@
1344
1344
  modelVariant: 'CHAT',
1345
1345
  modelTitle: 'gpt-4',
1346
1346
  modelName: 'gpt-4',
1347
- modelDescription: 'GPT-4 is a powerful language model with enhanced reasoning, instruction-following capabilities, and 8K context window. Optimized for complex tasks requiring deep understanding.',
1347
+ modelDescription: 'Powerful language model with 8K context window featuring sophisticated reasoning, instruction-following, and knowledge capabilities. Demonstrates strong performance on complex tasks requiring deep understanding and multi-step reasoning. Excels at code generation, logical analysis, and nuanced content creation. Suitable for advanced applications requiring high-quality outputs.',
1348
1348
  pricing: {
1349
- prompt: computeUsage(`$30.00 / 1M tokens`),
1350
- output: computeUsage(`$60.00 / 1M tokens`),
1349
+ prompt: pricing(`$30.00 / 1M tokens`),
1350
+ output: pricing(`$60.00 / 1M tokens`),
1351
1351
  },
1352
1352
  },
1353
1353
  /**/
@@ -1356,10 +1356,10 @@
1356
1356
  modelVariant: 'CHAT',
1357
1357
  modelTitle: 'gpt-4-32k',
1358
1358
  modelName: 'gpt-4-32k',
1359
- modelDescription: 'Extended context version of GPT-4 with a 32K token window for processing very long inputs and generating comprehensive responses for complex tasks.',
1359
+ modelDescription: 'Extended context version of GPT-4 with 32K token window. Maintains all capabilities of standard GPT-4 while supporting analysis of very lengthy documents, code bases, and conversations. Features enhanced ability to maintain context over long interactions and process detailed information from large inputs. Ideal for document analysis, legal review, and complex problem-solving.',
1360
1360
  pricing: {
1361
- prompt: computeUsage(`$60.00 / 1M tokens`),
1362
- output: computeUsage(`$120.00 / 1M tokens`),
1361
+ prompt: pricing(`$60.00 / 1M tokens`),
1362
+ output: pricing(`$120.00 / 1M tokens`),
1363
1363
  },
1364
1364
  },
1365
1365
  /**/
@@ -1379,10 +1379,10 @@
1379
1379
  modelVariant: 'CHAT',
1380
1380
  modelTitle: 'gpt-4-turbo-2024-04-09',
1381
1381
  modelName: 'gpt-4-turbo-2024-04-09',
1382
- modelDescription: 'Latest stable GPT-4 Turbo model from April 2024 with enhanced reasoning and context handling capabilities. Offers 128K context window and improved performance.',
1382
+ modelDescription: 'Latest stable GPT-4 Turbo from April 2024 with 128K context window. Features enhanced reasoning chains, improved factual accuracy with 40% reduction in hallucinations, and better instruction following compared to earlier versions. Includes advanced function calling capabilities and knowledge up to April 2024. Provides optimal performance for enterprise applications requiring reliability.',
1383
1383
  pricing: {
1384
- prompt: computeUsage(`$10.00 / 1M tokens`),
1385
- output: computeUsage(`$30.00 / 1M tokens`),
1384
+ prompt: pricing(`$10.00 / 1M tokens`),
1385
+ output: pricing(`$30.00 / 1M tokens`),
1386
1386
  },
1387
1387
  },
1388
1388
  /**/
@@ -1391,10 +1391,10 @@
1391
1391
  modelVariant: 'CHAT',
1392
1392
  modelTitle: 'gpt-3.5-turbo-1106',
1393
1393
  modelName: 'gpt-3.5-turbo-1106',
1394
- modelDescription: 'November 2023 version of GPT-3.5 Turbo with improved instruction following and a 16K token context window.',
1394
+ modelDescription: 'November 2023 version of GPT-3.5 Turbo with 16K token context window. Features improved instruction following, more consistent output formatting, and enhanced function calling capabilities. Includes knowledge cutoff from April 2023. Suitable for applications requiring good performance at lower cost than GPT-4 models.',
1395
1395
  pricing: {
1396
- prompt: computeUsage(`$1.00 / 1M tokens`),
1397
- output: computeUsage(`$2.00 / 1M tokens`),
1396
+ prompt: pricing(`$1.00 / 1M tokens`),
1397
+ output: pricing(`$2.00 / 1M tokens`),
1398
1398
  },
1399
1399
  },
1400
1400
  /**/
@@ -1403,10 +1403,10 @@
1403
1403
  modelVariant: 'CHAT',
1404
1404
  modelTitle: 'gpt-4-turbo',
1405
1405
  modelName: 'gpt-4-turbo',
1406
- modelDescription: 'More capable model than GPT-4 with improved instruction following, function calling and a 128K token context window for handling very large documents.',
1406
+ modelDescription: 'More capable and cost-efficient version of GPT-4 with 128K token context window. Features improved instruction following, advanced function calling capabilities, and better performance on coding tasks. Maintains superior reasoning and knowledge while offering substantial cost reduction compared to base GPT-4. Ideal for complex applications requiring extensive context processing.',
1407
1407
  pricing: {
1408
- prompt: computeUsage(`$10.00 / 1M tokens`),
1409
- output: computeUsage(`$30.00 / 1M tokens`),
1408
+ prompt: pricing(`$10.00 / 1M tokens`),
1409
+ output: pricing(`$30.00 / 1M tokens`),
1410
1410
  },
1411
1411
  },
1412
1412
  /**/
@@ -1415,10 +1415,10 @@
1415
1415
  modelVariant: 'COMPLETION',
1416
1416
  modelTitle: 'gpt-3.5-turbo-instruct-0914',
1417
1417
  modelName: 'gpt-3.5-turbo-instruct-0914',
1418
- modelDescription: 'September 2023 version of GPT-3.5 Turbo optimized for completion-style instruction following with a 4K context window.',
1418
+ modelDescription: 'September 2023 version of GPT-3.5 Turbo Instruct with 4K context window. Optimized for completion-style instruction following with deterministic responses. Better suited than chat models for applications requiring specific formatted outputs without conversation management. Knowledge cutoff from September 2021.',
1419
1419
  pricing: {
1420
- prompt: computeUsage(`$1.50 / 1M tokens`),
1421
- output: computeUsage(`$2.00 / 1M tokens`), // <- For gpt-3.5-turbo-instruct
1420
+ prompt: pricing(`$1.50 / 1M tokens`),
1421
+ output: pricing(`$2.00 / 1M tokens`),
1422
1422
  },
1423
1423
  },
1424
1424
  /**/
@@ -1427,10 +1427,10 @@
1427
1427
  modelVariant: 'COMPLETION',
1428
1428
  modelTitle: 'gpt-3.5-turbo-instruct',
1429
1429
  modelName: 'gpt-3.5-turbo-instruct',
1430
- modelDescription: 'Optimized version of GPT-3.5 for completion-style API with good instruction following and a 4K token context window.',
1430
+ modelDescription: 'Optimized version of GPT-3.5 for completion-style API with 4K token context window. Features strong instruction following with single-turn design rather than multi-turn conversation. Provides more consistent, deterministic outputs compared to chat models. Well-suited for templated content generation and structured text transformation tasks.',
1431
1431
  pricing: {
1432
- prompt: computeUsage(`$1.50 / 1M tokens`),
1433
- output: computeUsage(`$2.00 / 1M tokens`),
1432
+ prompt: pricing(`$1.50 / 1M tokens`),
1433
+ output: pricing(`$2.00 / 1M tokens`),
1434
1434
  },
1435
1435
  },
1436
1436
  /**/
@@ -1445,10 +1445,10 @@
1445
1445
  modelVariant: 'CHAT',
1446
1446
  modelTitle: 'gpt-3.5-turbo',
1447
1447
  modelName: 'gpt-3.5-turbo',
1448
- modelDescription: 'Latest version of GPT-3.5 Turbo with improved performance and instruction following capabilities. Default 4K context window with options for 16K.',
1448
+ modelDescription: 'Latest version of GPT-3.5 Turbo with 4K token default context window (16K available). Features continually improved performance with enhanced instruction following and reduced hallucinations. Offers excellent balance between capability and cost efficiency. Suitable for most general-purpose applications requiring good AI capabilities at reasonable cost.',
1449
1449
  pricing: {
1450
- prompt: computeUsage(`$0.50 / 1M tokens`),
1451
- output: computeUsage(`$1.50 / 1M tokens`),
1450
+ prompt: pricing(`$0.50 / 1M tokens`),
1451
+ output: pricing(`$1.50 / 1M tokens`),
1452
1452
  },
1453
1453
  },
1454
1454
  /**/
@@ -1457,10 +1457,10 @@
1457
1457
  modelVariant: 'CHAT',
1458
1458
  modelTitle: 'gpt-3.5-turbo-0301',
1459
1459
  modelName: 'gpt-3.5-turbo-0301',
1460
- modelDescription: 'March 2023 version of GPT-3.5 Turbo with a 4K token context window. Legacy model maintained for backward compatibility.',
1460
+ modelDescription: 'March 2023 version of GPT-3.5 Turbo with 4K token context window. Legacy model maintained for backward compatibility with specific application behaviors. Features solid conversational abilities and basic instruction following. Knowledge cutoff from September 2021. Suitable for applications explicitly designed for this version.',
1461
1461
  pricing: {
1462
- prompt: computeUsage(`$1.50 / 1M tokens`),
1463
- output: computeUsage(`$2.00 / 1M tokens`),
1462
+ prompt: pricing(`$1.50 / 1M tokens`),
1463
+ output: pricing(`$2.00 / 1M tokens`),
1464
1464
  },
1465
1465
  },
1466
1466
  /**/
@@ -1469,10 +1469,10 @@
1469
1469
  modelVariant: 'COMPLETION',
1470
1470
  modelTitle: 'babbage-002',
1471
1471
  modelName: 'babbage-002',
1472
- modelDescription: 'Efficient legacy completion model with a good balance of performance and speed. Suitable for straightforward text generation tasks.',
1472
+ modelDescription: 'Efficient legacy completion model with 4K context window balancing performance and speed. Features moderate reasoning capabilities with focus on straightforward text generation tasks. Significantly more efficient than davinci models while maintaining adequate quality for many applications. Suitable for high-volume, cost-sensitive text generation needs.',
1473
1473
  pricing: {
1474
- prompt: computeUsage(`$0.40 / 1M tokens`),
1475
- output: computeUsage(`$0.40 / 1M tokens`),
1474
+ prompt: pricing(`$0.40 / 1M tokens`),
1475
+ output: pricing(`$0.40 / 1M tokens`),
1476
1476
  },
1477
1477
  },
1478
1478
  /**/
@@ -1481,10 +1481,10 @@
1481
1481
  modelVariant: 'CHAT',
1482
1482
  modelTitle: 'gpt-4-1106-preview',
1483
1483
  modelName: 'gpt-4-1106-preview',
1484
- modelDescription: 'November 2023 preview version of GPT-4 Turbo with improved instruction following and a 128K token context window.',
1484
+ modelDescription: 'November 2023 preview version of GPT-4 Turbo with 128K token context window. Features improved instruction following, better function calling capabilities, and enhanced reasoning. Includes knowledge cutoff from April 2023. Suitable for complex applications requiring extensive document understanding and sophisticated interactions.',
1485
1485
  pricing: {
1486
- prompt: computeUsage(`$10.00 / 1M tokens`),
1487
- output: computeUsage(`$30.00 / 1M tokens`),
1486
+ prompt: pricing(`$10.00 / 1M tokens`),
1487
+ output: pricing(`$30.00 / 1M tokens`),
1488
1488
  },
1489
1489
  },
1490
1490
  /**/
@@ -1493,10 +1493,10 @@
1493
1493
  modelVariant: 'CHAT',
1494
1494
  modelTitle: 'gpt-4-0125-preview',
1495
1495
  modelName: 'gpt-4-0125-preview',
1496
- modelDescription: 'January 2024 preview version of GPT-4 Turbo with improved reasoning capabilities and a 128K token context window.',
1496
+ modelDescription: 'January 2024 preview version of GPT-4 Turbo with 128K token context window. Features improved reasoning capabilities, enhanced tool use, and more reliable function calling. Includes knowledge cutoff from October 2023. Offers better performance on complex logical tasks and more consistent outputs than previous preview versions.',
1497
1497
  pricing: {
1498
- prompt: computeUsage(`$10.00 / 1M tokens`),
1499
- output: computeUsage(`$30.00 / 1M tokens`),
1498
+ prompt: pricing(`$10.00 / 1M tokens`),
1499
+ output: pricing(`$30.00 / 1M tokens`),
1500
1500
  },
1501
1501
  },
1502
1502
  /**/
@@ -1511,10 +1511,10 @@
1511
1511
  modelVariant: 'CHAT',
1512
1512
  modelTitle: 'gpt-3.5-turbo-0125',
1513
1513
  modelName: 'gpt-3.5-turbo-0125',
1514
- modelDescription: 'January 2024 version of GPT-3.5 Turbo with improved reasoning capabilities and a 16K token context window.',
1514
+ modelDescription: 'January 2024 version of GPT-3.5 Turbo with 16K token context window. Features improved reasoning capabilities, better instruction adherence, and reduced hallucinations compared to previous versions. Includes knowledge cutoff from September 2021. Provides good performance for most general applications at reasonable cost.',
1515
1515
  pricing: {
1516
- prompt: computeUsage(`$0.50 / 1M tokens`),
1517
- output: computeUsage(`$1.50 / 1M tokens`),
1516
+ prompt: pricing(`$0.50 / 1M tokens`),
1517
+ output: pricing(`$1.50 / 1M tokens`),
1518
1518
  },
1519
1519
  },
1520
1520
  /**/
@@ -1523,10 +1523,10 @@
1523
1523
  modelVariant: 'CHAT',
1524
1524
  modelTitle: 'gpt-4-turbo-preview',
1525
1525
  modelName: 'gpt-4-turbo-preview',
1526
- modelDescription: 'Preview version of GPT-4 Turbo that points to the latest model version. Features improved instruction following, 128K token context window and lower latency.',
1526
+ modelDescription: 'Preview version of GPT-4 Turbo with 128K token context window that points to the latest development model. Features cutting-edge improvements to instruction following, knowledge representation, and tool use capabilities. Provides access to newest features but may have occasional behavior changes. Best for non-critical applications wanting latest capabilities.',
1527
1527
  pricing: {
1528
- prompt: computeUsage(`$10.00 / 1M tokens`),
1529
- output: computeUsage(`$30.00 / 1M tokens`),
1528
+ prompt: pricing(`$10.00 / 1M tokens`),
1529
+ output: pricing(`$30.00 / 1M tokens`),
1530
1530
  },
1531
1531
  },
1532
1532
  /**/
@@ -1535,11 +1535,10 @@
1535
1535
  modelVariant: 'EMBEDDING',
1536
1536
  modelTitle: 'text-embedding-3-large',
1537
1537
  modelName: 'text-embedding-3-large',
1538
- modelDescription: "OpenAI's most capable text embedding model designed for high-quality embeddings for complex similarity tasks and information retrieval.",
1538
+ modelDescription: "OpenAI's most capable text embedding model generating 3072-dimensional vectors. Designed for high-quality embeddings for complex similarity tasks, clustering, and information retrieval. Features enhanced cross-lingual capabilities and significantly improved performance on retrieval and classification benchmarks. Ideal for sophisticated RAG systems and semantic search applications.",
1539
1539
  pricing: {
1540
- prompt: computeUsage(`$0.13 / 1M tokens`),
1541
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1542
- output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
1540
+ prompt: pricing(`$0.13 / 1M tokens`),
1541
+ output: 0,
1543
1542
  },
1544
1543
  },
1545
1544
  /**/
@@ -1548,11 +1547,10 @@
1548
1547
  modelVariant: 'EMBEDDING',
1549
1548
  modelTitle: 'text-embedding-3-small',
1550
1549
  modelName: 'text-embedding-3-small',
1551
- modelDescription: 'Cost-effective embedding model with good performance for simpler tasks like text similarity and retrieval. Good balance of quality and efficiency.',
1550
+ modelDescription: 'Cost-effective embedding model generating 1536-dimensional vectors. Balances quality and efficiency for simpler tasks while maintaining good performance on text similarity and retrieval applications. Offers 20% better quality than ada-002 at significantly lower cost. Ideal for production embedding applications with cost constraints.',
1552
1551
  pricing: {
1553
- prompt: computeUsage(`$0.02 / 1M tokens`),
1554
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1555
- output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
1552
+ prompt: pricing(`$0.02 / 1M tokens`),
1553
+ output: 0,
1556
1554
  },
1557
1555
  },
1558
1556
  /**/
@@ -1561,10 +1559,10 @@
1561
1559
  modelVariant: 'CHAT',
1562
1560
  modelTitle: 'gpt-3.5-turbo-0613',
1563
1561
  modelName: 'gpt-3.5-turbo-0613',
1564
- modelDescription: 'June 2023 version of GPT-3.5 Turbo with function calling capabilities and a 4K token context window.',
1562
+ modelDescription: "June 2023 version of GPT-3.5 Turbo with 4K token context window. Features function calling capabilities for structured data extraction and API interaction. Includes knowledge cutoff from September 2021. Maintained for applications specifically designed for this version's behaviors and capabilities.",
1565
1563
  pricing: {
1566
- prompt: computeUsage(`$1.50 / 1M tokens`),
1567
- output: computeUsage(`$2.00 / 1M tokens`),
1564
+ prompt: pricing(`$1.50 / 1M tokens`),
1565
+ output: pricing(`$2.00 / 1M tokens`),
1568
1566
  },
1569
1567
  },
1570
1568
  /**/
@@ -1573,11 +1571,10 @@
1573
1571
  modelVariant: 'EMBEDDING',
1574
1572
  modelTitle: 'text-embedding-ada-002',
1575
1573
  modelName: 'text-embedding-ada-002',
1576
- modelDescription: 'Legacy text embedding model suitable for text similarity and retrieval augmented generation use cases. Replaced by newer embedding-3 models.',
1574
+ modelDescription: 'Legacy text embedding model generating 1536-dimensional vectors suitable for text similarity and retrieval applications. Processes up to 8K tokens per request with consistent embedding quality. While superseded by newer embedding-3 models, still maintains adequate performance for many semantic search and classification tasks.',
1577
1575
  pricing: {
1578
- prompt: computeUsage(`$0.1 / 1M tokens`),
1579
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1580
- output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
1576
+ prompt: pricing(`$0.1 / 1M tokens`),
1577
+ output: 0,
1581
1578
  },
1582
1579
  },
1583
1580
  /**/
@@ -1604,10 +1601,10 @@
1604
1601
  modelVariant: 'CHAT',
1605
1602
  modelTitle: 'gpt-4o-2024-05-13',
1606
1603
  modelName: 'gpt-4o-2024-05-13',
1607
- modelDescription: 'May 2024 version of GPT-4o with enhanced multimodal capabilities, improved reasoning, and optimized for vision, audio and chat at lower latencies.',
1604
+ modelDescription: 'May 2024 version of GPT-4o with 128K context window. Features enhanced multimodal capabilities including superior image understanding (up to 20MP), audio processing, and improved reasoning. Optimized for 2x lower latency than GPT-4 Turbo while maintaining high performance. Includes knowledge up to October 2023. Ideal for production applications requiring reliable multimodal capabilities.',
1608
1605
  pricing: {
1609
- prompt: computeUsage(`$5.00 / 1M tokens`),
1610
- output: computeUsage(`$15.00 / 1M tokens`),
1606
+ prompt: pricing(`$5.00 / 1M tokens`),
1607
+ output: pricing(`$15.00 / 1M tokens`),
1611
1608
  },
1612
1609
  },
1613
1610
  /**/
@@ -1616,10 +1613,10 @@
1616
1613
  modelVariant: 'CHAT',
1617
1614
  modelTitle: 'gpt-4o',
1618
1615
  modelName: 'gpt-4o',
1619
- modelDescription: "OpenAI's most advanced multimodal model optimized for performance, speed, and cost. Capable of vision, reasoning, and high quality text generation.",
1616
+ modelDescription: "OpenAI's most advanced general-purpose multimodal model with 128K context window. Optimized for balanced performance, speed, and cost with 2x faster responses than GPT-4 Turbo. Features excellent vision processing, audio understanding, reasoning, and text generation quality. Represents optimal balance of capability and efficiency for most advanced applications.",
1620
1617
  pricing: {
1621
- prompt: computeUsage(`$5.00 / 1M tokens`),
1622
- output: computeUsage(`$15.00 / 1M tokens`),
1618
+ prompt: pricing(`$5.00 / 1M tokens`),
1619
+ output: pricing(`$15.00 / 1M tokens`),
1623
1620
  },
1624
1621
  },
1625
1622
  /**/
@@ -1628,10 +1625,10 @@
1628
1625
  modelVariant: 'CHAT',
1629
1626
  modelTitle: 'gpt-4o-mini',
1630
1627
  modelName: 'gpt-4o-mini',
1631
- modelDescription: 'Smaller, more cost-effective version of GPT-4o with good performance across text, vision, and audio tasks at reduced complexity.',
1628
+ modelDescription: 'Smaller, more cost-effective version of GPT-4o with 128K context window. Maintains impressive capabilities across text, vision, and audio tasks while operating at significantly lower cost. Features 3x faster inference than GPT-4o with good performance on general tasks. Excellent for applications requiring good quality multimodal capabilities at scale.',
1632
1629
  pricing: {
1633
- prompt: computeUsage(`$0.15 / 1M tokens`),
1634
- output: computeUsage(`$0.60 / 1M tokens`),
1630
+ prompt: pricing(`$0.15 / 1M tokens`),
1631
+ output: pricing(`$0.60 / 1M tokens`),
1635
1632
  },
1636
1633
  },
1637
1634
  /**/
@@ -1640,10 +1637,10 @@
1640
1637
  modelVariant: 'CHAT',
1641
1638
  modelTitle: 'o1-preview',
1642
1639
  modelName: 'o1-preview',
1643
- modelDescription: 'Advanced reasoning model with exceptional performance on complex logical, mathematical, and analytical tasks. Built for deep reasoning and specialized professional tasks.',
1640
+ modelDescription: 'Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Features exceptional step-by-step problem-solving capabilities, advanced mathematical and scientific reasoning, and superior performance on STEM-focused problems. Significantly outperforms GPT-4 on quantitative reasoning benchmarks. Ideal for professional and specialized applications.',
1644
1641
  pricing: {
1645
- prompt: computeUsage(`$15.00 / 1M tokens`),
1646
- output: computeUsage(`$60.00 / 1M tokens`),
1642
+ prompt: pricing(`$15.00 / 1M tokens`),
1643
+ output: pricing(`$60.00 / 1M tokens`),
1647
1644
  },
1648
1645
  },
1649
1646
  /**/
@@ -1652,11 +1649,10 @@
1652
1649
  modelVariant: 'CHAT',
1653
1650
  modelTitle: 'o1-preview-2024-09-12',
1654
1651
  modelName: 'o1-preview-2024-09-12',
1655
- modelDescription: 'September 2024 version of O1 preview with specialized reasoning capabilities for complex tasks requiring precise analytical thinking.',
1656
- // <- TODO: [💩] Some better system to organize these date suffixes and versions
1652
+ modelDescription: 'September 2024 version of O1 preview with 128K context window. Features specialized reasoning capabilities with 30% improvement on mathematical and scientific accuracy over previous versions. Includes enhanced support for formal logic, statistical analysis, and technical domains. Optimized for professional applications requiring precise analytical thinking and rigorous methodologies.',
1657
1653
  pricing: {
1658
- prompt: computeUsage(`$15.00 / 1M tokens`),
1659
- output: computeUsage(`$60.00 / 1M tokens`),
1654
+ prompt: pricing(`$15.00 / 1M tokens`),
1655
+ output: pricing(`$60.00 / 1M tokens`),
1660
1656
  },
1661
1657
  },
1662
1658
  /**/
@@ -1665,10 +1661,10 @@
1665
1661
  modelVariant: 'CHAT',
1666
1662
  modelTitle: 'o1-mini',
1667
1663
  modelName: 'o1-mini',
1668
- modelDescription: 'Smaller, cost-effective version of the O1 model with good performance on reasoning tasks while maintaining efficiency for everyday analytical use.',
1664
+ modelDescription: 'Smaller, cost-effective version of the O1 model with 128K context window. Maintains strong analytical reasoning abilities while reducing computational requirements by 70%. Features good performance on mathematical, logical, and scientific tasks at significantly lower cost than full O1. Excellent for everyday analytical applications that benefit from reasoning focus.',
1669
1665
  pricing: {
1670
- prompt: computeUsage(`$3.00 / 1M tokens`),
1671
- output: computeUsage(`$12.00 / 1M tokens`),
1666
+ prompt: pricing(`$3.00 / 1M tokens`),
1667
+ output: pricing(`$12.00 / 1M tokens`),
1672
1668
  },
1673
1669
  },
1674
1670
  /**/
@@ -1677,10 +1673,10 @@
1677
1673
  modelVariant: 'CHAT',
1678
1674
  modelTitle: 'o1',
1679
1675
  modelName: 'o1',
1680
- modelDescription: "OpenAI's advanced reasoning model focused on logic and problem-solving. Designed for complex analytical tasks with rigorous step-by-step reasoning. 128K context window.",
1676
+ modelDescription: "OpenAI's advanced reasoning model with 128K context window focusing on logical problem-solving and analytical thinking. Features exceptional performance on quantitative tasks, step-by-step deduction, and complex technical problems. Maintains 95%+ of o1-preview capabilities with production-ready stability. Ideal for scientific computing, financial analysis, and professional applications.",
1681
1677
  pricing: {
1682
- prompt: computeUsage(`$15.00 / 1M tokens`),
1683
- output: computeUsage(`$60.00 / 1M tokens`),
1678
+ prompt: pricing(`$15.00 / 1M tokens`),
1679
+ output: pricing(`$60.00 / 1M tokens`),
1684
1680
  },
1685
1681
  },
1686
1682
  /**/
@@ -1689,11 +1685,10 @@
1689
1685
  modelVariant: 'CHAT',
1690
1686
  modelTitle: 'o3-mini',
1691
1687
  modelName: 'o3-mini',
1692
- modelDescription: 'Cost-effective reasoning model optimized for academic and scientific problem-solving. Efficient performance on STEM tasks with deep mathematical and scientific knowledge. 128K context window.',
1688
+ modelDescription: 'Cost-effective reasoning model with 128K context window optimized for academic and scientific problem-solving. Features efficient performance on STEM tasks with specialized capabilities in mathematics, physics, chemistry, and computer science. Offers 80% of O1 performance on technical domains at significantly lower cost. Ideal for educational applications and research support.',
1693
1689
  pricing: {
1694
- prompt: computeUsage(`$3.00 / 1M tokens`),
1695
- output: computeUsage(`$12.00 / 1M tokens`),
1696
- // <- TODO: !! Unsure, check the pricing
1690
+ prompt: pricing(`$3.00 / 1M tokens`),
1691
+ output: pricing(`$12.00 / 1M tokens`),
1697
1692
  },
1698
1693
  },
1699
1694
  /**/
@@ -1702,10 +1697,10 @@
1702
1697
  modelVariant: 'CHAT',
1703
1698
  modelTitle: 'o1-mini-2024-09-12',
1704
1699
  modelName: 'o1-mini-2024-09-12',
1705
- modelDescription: "September 2024 version of O1-mini with balanced reasoning capabilities and cost-efficiency. Good for analytical tasks that don't require the full O1 model.",
1700
+ modelDescription: "September 2024 version of O1-mini with 128K context window featuring balanced reasoning capabilities and cost-efficiency. Includes 25% improvement in mathematical accuracy and enhanced performance on coding tasks compared to previous versions. Maintains efficient resource utilization while delivering improved results for analytical applications that don't require the full O1 model.",
1706
1701
  pricing: {
1707
- prompt: computeUsage(`$3.00 / 1M tokens`),
1708
- output: computeUsage(`$12.00 / 1M tokens`),
1702
+ prompt: pricing(`$3.00 / 1M tokens`),
1703
+ output: pricing(`$12.00 / 1M tokens`),
1709
1704
  },
1710
1705
  },
1711
1706
  /**/
@@ -1714,10 +1709,10 @@
1714
1709
  modelVariant: 'CHAT',
1715
1710
  modelTitle: 'gpt-3.5-turbo-16k-0613',
1716
1711
  modelName: 'gpt-3.5-turbo-16k-0613',
1717
- modelDescription: 'June 2023 version of GPT-3.5 Turbo with extended 16k token context window for processing longer conversations and documents.',
1712
+ modelDescription: "June 2023 version of GPT-3.5 Turbo with extended 16K token context window. Features good handling of longer conversations and documents with improved memory management across extended contexts. Includes knowledge cutoff from September 2021. Maintained for applications specifically designed for this version's behaviors and capabilities.",
1718
1713
  pricing: {
1719
- prompt: computeUsage(`$3.00 / 1M tokens`),
1720
- output: computeUsage(`$4.00 / 1M tokens`),
1714
+ prompt: pricing(`$3.00 / 1M tokens`),
1715
+ output: pricing(`$4.00 / 1M tokens`),
1721
1716
  },
1722
1717
  },
1723
1718
  /**/
@@ -1792,15 +1787,15 @@
1792
1787
  */
1793
1788
 
1794
1789
  /**
1795
- * Execution Tools for calling OpenAI API
1790
+ * Execution Tools for calling OpenAI API or other OpeenAI compatible provider
1796
1791
  *
1797
1792
  * @public exported from `@promptbook/openai`
1798
1793
  */
1799
- class OpenAiExecutionTools {
1794
+ class OpenAiCompatibleExecutionTools {
1800
1795
  /**
1801
- * Creates OpenAI Execution Tools.
1796
+ * Creates OpenAI compatible Execution Tools.
1802
1797
  *
1803
- * @param options which are relevant are directly passed to the OpenAI client
1798
+ * @param options which are relevant are directly passed to the OpenAI compatible client
1804
1799
  */
1805
1800
  constructor(options) {
1806
1801
  this.options = options;
@@ -1813,12 +1808,6 @@
1813
1808
  minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_MAX_REQUESTS_PER_MINUTE),
1814
1809
  });
1815
1810
  }
1816
- get title() {
1817
- return 'OpenAI';
1818
- }
1819
- get description() {
1820
- return 'Use all models provided by OpenAI';
1821
- }
1822
1811
  async getClient() {
1823
1812
  if (this.client === null) {
1824
1813
  // Note: Passing only OpenAI relevant options to OpenAI constructor
@@ -1829,18 +1818,6 @@
1829
1818
  }
1830
1819
  return this.client;
1831
1820
  }
1832
- /*
1833
- Note: Commenting this out to avoid circular dependency
1834
- /**
1835
- * Create (sub)tools for calling OpenAI API Assistants
1836
- *
1837
- * @param assistantId Which assistant to use
1838
- * @returns Tools for calling OpenAI API Assistants with same token
1839
- * /
1840
- public createAssistantSubtools(assistantId: string_token): OpenAiAssistantExecutionTools {
1841
- return new OpenAiAssistantExecutionTools({ ...this.options, assistantId });
1842
- }
1843
- */
1844
1821
  /**
1845
1822
  * Check the `options` passed to `constructor`
1846
1823
  */
@@ -1849,25 +1826,36 @@
1849
1826
  // TODO: [🎍] Do here a real check that API is online, working and API key is correct
1850
1827
  }
1851
1828
  /**
1852
- * List all available OpenAI models that can be used
1829
+ * List all available OpenAI compatible models that can be used
1853
1830
  */
1854
- listModels() {
1855
- /*
1856
- Note: Dynamic lising of the models
1857
- const models = await this.openai.models.list({});
1858
-
1859
- console.log({ models });
1860
- console.log(models.data);
1861
- */
1862
- return OPENAI_MODELS;
1831
+ async listModels() {
1832
+ const client = await this.getClient();
1833
+ const rawModelsList = await client.models.list();
1834
+ const availableModels = rawModelsList.data
1835
+ .sort((a, b) => (a.created > b.created ? 1 : -1))
1836
+ .map((modelFromApi) => {
1837
+ const modelFromList = this.HARDCODED_MODELS.find(({ modelName }) => modelName === modelFromApi.id ||
1838
+ modelName.startsWith(modelFromApi.id) ||
1839
+ modelFromApi.id.startsWith(modelName));
1840
+ if (modelFromList !== undefined) {
1841
+ return modelFromList;
1842
+ }
1843
+ return {
1844
+ modelVariant: 'CHAT',
1845
+ modelTitle: modelFromApi.id,
1846
+ modelName: modelFromApi.id,
1847
+ modelDescription: '',
1848
+ };
1849
+ });
1850
+ return availableModels;
1863
1851
  }
1864
1852
  /**
1865
- * Calls OpenAI API to use a chat model.
1853
+ * Calls OpenAI compatible API to use a chat model.
1866
1854
  */
1867
1855
  async callChatModel(prompt) {
1868
1856
  var _a;
1869
1857
  if (this.options.isVerbose) {
1870
- console.info('💬 OpenAI callChatModel call', { prompt });
1858
+ console.info(`💬 ${this.title} callChatModel call`, { prompt });
1871
1859
  }
1872
1860
  const { content, parameters, modelRequirements, format } = prompt;
1873
1861
  const client = await this.getClient();
@@ -1928,20 +1916,20 @@
1928
1916
  }
1929
1917
  const complete = $getCurrentDate();
1930
1918
  if (!rawResponse.choices[0]) {
1931
- throw new PipelineExecutionError('No choises from OpenAI');
1919
+ throw new PipelineExecutionError(`No choises from ${this.title}`);
1932
1920
  }
1933
1921
  if (rawResponse.choices.length > 1) {
1934
1922
  // TODO: This should be maybe only warning
1935
- throw new PipelineExecutionError('More than one choise from OpenAI');
1923
+ throw new PipelineExecutionError(`More than one choise from ${this.title}`);
1936
1924
  }
1937
1925
  const resultContent = rawResponse.choices[0].message.content;
1938
- const usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
1926
+ const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
1939
1927
  if (resultContent === null) {
1940
- throw new PipelineExecutionError('No response message from OpenAI');
1928
+ throw new PipelineExecutionError(`No response message from ${this.title}`);
1941
1929
  }
1942
1930
  return exportJson({
1943
1931
  name: 'promptResult',
1944
- message: `Result of \`OpenAiExecutionTools.callChatModel\``,
1932
+ message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
1945
1933
  order: [],
1946
1934
  value: {
1947
1935
  content: resultContent,
@@ -1964,7 +1952,7 @@
1964
1952
  async callCompletionModel(prompt) {
1965
1953
  var _a;
1966
1954
  if (this.options.isVerbose) {
1967
- console.info('🖋 OpenAI callCompletionModel call', { prompt });
1955
+ console.info(`🖋 ${this.title} callCompletionModel call`, { prompt });
1968
1956
  }
1969
1957
  const { content, parameters, modelRequirements } = prompt;
1970
1958
  const client = await this.getClient();
@@ -2005,17 +1993,17 @@
2005
1993
  }
2006
1994
  const complete = $getCurrentDate();
2007
1995
  if (!rawResponse.choices[0]) {
2008
- throw new PipelineExecutionError('No choises from OpenAI');
1996
+ throw new PipelineExecutionError(`No choises from ${this.title}`);
2009
1997
  }
2010
1998
  if (rawResponse.choices.length > 1) {
2011
1999
  // TODO: This should be maybe only warning
2012
- throw new PipelineExecutionError('More than one choise from OpenAI');
2000
+ throw new PipelineExecutionError(`More than one choise from ${this.title}`);
2013
2001
  }
2014
2002
  const resultContent = rawResponse.choices[0].text;
2015
- const usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
2003
+ const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
2016
2004
  return exportJson({
2017
2005
  name: 'promptResult',
2018
- message: `Result of \`OpenAiExecutionTools.callCompletionModel\``,
2006
+ message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
2019
2007
  order: [],
2020
2008
  value: {
2021
2009
  content: resultContent,
@@ -2033,11 +2021,11 @@
2033
2021
  });
2034
2022
  }
2035
2023
  /**
2036
- * Calls OpenAI API to use a embedding model
2024
+ * Calls OpenAI compatible API to use a embedding model
2037
2025
  */
2038
2026
  async callEmbeddingModel(prompt) {
2039
2027
  if (this.options.isVerbose) {
2040
- console.info('🖋 OpenAI embedding call', { prompt });
2028
+ console.info(`🖋 ${this.title} embedding call`, { prompt });
2041
2029
  }
2042
2030
  const { content, parameters, modelRequirements } = prompt;
2043
2031
  const client = await this.getClient();
@@ -2072,12 +2060,12 @@
2072
2060
  throw new PipelineExecutionError(`Expected exactly 1 data item in response, got ${rawResponse.data.length}`);
2073
2061
  }
2074
2062
  const resultContent = rawResponse.data[0].embedding;
2075
- const usage = computeOpenAiUsage(content || '', '',
2063
+ const usage = this.computeUsage(content || '', '',
2076
2064
  // <- Note: Embedding does not have result content
2077
2065
  rawResponse);
2078
2066
  return exportJson({
2079
2067
  name: 'promptResult',
2080
- message: `Result of \`OpenAiExecutionTools.callEmbeddingModel\``,
2068
+ message: `Result of \`OpenAiCompatibleExecutionTools.callEmbeddingModel\``,
2081
2069
  order: [],
2082
2070
  value: {
2083
2071
  content: resultContent,
@@ -2100,18 +2088,69 @@
2100
2088
  */
2101
2089
  getDefaultModel(defaultModelName) {
2102
2090
  // Note: Match exact or prefix for model families
2103
- const model = OPENAI_MODELS.find(({ modelName }) => modelName === defaultModelName || modelName.startsWith(defaultModelName));
2091
+ const model = this.HARDCODED_MODELS.find(({ modelName }) => modelName === defaultModelName || modelName.startsWith(defaultModelName));
2104
2092
  if (model === undefined) {
2105
- throw new UnexpectedError(spaceTrim__default["default"]((block) => `
2106
- Cannot find model in OpenAI models with name "${defaultModelName}" which should be used as default.
2093
+ throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
2094
+ Cannot find model in ${this.title} models with name "${defaultModelName}" which should be used as default.
2107
2095
 
2108
2096
  Available models:
2109
- ${block(OPENAI_MODELS.map(({ modelName }) => `- "${modelName}"`).join('\n'))}
2097
+ ${block(this.HARDCODED_MODELS.map(({ modelName }) => `- "${modelName}"`).join('\n'))}
2098
+
2099
+ Model "${defaultModelName}" is probably not available anymore, not installed, inaccessible or misconfigured.
2110
2100
 
2111
2101
  `));
2112
2102
  }
2113
2103
  return model;
2114
2104
  }
2105
+ }
2106
+ /**
2107
+ * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
2108
+ * TODO: [🛄] Maybe make custom `OpenAiCompatibleError`
2109
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
2110
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
2111
+ */
2112
+
2113
+ /**
2114
+ * Execution Tools for calling OpenAI API
2115
+ *
2116
+ * @public exported from `@promptbook/openai`
2117
+ */
2118
+ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
2119
+ constructor() {
2120
+ super(...arguments);
2121
+ /**
2122
+ * Computes the usage of the OpenAI API based on the response from OpenAI
2123
+ */
2124
+ this.computeUsage = computeOpenAiUsage;
2125
+ // <- Note: [🤖] getDefaultXxxModel
2126
+ }
2127
+ /* <- TODO: [🍚] `, Destroyable` */
2128
+ get title() {
2129
+ return 'OpenAI';
2130
+ }
2131
+ get description() {
2132
+ return 'Use all models provided by OpenAI';
2133
+ }
2134
+ /*
2135
+ Note: Commenting this out to avoid circular dependency
2136
+ /**
2137
+ * Create (sub)tools for calling OpenAI API Assistants
2138
+ *
2139
+ * @param assistantId Which assistant to use
2140
+ * @returns Tools for calling OpenAI API Assistants with same token
2141
+ * /
2142
+ public createAssistantSubtools(assistantId: string_token): OpenAiAssistantExecutionTools {
2143
+ return new OpenAiAssistantExecutionTools({ ...this.options, assistantId });
2144
+ }
2145
+ */
2146
+ /**
2147
+ * List all available models (non dynamically)
2148
+ *
2149
+ * Note: Purpose of this is to provide more information about models than standard listing from API
2150
+ */
2151
+ get HARDCODED_MODELS() {
2152
+ return OPENAI_MODELS;
2153
+ }
2115
2154
  /**
2116
2155
  * Default model for chat variant.
2117
2156
  */
@@ -2131,13 +2170,6 @@
2131
2170
  return this.getDefaultModel('text-embedding-3-large');
2132
2171
  }
2133
2172
  }
2134
- /**
2135
- * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
2136
- * TODO: Maybe Create some common util for callChatModel and callCompletionModel
2137
- * TODO: Maybe make custom OpenAiError
2138
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
2139
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
2140
- */
2141
2173
 
2142
2174
  /**
2143
2175
  * Execution Tools for calling OpenAI API Assistants
@@ -2329,10 +2361,11 @@
2329
2361
  /**
2330
2362
  * Execution Tools for calling OpenAI API
2331
2363
  *
2364
+ * Note: This can be also used for other OpenAI compatible APIs, like Ollama
2365
+ *
2332
2366
  * @public exported from `@promptbook/openai`
2333
2367
  */
2334
2368
  const createOpenAiExecutionTools = Object.assign((options) => {
2335
- // TODO: [🧠][main] !!4 If browser, auto add `dangerouslyAllowBrowser`
2336
2369
  if (($isRunningInBrowser() || $isRunningInWebWorker()) && !options.dangerouslyAllowBrowser) {
2337
2370
  options = { ...options, dangerouslyAllowBrowser: true };
2338
2371
  }
@@ -2514,6 +2547,7 @@
2514
2547
  exports.BOOK_LANGUAGE_VERSION = BOOK_LANGUAGE_VERSION;
2515
2548
  exports.OPENAI_MODELS = OPENAI_MODELS;
2516
2549
  exports.OpenAiAssistantExecutionTools = OpenAiAssistantExecutionTools;
2550
+ exports.OpenAiCompatibleExecutionTools = OpenAiCompatibleExecutionTools;
2517
2551
  exports.OpenAiExecutionTools = OpenAiExecutionTools;
2518
2552
  exports.PROMPTBOOK_ENGINE_VERSION = PROMPTBOOK_ENGINE_VERSION;
2519
2553
  exports._OpenAiAssistantRegistration = _OpenAiAssistantRegistration;