@promptbook/openai 0.94.0-1 → 0.94.0-12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. package/README.md +6 -8
  2. package/esm/index.es.js +192 -159
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/ollama.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/openai.index.d.ts +2 -0
  6. package/esm/typings/src/execution/AvailableModel.d.ts +9 -1
  7. package/esm/typings/src/llm-providers/_common/filterModels.d.ts +2 -2
  8. package/esm/typings/src/llm-providers/{openai/computeUsage.d.ts → _common/utils/pricing.d.ts} +2 -2
  9. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +1 -1
  10. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionToolsOptions.d.ts +1 -1
  11. package/esm/typings/src/llm-providers/deepseek/DeepseekExecutionToolsOptions.d.ts +1 -1
  12. package/esm/typings/src/llm-providers/google/GoogleExecutionToolsOptions.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +36 -11
  14. package/esm/typings/src/llm-providers/ollama/OllamaExecutionToolsOptions.d.ts +23 -12
  15. package/esm/typings/src/llm-providers/ollama/createOllamaExecutionTools.d.ts +3 -3
  16. package/esm/typings/src/llm-providers/ollama/ollama-models.d.ts +14 -0
  17. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +1 -1
  18. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +91 -0
  19. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +12 -53
  20. package/esm/typings/src/llm-providers/openai/OpenAiExecutionToolsOptions.d.ts +1 -1
  21. package/esm/typings/src/llm-providers/openai/createOpenAiExecutionTools.d.ts +2 -0
  22. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -7
  23. package/esm/typings/src/version.d.ts +1 -1
  24. package/package.json +25 -2
  25. package/umd/index.umd.js +192 -158
  26. package/umd/index.umd.js.map +1 -1
  27. /package/esm/typings/src/llm-providers/{openai/computeUsage.test.d.ts → _common/utils/pricing.test.d.ts} +0 -0
package/esm/index.es.js CHANGED
@@ -18,7 +18,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.94.0-1';
+ const PROMPTBOOK_ENGINE_VERSION = '0.94.0-12';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1257,11 +1257,11 @@ function uncertainNumber(value, isUncertain) {
  }

  /**
- * Function computeUsage will create price per one token based on the string value found on openai page
+ * Create price per one token based on the string value found on openai page
  *
  * @private within the repository, used only as internal helper for `OPENAI_MODELS`
  */
- function computeUsage(value) {
+ function pricing(value) {
  const [price, tokens] = value.split(' / ');
  return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
  }
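
The rename from `computeUsage` to `pricing` above is cosmetic: the helper still converts OpenAI's human-readable price strings into a per-token price. A minimal, self-contained sketch of the same logic (the TypeScript annotations are added here for clarity and are not in the compiled output):

```ts
// Parses a price string such as "$2.00 / 1M tokens" into the price of a single token.
function pricing(value: string): number {
    const [price, tokens] = value.split(' / ');
    return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
}

console.log(pricing('$2.00 / 1M tokens')); // -> 0.000002, i.e. $2 per million tokens
```
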
@@ -1295,10 +1295,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'COMPLETION',
  modelTitle: 'davinci-002',
  modelName: 'davinci-002',
- modelDescription: 'Legacy completion model with strong performance on text generation tasks. Optimized for complex instructions and longer outputs.',
+ modelDescription: 'Legacy completion model with 4K token context window. Excels at complex text generation, creative writing, and detailed content creation with strong contextual understanding. Optimized for instructions requiring nuanced outputs and extended reasoning. Suitable for applications needing high-quality text generation without conversation management.',
  pricing: {
- prompt: computeUsage(`$2.00 / 1M tokens`),
- output: computeUsage(`$2.00 / 1M tokens`),
+ prompt: pricing(`$2.00 / 1M tokens`),
+ output: pricing(`$2.00 / 1M tokens`),
  },
  },
  /**/
@@ -1313,10 +1313,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-3.5-turbo-16k',
  modelName: 'gpt-3.5-turbo-16k',
- modelDescription: 'GPT-3.5 Turbo with extended 16k token context length for handling longer conversations and documents.',
+ modelDescription: 'Extended context GPT-3.5 Turbo with 16K token window. Maintains core capabilities of standard 3.5 Turbo while supporting longer conversations and documents. Features good balance of performance and cost for applications requiring more context than standard 4K models. Effective for document analysis, extended conversations, and multi-step reasoning tasks.',
  pricing: {
- prompt: computeUsage(`$3.00 / 1M tokens`),
- output: computeUsage(`$4.00 / 1M tokens`),
+ prompt: pricing(`$3.00 / 1M tokens`),
+ output: pricing(`$4.00 / 1M tokens`),
  },
  },
  /**/
@@ -1337,10 +1337,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-4',
  modelName: 'gpt-4',
- modelDescription: 'GPT-4 is a powerful language model with enhanced reasoning, instruction-following capabilities, and 8K context window. Optimized for complex tasks requiring deep understanding.',
+ modelDescription: 'Powerful language model with 8K context window featuring sophisticated reasoning, instruction-following, and knowledge capabilities. Demonstrates strong performance on complex tasks requiring deep understanding and multi-step reasoning. Excels at code generation, logical analysis, and nuanced content creation. Suitable for advanced applications requiring high-quality outputs.',
  pricing: {
- prompt: computeUsage(`$30.00 / 1M tokens`),
- output: computeUsage(`$60.00 / 1M tokens`),
+ prompt: pricing(`$30.00 / 1M tokens`),
+ output: pricing(`$60.00 / 1M tokens`),
  },
  },
  /**/
@@ -1349,10 +1349,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-4-32k',
  modelName: 'gpt-4-32k',
- modelDescription: 'Extended context version of GPT-4 with a 32K token window for processing very long inputs and generating comprehensive responses for complex tasks.',
+ modelDescription: 'Extended context version of GPT-4 with 32K token window. Maintains all capabilities of standard GPT-4 while supporting analysis of very lengthy documents, code bases, and conversations. Features enhanced ability to maintain context over long interactions and process detailed information from large inputs. Ideal for document analysis, legal review, and complex problem-solving.',
  pricing: {
- prompt: computeUsage(`$60.00 / 1M tokens`),
- output: computeUsage(`$120.00 / 1M tokens`),
+ prompt: pricing(`$60.00 / 1M tokens`),
+ output: pricing(`$120.00 / 1M tokens`),
  },
  },
  /**/
@@ -1372,10 +1372,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-4-turbo-2024-04-09',
  modelName: 'gpt-4-turbo-2024-04-09',
- modelDescription: 'Latest stable GPT-4 Turbo model from April 2024 with enhanced reasoning and context handling capabilities. Offers 128K context window and improved performance.',
+ modelDescription: 'Latest stable GPT-4 Turbo from April 2024 with 128K context window. Features enhanced reasoning chains, improved factual accuracy with 40% reduction in hallucinations, and better instruction following compared to earlier versions. Includes advanced function calling capabilities and knowledge up to April 2024. Provides optimal performance for enterprise applications requiring reliability.',
  pricing: {
- prompt: computeUsage(`$10.00 / 1M tokens`),
- output: computeUsage(`$30.00 / 1M tokens`),
+ prompt: pricing(`$10.00 / 1M tokens`),
+ output: pricing(`$30.00 / 1M tokens`),
  },
  },
  /**/
@@ -1384,10 +1384,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-3.5-turbo-1106',
  modelName: 'gpt-3.5-turbo-1106',
- modelDescription: 'November 2023 version of GPT-3.5 Turbo with improved instruction following and a 16K token context window.',
+ modelDescription: 'November 2023 version of GPT-3.5 Turbo with 16K token context window. Features improved instruction following, more consistent output formatting, and enhanced function calling capabilities. Includes knowledge cutoff from April 2023. Suitable for applications requiring good performance at lower cost than GPT-4 models.',
  pricing: {
- prompt: computeUsage(`$1.00 / 1M tokens`),
- output: computeUsage(`$2.00 / 1M tokens`),
+ prompt: pricing(`$1.00 / 1M tokens`),
+ output: pricing(`$2.00 / 1M tokens`),
  },
  },
  /**/
@@ -1396,10 +1396,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-4-turbo',
  modelName: 'gpt-4-turbo',
- modelDescription: 'More capable model than GPT-4 with improved instruction following, function calling and a 128K token context window for handling very large documents.',
+ modelDescription: 'More capable and cost-efficient version of GPT-4 with 128K token context window. Features improved instruction following, advanced function calling capabilities, and better performance on coding tasks. Maintains superior reasoning and knowledge while offering substantial cost reduction compared to base GPT-4. Ideal for complex applications requiring extensive context processing.',
  pricing: {
- prompt: computeUsage(`$10.00 / 1M tokens`),
- output: computeUsage(`$30.00 / 1M tokens`),
+ prompt: pricing(`$10.00 / 1M tokens`),
+ output: pricing(`$30.00 / 1M tokens`),
  },
  },
  /**/
@@ -1408,10 +1408,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'COMPLETION',
  modelTitle: 'gpt-3.5-turbo-instruct-0914',
  modelName: 'gpt-3.5-turbo-instruct-0914',
- modelDescription: 'September 2023 version of GPT-3.5 Turbo optimized for completion-style instruction following with a 4K context window.',
+ modelDescription: 'September 2023 version of GPT-3.5 Turbo Instruct with 4K context window. Optimized for completion-style instruction following with deterministic responses. Better suited than chat models for applications requiring specific formatted outputs without conversation management. Knowledge cutoff from September 2021.',
  pricing: {
- prompt: computeUsage(`$1.50 / 1M tokens`),
- output: computeUsage(`$2.00 / 1M tokens`), // <- For gpt-3.5-turbo-instruct
+ prompt: pricing(`$1.50 / 1M tokens`),
+ output: pricing(`$2.00 / 1M tokens`),
  },
  },
  /**/
@@ -1420,10 +1420,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'COMPLETION',
  modelTitle: 'gpt-3.5-turbo-instruct',
  modelName: 'gpt-3.5-turbo-instruct',
- modelDescription: 'Optimized version of GPT-3.5 for completion-style API with good instruction following and a 4K token context window.',
+ modelDescription: 'Optimized version of GPT-3.5 for completion-style API with 4K token context window. Features strong instruction following with single-turn design rather than multi-turn conversation. Provides more consistent, deterministic outputs compared to chat models. Well-suited for templated content generation and structured text transformation tasks.',
  pricing: {
- prompt: computeUsage(`$1.50 / 1M tokens`),
- output: computeUsage(`$2.00 / 1M tokens`),
+ prompt: pricing(`$1.50 / 1M tokens`),
+ output: pricing(`$2.00 / 1M tokens`),
  },
  },
  /**/
@@ -1438,10 +1438,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-3.5-turbo',
  modelName: 'gpt-3.5-turbo',
- modelDescription: 'Latest version of GPT-3.5 Turbo with improved performance and instruction following capabilities. Default 4K context window with options for 16K.',
+ modelDescription: 'Latest version of GPT-3.5 Turbo with 4K token default context window (16K available). Features continually improved performance with enhanced instruction following and reduced hallucinations. Offers excellent balance between capability and cost efficiency. Suitable for most general-purpose applications requiring good AI capabilities at reasonable cost.',
  pricing: {
- prompt: computeUsage(`$0.50 / 1M tokens`),
- output: computeUsage(`$1.50 / 1M tokens`),
+ prompt: pricing(`$0.50 / 1M tokens`),
+ output: pricing(`$1.50 / 1M tokens`),
  },
  },
  /**/
@@ -1450,10 +1450,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-3.5-turbo-0301',
  modelName: 'gpt-3.5-turbo-0301',
- modelDescription: 'March 2023 version of GPT-3.5 Turbo with a 4K token context window. Legacy model maintained for backward compatibility.',
+ modelDescription: 'March 2023 version of GPT-3.5 Turbo with 4K token context window. Legacy model maintained for backward compatibility with specific application behaviors. Features solid conversational abilities and basic instruction following. Knowledge cutoff from September 2021. Suitable for applications explicitly designed for this version.',
  pricing: {
- prompt: computeUsage(`$1.50 / 1M tokens`),
- output: computeUsage(`$2.00 / 1M tokens`),
+ prompt: pricing(`$1.50 / 1M tokens`),
+ output: pricing(`$2.00 / 1M tokens`),
  },
  },
  /**/
@@ -1462,10 +1462,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'COMPLETION',
  modelTitle: 'babbage-002',
  modelName: 'babbage-002',
- modelDescription: 'Efficient legacy completion model with a good balance of performance and speed. Suitable for straightforward text generation tasks.',
+ modelDescription: 'Efficient legacy completion model with 4K context window balancing performance and speed. Features moderate reasoning capabilities with focus on straightforward text generation tasks. Significantly more efficient than davinci models while maintaining adequate quality for many applications. Suitable for high-volume, cost-sensitive text generation needs.',
  pricing: {
- prompt: computeUsage(`$0.40 / 1M tokens`),
- output: computeUsage(`$0.40 / 1M tokens`),
+ prompt: pricing(`$0.40 / 1M tokens`),
+ output: pricing(`$0.40 / 1M tokens`),
  },
  },
  /**/
@@ -1474,10 +1474,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-4-1106-preview',
  modelName: 'gpt-4-1106-preview',
- modelDescription: 'November 2023 preview version of GPT-4 Turbo with improved instruction following and a 128K token context window.',
+ modelDescription: 'November 2023 preview version of GPT-4 Turbo with 128K token context window. Features improved instruction following, better function calling capabilities, and enhanced reasoning. Includes knowledge cutoff from April 2023. Suitable for complex applications requiring extensive document understanding and sophisticated interactions.',
  pricing: {
- prompt: computeUsage(`$10.00 / 1M tokens`),
- output: computeUsage(`$30.00 / 1M tokens`),
+ prompt: pricing(`$10.00 / 1M tokens`),
+ output: pricing(`$30.00 / 1M tokens`),
  },
  },
  /**/
@@ -1486,10 +1486,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-4-0125-preview',
  modelName: 'gpt-4-0125-preview',
- modelDescription: 'January 2024 preview version of GPT-4 Turbo with improved reasoning capabilities and a 128K token context window.',
+ modelDescription: 'January 2024 preview version of GPT-4 Turbo with 128K token context window. Features improved reasoning capabilities, enhanced tool use, and more reliable function calling. Includes knowledge cutoff from October 2023. Offers better performance on complex logical tasks and more consistent outputs than previous preview versions.',
  pricing: {
- prompt: computeUsage(`$10.00 / 1M tokens`),
- output: computeUsage(`$30.00 / 1M tokens`),
+ prompt: pricing(`$10.00 / 1M tokens`),
+ output: pricing(`$30.00 / 1M tokens`),
  },
  },
  /**/
@@ -1504,10 +1504,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-3.5-turbo-0125',
  modelName: 'gpt-3.5-turbo-0125',
- modelDescription: 'January 2024 version of GPT-3.5 Turbo with improved reasoning capabilities and a 16K token context window.',
+ modelDescription: 'January 2024 version of GPT-3.5 Turbo with 16K token context window. Features improved reasoning capabilities, better instruction adherence, and reduced hallucinations compared to previous versions. Includes knowledge cutoff from September 2021. Provides good performance for most general applications at reasonable cost.',
  pricing: {
- prompt: computeUsage(`$0.50 / 1M tokens`),
- output: computeUsage(`$1.50 / 1M tokens`),
+ prompt: pricing(`$0.50 / 1M tokens`),
+ output: pricing(`$1.50 / 1M tokens`),
  },
  },
  /**/
@@ -1516,10 +1516,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-4-turbo-preview',
  modelName: 'gpt-4-turbo-preview',
- modelDescription: 'Preview version of GPT-4 Turbo that points to the latest model version. Features improved instruction following, 128K token context window and lower latency.',
+ modelDescription: 'Preview version of GPT-4 Turbo with 128K token context window that points to the latest development model. Features cutting-edge improvements to instruction following, knowledge representation, and tool use capabilities. Provides access to newest features but may have occasional behavior changes. Best for non-critical applications wanting latest capabilities.',
  pricing: {
- prompt: computeUsage(`$10.00 / 1M tokens`),
- output: computeUsage(`$30.00 / 1M tokens`),
+ prompt: pricing(`$10.00 / 1M tokens`),
+ output: pricing(`$30.00 / 1M tokens`),
  },
  },
  /**/
@@ -1528,11 +1528,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'EMBEDDING',
  modelTitle: 'text-embedding-3-large',
  modelName: 'text-embedding-3-large',
- modelDescription: "OpenAI's most capable text embedding model designed for high-quality embeddings for complex similarity tasks and information retrieval.",
+ modelDescription: "OpenAI's most capable text embedding model generating 3072-dimensional vectors. Designed for high-quality embeddings for complex similarity tasks, clustering, and information retrieval. Features enhanced cross-lingual capabilities and significantly improved performance on retrieval and classification benchmarks. Ideal for sophisticated RAG systems and semantic search applications.",
  pricing: {
- prompt: computeUsage(`$0.13 / 1M tokens`),
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
- output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
+ prompt: pricing(`$0.13 / 1M tokens`),
+ output: 0,
  },
  },
  /**/
@@ -1541,11 +1540,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'EMBEDDING',
  modelTitle: 'text-embedding-3-small',
  modelName: 'text-embedding-3-small',
- modelDescription: 'Cost-effective embedding model with good performance for simpler tasks like text similarity and retrieval. Good balance of quality and efficiency.',
+ modelDescription: 'Cost-effective embedding model generating 1536-dimensional vectors. Balances quality and efficiency for simpler tasks while maintaining good performance on text similarity and retrieval applications. Offers 20% better quality than ada-002 at significantly lower cost. Ideal for production embedding applications with cost constraints.',
  pricing: {
- prompt: computeUsage(`$0.02 / 1M tokens`),
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
- output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
+ prompt: pricing(`$0.02 / 1M tokens`),
+ output: 0,
  },
  },
  /**/
@@ -1554,10 +1552,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-3.5-turbo-0613',
  modelName: 'gpt-3.5-turbo-0613',
- modelDescription: 'June 2023 version of GPT-3.5 Turbo with function calling capabilities and a 4K token context window.',
+ modelDescription: "June 2023 version of GPT-3.5 Turbo with 4K token context window. Features function calling capabilities for structured data extraction and API interaction. Includes knowledge cutoff from September 2021. Maintained for applications specifically designed for this version's behaviors and capabilities.",
  pricing: {
- prompt: computeUsage(`$1.50 / 1M tokens`),
- output: computeUsage(`$2.00 / 1M tokens`),
+ prompt: pricing(`$1.50 / 1M tokens`),
+ output: pricing(`$2.00 / 1M tokens`),
  },
  },
  /**/
@@ -1566,11 +1564,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'EMBEDDING',
  modelTitle: 'text-embedding-ada-002',
  modelName: 'text-embedding-ada-002',
- modelDescription: 'Legacy text embedding model suitable for text similarity and retrieval augmented generation use cases. Replaced by newer embedding-3 models.',
+ modelDescription: 'Legacy text embedding model generating 1536-dimensional vectors suitable for text similarity and retrieval applications. Processes up to 8K tokens per request with consistent embedding quality. While superseded by newer embedding-3 models, still maintains adequate performance for many semantic search and classification tasks.',
  pricing: {
- prompt: computeUsage(`$0.1 / 1M tokens`),
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
- output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
+ prompt: pricing(`$0.1 / 1M tokens`),
+ output: 0,
  },
  },
  /**/
@@ -1597,10 +1594,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-4o-2024-05-13',
  modelName: 'gpt-4o-2024-05-13',
- modelDescription: 'May 2024 version of GPT-4o with enhanced multimodal capabilities, improved reasoning, and optimized for vision, audio and chat at lower latencies.',
+ modelDescription: 'May 2024 version of GPT-4o with 128K context window. Features enhanced multimodal capabilities including superior image understanding (up to 20MP), audio processing, and improved reasoning. Optimized for 2x lower latency than GPT-4 Turbo while maintaining high performance. Includes knowledge up to October 2023. Ideal for production applications requiring reliable multimodal capabilities.',
  pricing: {
- prompt: computeUsage(`$5.00 / 1M tokens`),
- output: computeUsage(`$15.00 / 1M tokens`),
+ prompt: pricing(`$5.00 / 1M tokens`),
+ output: pricing(`$15.00 / 1M tokens`),
  },
  },
  /**/
@@ -1609,10 +1606,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-4o',
  modelName: 'gpt-4o',
- modelDescription: "OpenAI's most advanced multimodal model optimized for performance, speed, and cost. Capable of vision, reasoning, and high quality text generation.",
+ modelDescription: "OpenAI's most advanced general-purpose multimodal model with 128K context window. Optimized for balanced performance, speed, and cost with 2x faster responses than GPT-4 Turbo. Features excellent vision processing, audio understanding, reasoning, and text generation quality. Represents optimal balance of capability and efficiency for most advanced applications.",
  pricing: {
- prompt: computeUsage(`$5.00 / 1M tokens`),
- output: computeUsage(`$15.00 / 1M tokens`),
+ prompt: pricing(`$5.00 / 1M tokens`),
+ output: pricing(`$15.00 / 1M tokens`),
  },
  },
  /**/
@@ -1621,10 +1618,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-4o-mini',
  modelName: 'gpt-4o-mini',
- modelDescription: 'Smaller, more cost-effective version of GPT-4o with good performance across text, vision, and audio tasks at reduced complexity.',
+ modelDescription: 'Smaller, more cost-effective version of GPT-4o with 128K context window. Maintains impressive capabilities across text, vision, and audio tasks while operating at significantly lower cost. Features 3x faster inference than GPT-4o with good performance on general tasks. Excellent for applications requiring good quality multimodal capabilities at scale.',
  pricing: {
- prompt: computeUsage(`$0.15 / 1M tokens`),
- output: computeUsage(`$0.60 / 1M tokens`),
+ prompt: pricing(`$0.15 / 1M tokens`),
+ output: pricing(`$0.60 / 1M tokens`),
  },
  },
  /**/
@@ -1633,10 +1630,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'o1-preview',
  modelName: 'o1-preview',
- modelDescription: 'Advanced reasoning model with exceptional performance on complex logical, mathematical, and analytical tasks. Built for deep reasoning and specialized professional tasks.',
+ modelDescription: 'Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Features exceptional step-by-step problem-solving capabilities, advanced mathematical and scientific reasoning, and superior performance on STEM-focused problems. Significantly outperforms GPT-4 on quantitative reasoning benchmarks. Ideal for professional and specialized applications.',
  pricing: {
- prompt: computeUsage(`$15.00 / 1M tokens`),
- output: computeUsage(`$60.00 / 1M tokens`),
+ prompt: pricing(`$15.00 / 1M tokens`),
+ output: pricing(`$60.00 / 1M tokens`),
  },
  },
  /**/
@@ -1645,11 +1642,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'o1-preview-2024-09-12',
  modelName: 'o1-preview-2024-09-12',
- modelDescription: 'September 2024 version of O1 preview with specialized reasoning capabilities for complex tasks requiring precise analytical thinking.',
- // <- TODO: [💩] Some better system to organize these date suffixes and versions
+ modelDescription: 'September 2024 version of O1 preview with 128K context window. Features specialized reasoning capabilities with 30% improvement on mathematical and scientific accuracy over previous versions. Includes enhanced support for formal logic, statistical analysis, and technical domains. Optimized for professional applications requiring precise analytical thinking and rigorous methodologies.',
  pricing: {
- prompt: computeUsage(`$15.00 / 1M tokens`),
- output: computeUsage(`$60.00 / 1M tokens`),
+ prompt: pricing(`$15.00 / 1M tokens`),
+ output: pricing(`$60.00 / 1M tokens`),
  },
  },
  /**/
@@ -1658,10 +1654,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'o1-mini',
  modelName: 'o1-mini',
- modelDescription: 'Smaller, cost-effective version of the O1 model with good performance on reasoning tasks while maintaining efficiency for everyday analytical use.',
+ modelDescription: 'Smaller, cost-effective version of the O1 model with 128K context window. Maintains strong analytical reasoning abilities while reducing computational requirements by 70%. Features good performance on mathematical, logical, and scientific tasks at significantly lower cost than full O1. Excellent for everyday analytical applications that benefit from reasoning focus.',
  pricing: {
- prompt: computeUsage(`$3.00 / 1M tokens`),
- output: computeUsage(`$12.00 / 1M tokens`),
+ prompt: pricing(`$3.00 / 1M tokens`),
+ output: pricing(`$12.00 / 1M tokens`),
  },
  },
  /**/
@@ -1670,10 +1666,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'o1',
  modelName: 'o1',
- modelDescription: "OpenAI's advanced reasoning model focused on logic and problem-solving. Designed for complex analytical tasks with rigorous step-by-step reasoning. 128K context window.",
+ modelDescription: "OpenAI's advanced reasoning model with 128K context window focusing on logical problem-solving and analytical thinking. Features exceptional performance on quantitative tasks, step-by-step deduction, and complex technical problems. Maintains 95%+ of o1-preview capabilities with production-ready stability. Ideal for scientific computing, financial analysis, and professional applications.",
  pricing: {
- prompt: computeUsage(`$15.00 / 1M tokens`),
- output: computeUsage(`$60.00 / 1M tokens`),
+ prompt: pricing(`$15.00 / 1M tokens`),
+ output: pricing(`$60.00 / 1M tokens`),
  },
  },
  /**/
@@ -1682,11 +1678,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'o3-mini',
  modelName: 'o3-mini',
- modelDescription: 'Cost-effective reasoning model optimized for academic and scientific problem-solving. Efficient performance on STEM tasks with deep mathematical and scientific knowledge. 128K context window.',
+ modelDescription: 'Cost-effective reasoning model with 128K context window optimized for academic and scientific problem-solving. Features efficient performance on STEM tasks with specialized capabilities in mathematics, physics, chemistry, and computer science. Offers 80% of O1 performance on technical domains at significantly lower cost. Ideal for educational applications and research support.',
  pricing: {
- prompt: computeUsage(`$3.00 / 1M tokens`),
- output: computeUsage(`$12.00 / 1M tokens`),
- // <- TODO: !! Unsure, check the pricing
+ prompt: pricing(`$3.00 / 1M tokens`),
+ output: pricing(`$12.00 / 1M tokens`),
  },
  },
  /**/
@@ -1695,10 +1690,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'o1-mini-2024-09-12',
  modelName: 'o1-mini-2024-09-12',
- modelDescription: "September 2024 version of O1-mini with balanced reasoning capabilities and cost-efficiency. Good for analytical tasks that don't require the full O1 model.",
+ modelDescription: "September 2024 version of O1-mini with 128K context window featuring balanced reasoning capabilities and cost-efficiency. Includes 25% improvement in mathematical accuracy and enhanced performance on coding tasks compared to previous versions. Maintains efficient resource utilization while delivering improved results for analytical applications that don't require the full O1 model.",
  pricing: {
- prompt: computeUsage(`$3.00 / 1M tokens`),
- output: computeUsage(`$12.00 / 1M tokens`),
+ prompt: pricing(`$3.00 / 1M tokens`),
+ output: pricing(`$12.00 / 1M tokens`),
  },
  },
  /**/
@@ -1707,10 +1702,10 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-3.5-turbo-16k-0613',
  modelName: 'gpt-3.5-turbo-16k-0613',
- modelDescription: 'June 2023 version of GPT-3.5 Turbo with extended 16k token context window for processing longer conversations and documents.',
+ modelDescription: "June 2023 version of GPT-3.5 Turbo with extended 16K token context window. Features good handling of longer conversations and documents with improved memory management across extended contexts. Includes knowledge cutoff from September 2021. Maintained for applications specifically designed for this version's behaviors and capabilities.",
  pricing: {
- prompt: computeUsage(`$3.00 / 1M tokens`),
- output: computeUsage(`$4.00 / 1M tokens`),
+ prompt: pricing(`$3.00 / 1M tokens`),
+ output: pricing(`$4.00 / 1M tokens`),
  },
  },
  /**/
@@ -1785,15 +1780,15 @@ resultContent, rawResponse) {
  */

  /**
- * Execution Tools for calling OpenAI API
+ * Execution Tools for calling OpenAI API or other OpeenAI compatible provider
  *
  * @public exported from `@promptbook/openai`
  */
- class OpenAiExecutionTools {
+ class OpenAiCompatibleExecutionTools {
  /**
- * Creates OpenAI Execution Tools.
+ * Creates OpenAI compatible Execution Tools.
  *
- * @param options which are relevant are directly passed to the OpenAI client
+ * @param options which are relevant are directly passed to the OpenAI compatible client
  */
  constructor(options) {
  this.options = options;
@@ -1806,12 +1801,6 @@ class OpenAiExecutionTools {
  minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_MAX_REQUESTS_PER_MINUTE),
  });
  }
- get title() {
- return 'OpenAI';
- }
- get description() {
- return 'Use all models provided by OpenAI';
- }
  async getClient() {
  if (this.client === null) {
  // Note: Passing only OpenAI relevant options to OpenAI constructor
@@ -1822,18 +1811,6 @@ class OpenAiExecutionTools {
  }
  return this.client;
  }
- /*
- Note: Commenting this out to avoid circular dependency
- /**
- * Create (sub)tools for calling OpenAI API Assistants
- *
- * @param assistantId Which assistant to use
- * @returns Tools for calling OpenAI API Assistants with same token
- * /
- public createAssistantSubtools(assistantId: string_token): OpenAiAssistantExecutionTools {
- return new OpenAiAssistantExecutionTools({ ...this.options, assistantId });
- }
- */
  /**
  * Check the `options` passed to `constructor`
  */
@@ -1842,25 +1819,36 @@ class OpenAiExecutionTools {
  // TODO: [🎍] Do here a real check that API is online, working and API key is correct
  }
  /**
- * List all available OpenAI models that can be used
+ * List all available OpenAI compatible models that can be used
  */
- listModels() {
- /*
- Note: Dynamic lising of the models
- const models = await this.openai.models.list({});
-
- console.log({ models });
- console.log(models.data);
- */
- return OPENAI_MODELS;
+ async listModels() {
+ const client = await this.getClient();
+ const rawModelsList = await client.models.list();
+ const availableModels = rawModelsList.data
+ .sort((a, b) => (a.created > b.created ? 1 : -1))
+ .map((modelFromApi) => {
+ const modelFromList = this.HARDCODED_MODELS.find(({ modelName }) => modelName === modelFromApi.id ||
+ modelName.startsWith(modelFromApi.id) ||
+ modelFromApi.id.startsWith(modelName));
+ if (modelFromList !== undefined) {
+ return modelFromList;
+ }
+ return {
+ modelVariant: 'CHAT',
+ modelTitle: modelFromApi.id,
+ modelName: modelFromApi.id,
+ modelDescription: '',
+ };
+ });
+ return availableModels;
  }
  /**
- * Calls OpenAI API to use a chat model.
+ * Calls OpenAI compatible API to use a chat model.
  */
  async callChatModel(prompt) {
  var _a;
  if (this.options.isVerbose) {
- console.info('💬 OpenAI callChatModel call', { prompt });
+ console.info(`💬 ${this.title} callChatModel call`, { prompt });
  }
  const { content, parameters, modelRequirements, format } = prompt;
  const client = await this.getClient();
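
The key behavioral change above is `listModels()`: instead of returning the hardcoded `OPENAI_MODELS` list, it now queries the provider's models endpoint and enriches each returned model with curated metadata when the name matches exactly or by prefix in either direction. A standalone sketch of just that merge logic, with a simplified `AvailableModel` shape assumed for illustration:

```ts
type AvailableModel = {
    modelVariant: 'CHAT' | 'COMPLETION' | 'EMBEDDING';
    modelTitle: string;
    modelName: string;
    modelDescription: string;
};

// Mirrors the matching in listModels() above: exact match, or prefix match in either direction.
function enrichModel(apiId: string, hardcodedModels: ReadonlyArray<AvailableModel>): AvailableModel {
    const known = hardcodedModels.find(
        ({ modelName }) => modelName === apiId || modelName.startsWith(apiId) || apiId.startsWith(modelName),
    );
    // Models unknown to the curated list fall back to a bare CHAT entry with no description.
    return known ?? { modelVariant: 'CHAT', modelTitle: apiId, modelName: apiId, modelDescription: '' };
}
```
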
@@ -1921,20 +1909,20 @@
  }
  const complete = $getCurrentDate();
  if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError('No choises from OpenAI');
+ throw new PipelineExecutionError(`No choises from ${this.title}`);
  }
  if (rawResponse.choices.length > 1) {
  // TODO: This should be maybe only warning
- throw new PipelineExecutionError('More than one choise from OpenAI');
+ throw new PipelineExecutionError(`More than one choise from ${this.title}`);
  }
  const resultContent = rawResponse.choices[0].message.content;
- const usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
+ const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
  if (resultContent === null) {
- throw new PipelineExecutionError('No response message from OpenAI');
+ throw new PipelineExecutionError(`No response message from ${this.title}`);
  }
  return exportJson({
  name: 'promptResult',
- message: `Result of \`OpenAiExecutionTools.callChatModel\``,
+ message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
  order: [],
  value: {
  content: resultContent,
@@ -1957,7 +1945,7 @@
  async callCompletionModel(prompt) {
  var _a;
  if (this.options.isVerbose) {
- console.info('🖋 OpenAI callCompletionModel call', { prompt });
+ console.info(`🖋 ${this.title} callCompletionModel call`, { prompt });
  }
  const { content, parameters, modelRequirements } = prompt;
  const client = await this.getClient();
@@ -1998,17 +1986,17 @@
  }
  const complete = $getCurrentDate();
  if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError('No choises from OpenAI');
+ throw new PipelineExecutionError(`No choises from ${this.title}`);
  }
  if (rawResponse.choices.length > 1) {
  // TODO: This should be maybe only warning
- throw new PipelineExecutionError('More than one choise from OpenAI');
+ throw new PipelineExecutionError(`More than one choise from ${this.title}`);
  }
  const resultContent = rawResponse.choices[0].text;
- const usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
+ const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
  return exportJson({
  name: 'promptResult',
- message: `Result of \`OpenAiExecutionTools.callCompletionModel\``,
+ message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
  order: [],
  value: {
  content: resultContent,
@@ -2026,11 +2014,11 @@
  });
  }
  /**
- * Calls OpenAI API to use a embedding model
+ * Calls OpenAI compatible API to use a embedding model
  */
  async callEmbeddingModel(prompt) {
  if (this.options.isVerbose) {
- console.info('🖋 OpenAI embedding call', { prompt });
+ console.info(`🖋 ${this.title} embedding call`, { prompt });
  }
  const { content, parameters, modelRequirements } = prompt;
  const client = await this.getClient();
@@ -2065,12 +2053,12 @@
  throw new PipelineExecutionError(`Expected exactly 1 data item in response, got ${rawResponse.data.length}`);
  }
  const resultContent = rawResponse.data[0].embedding;
- const usage = computeOpenAiUsage(content || '', '',
+ const usage = this.computeUsage(content || '', '',
  // <- Note: Embedding does not have result content
  rawResponse);
  return exportJson({
  name: 'promptResult',
- message: `Result of \`OpenAiExecutionTools.callEmbeddingModel\``,
+ message: `Result of \`OpenAiCompatibleExecutionTools.callEmbeddingModel\``,
  order: [],
  value: {
  content: resultContent,
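
Throughout these hunks, all three `call*Model` methods switch from the module-level `computeOpenAiUsage` to `this.computeUsage`, letting subclasses supply provider-specific usage accounting. For orientation, a hypothetical embedding call through the tools might look like this; the `modelRequirements` fields are an assumption (the diff only shows them being destructured from the prompt), and this assumes `exportJson` returns its `value` object:

```ts
// `tools` created via createOpenAiExecutionTools (see the factory further below).
const result = await tools.callEmbeddingModel({
    content: 'Hello, Promptbook!',
    parameters: {},
    // Assumed shape, for illustration only:
    modelRequirements: { modelVariant: 'EMBEDDING', modelName: 'text-embedding-3-small' },
});
console.log(result.content.length); // dimensionality of the returned embedding vector
```
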
@@ -2093,18 +2081,69 @@
  */
  getDefaultModel(defaultModelName) {
  // Note: Match exact or prefix for model families
- const model = OPENAI_MODELS.find(({ modelName }) => modelName === defaultModelName || modelName.startsWith(defaultModelName));
+ const model = this.HARDCODED_MODELS.find(({ modelName }) => modelName === defaultModelName || modelName.startsWith(defaultModelName));
  if (model === undefined) {
- throw new UnexpectedError(spaceTrim$1((block) => `
- Cannot find model in OpenAI models with name "${defaultModelName}" which should be used as default.
+ throw new PipelineExecutionError(spaceTrim$1((block) => `
+ Cannot find model in ${this.title} models with name "${defaultModelName}" which should be used as default.

  Available models:
- ${block(OPENAI_MODELS.map(({ modelName }) => `- "${modelName}"`).join('\n'))}
+ ${block(this.HARDCODED_MODELS.map(({ modelName }) => `- "${modelName}"`).join('\n'))}
+
+ Model "${defaultModelName}" is probably not available anymore, not installed, inaccessible or misconfigured.

  `));
  }
  return model;
  }
+ }
+ /**
+ * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
+ * TODO: [🛄] Maybe make custom `OpenAiCompatibleError`
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
+ */
+
+ /**
+ * Execution Tools for calling OpenAI API
+ *
+ * @public exported from `@promptbook/openai`
+ */
+ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
+ constructor() {
+ super(...arguments);
+ /**
+ * Computes the usage of the OpenAI API based on the response from OpenAI
+ */
+ this.computeUsage = computeOpenAiUsage;
+ // <- Note: [🤖] getDefaultXxxModel
+ }
+ /* <- TODO: [🍚] `, Destroyable` */
+ get title() {
+ return 'OpenAI';
+ }
+ get description() {
+ return 'Use all models provided by OpenAI';
+ }
+ /*
+ Note: Commenting this out to avoid circular dependency
+ /**
+ * Create (sub)tools for calling OpenAI API Assistants
+ *
+ * @param assistantId Which assistant to use
+ * @returns Tools for calling OpenAI API Assistants with same token
+ * /
+ public createAssistantSubtools(assistantId: string_token): OpenAiAssistantExecutionTools {
+ return new OpenAiAssistantExecutionTools({ ...this.options, assistantId });
+ }
+ */
+ /**
+ * List all available models (non dynamically)
+ *
+ * Note: Purpose of this is to provide more information about models than standard listing from API
+ */
+ get HARDCODED_MODELS() {
+ return OPENAI_MODELS;
+ }
  /**
  * Default model for chat variant.
  */
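
The net effect of this hunk: the provider-agnostic logic now lives in `OpenAiCompatibleExecutionTools`, while `OpenAiExecutionTools` shrinks to the OpenAI-specific surface (`title`, `description`, `computeUsage`, `HARDCODED_MODELS`, and the default-model getters). A hypothetical third-party provider could plug into the same base class along these lines; the class name and empty model list are illustrative, not part of the package, and `computeOpenAiUsage` is an internal helper assumed reusable here:

```ts
class ExampleCompatibleExecutionTools extends OpenAiCompatibleExecutionTools {
    // Reuse OpenAI-style token accounting; a real provider might substitute its own.
    public computeUsage = computeOpenAiUsage;

    public get title() {
        return 'Example provider';
    }

    public get description() {
        return 'Use models from an example OpenAI-compatible endpoint';
    }

    public get HARDCODED_MODELS() {
        return []; // <- no curated metadata, so listModels() falls back to bare CHAT entries
    }
}
```
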
@@ -2124,13 +2163,6 @@ class OpenAiExecutionTools {
  return this.getDefaultModel('text-embedding-3-large');
  }
  }
- /**
- * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
- * TODO: Maybe Create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom OpenAiError
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
- */

  /**
  * Execution Tools for calling OpenAI API Assistants
@@ -2322,10 +2354,11 @@ const createOpenAiAssistantExecutionTools = Object.assign((options) => {
  /**
  * Execution Tools for calling OpenAI API
  *
+ * Note: This can be also used for other OpenAI compatible APIs, like Ollama
+ *
  * @public exported from `@promptbook/openai`
  */
  const createOpenAiExecutionTools = Object.assign((options) => {
- // TODO: [🧠][main] !!4 If browser, auto add `dangerouslyAllowBrowser`
  if (($isRunningInBrowser() || $isRunningInWebWorker()) && !options.dangerouslyAllowBrowser) {
  options = { ...options, dangerouslyAllowBrowser: true };
  }
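
Usage of the factory is unchanged apart from the browser handling above, which now silently enables `dangerouslyAllowBrowser` instead of leaving it as a TODO. A minimal sketch of a call, assuming a standard OpenAI API key is available in the environment (options are passed through to the OpenAI client):

```ts
import { createOpenAiExecutionTools } from '@promptbook/openai';

const tools = createOpenAiExecutionTools({
    apiKey: process.env.OPENAI_API_KEY, // <- standard OpenAI client option
    isVerbose: true, // <- logs each call, as shown in callChatModel above
});

const models = await tools.listModels(); // now async: fetched from the API and enriched
console.log(models.map(({ modelName }) => modelName));
```
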
@@ -2504,5 +2537,5 @@ const _OpenAiAssistantRegistration = $llmToolsRegister.register(createOpenAiAssi
  * Note: [💞] Ignore a discrepancy between file name and entity name
  */

- export { BOOK_LANGUAGE_VERSION, OPENAI_MODELS, OpenAiAssistantExecutionTools, OpenAiExecutionTools, PROMPTBOOK_ENGINE_VERSION, _OpenAiAssistantRegistration, _OpenAiRegistration, createOpenAiAssistantExecutionTools, createOpenAiExecutionTools };
+ export { BOOK_LANGUAGE_VERSION, OPENAI_MODELS, OpenAiAssistantExecutionTools, OpenAiCompatibleExecutionTools, OpenAiExecutionTools, PROMPTBOOK_ENGINE_VERSION, _OpenAiAssistantRegistration, _OpenAiRegistration, createOpenAiAssistantExecutionTools, createOpenAiExecutionTools };
  //# sourceMappingURL=index.es.js.map