@promptbook/openai 0.94.0-7 → 0.95.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +10 -23
- package/esm/index.es.js +40 -45
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/types.index.d.ts +2 -2
- package/esm/typings/src/_packages/{wizzard.index.d.ts → wizard.index.d.ts} +2 -2
- package/esm/typings/src/cli/cli-commands/prettify.d.ts +1 -1
- package/esm/typings/src/cli/cli-commands/test-command.d.ts +1 -1
- package/esm/typings/src/conversion/archive/loadArchive.d.ts +1 -1
- package/esm/typings/src/conversion/archive/saveArchive.d.ts +2 -2
- package/esm/typings/src/conversion/prettify/renderPipelineMermaidOptions.d.ts +1 -1
- package/esm/typings/src/dialogs/callback/CallbackInterfaceTools.d.ts +1 -1
- package/esm/typings/src/execution/AbstractTaskResult.d.ts +2 -2
- package/esm/typings/src/execution/createPipelineExecutor/00-CreatePipelineExecutorOptions.d.ts +1 -1
- package/esm/typings/src/execution/execution-report/ExecutionPromptReportJson.d.ts +2 -2
- package/esm/typings/src/execution/translation/automatic-translate/translateMessages.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/{$provideLlmToolsForWizzardOrCli.d.ts → $provideLlmToolsForWizardOrCli.d.ts} +2 -2
- package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/deepseek/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/deepseek/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/google/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/google/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/ollama/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/ollama/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +2 -2
- package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +2 -2
- package/esm/typings/src/remote-server/socket-types/listModels/PromptbookServer_ListModels_Request.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/createBoilerplateScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/_common/prepareKnowledgePieces.d.ts +1 -1
- package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +1 -1
- package/esm/typings/src/scrapers/document/createDocumentScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/document/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/document/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/document-legacy/createLegacyDocumentScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/document-legacy/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/document-legacy/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/markdown/createMarkdownScraper.d.ts +1 -4
- package/esm/typings/src/scrapers/markdown/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/markdown/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/markitdown/createMarkitdownScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/markitdown/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/markitdown/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/pdf/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/pdf/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/website/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/website/register-metadata.d.ts +2 -2
- package/esm/typings/src/types/typeAliases.d.ts +1 -1
- package/esm/typings/src/utils/files/listAllFiles.d.ts +1 -1
- package/esm/typings/src/version.d.ts +1 -1
- package/esm/typings/src/{wizzard → wizard}/$getCompiledBook.d.ts +2 -2
- package/esm/typings/src/{wizzard/wizzard.d.ts → wizard/wizard.d.ts} +6 -6
- package/package.json +25 -14
- package/umd/index.umd.js +40 -45
- package/umd/index.umd.js.map +1 -1
package/umd/index.umd.js
CHANGED
|
@@ -25,7 +25,7 @@
|
|
|
25
25
|
* @generated
|
|
26
26
|
* @see https://github.com/webgptorg/promptbook
|
|
27
27
|
*/
|
|
28
|
-
const PROMPTBOOK_ENGINE_VERSION = '0.
|
|
28
|
+
const PROMPTBOOK_ENGINE_VERSION = '0.95.0';
|
|
29
29
|
/**
|
|
30
30
|
* TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
|
|
31
31
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
@@ -521,7 +521,7 @@
|
|
|
521
521
|
else {
|
|
522
522
|
for (const [subName, subValue] of Object.entries(value)) {
|
|
523
523
|
if (subValue === undefined) {
|
|
524
|
-
// Note: undefined in object is serializable - it is just
|
|
524
|
+
// Note: undefined in object is serializable - it is just omitted
|
|
525
525
|
continue;
|
|
526
526
|
}
|
|
527
527
|
checkSerializableAsJson({ name: `${name}.${subName}`, value: subValue, message });
|
|
@@ -1302,7 +1302,7 @@
|
|
|
1302
1302
|
modelVariant: 'COMPLETION',
|
|
1303
1303
|
modelTitle: 'davinci-002',
|
|
1304
1304
|
modelName: 'davinci-002',
|
|
1305
|
-
modelDescription: 'Legacy completion model with
|
|
1305
|
+
modelDescription: 'Legacy completion model with 4K token context window. Excels at complex text generation, creative writing, and detailed content creation with strong contextual understanding. Optimized for instructions requiring nuanced outputs and extended reasoning. Suitable for applications needing high-quality text generation without conversation management.',
|
|
1306
1306
|
pricing: {
|
|
1307
1307
|
prompt: pricing(`$2.00 / 1M tokens`),
|
|
1308
1308
|
output: pricing(`$2.00 / 1M tokens`),
|
|
@@ -1320,7 +1320,7 @@
|
|
|
1320
1320
|
modelVariant: 'CHAT',
|
|
1321
1321
|
modelTitle: 'gpt-3.5-turbo-16k',
|
|
1322
1322
|
modelName: 'gpt-3.5-turbo-16k',
|
|
1323
|
-
modelDescription: 'GPT-3.5 Turbo with
|
|
1323
|
+
modelDescription: 'Extended context GPT-3.5 Turbo with 16K token window. Maintains core capabilities of standard 3.5 Turbo while supporting longer conversations and documents. Features good balance of performance and cost for applications requiring more context than standard 4K models. Effective for document analysis, extended conversations, and multi-step reasoning tasks.',
|
|
1324
1324
|
pricing: {
|
|
1325
1325
|
prompt: pricing(`$3.00 / 1M tokens`),
|
|
1326
1326
|
output: pricing(`$4.00 / 1M tokens`),
|
|
@@ -1344,7 +1344,7 @@
|
|
|
1344
1344
|
modelVariant: 'CHAT',
|
|
1345
1345
|
modelTitle: 'gpt-4',
|
|
1346
1346
|
modelName: 'gpt-4',
|
|
1347
|
-
modelDescription: '
|
|
1347
|
+
modelDescription: 'Powerful language model with 8K context window featuring sophisticated reasoning, instruction-following, and knowledge capabilities. Demonstrates strong performance on complex tasks requiring deep understanding and multi-step reasoning. Excels at code generation, logical analysis, and nuanced content creation. Suitable for advanced applications requiring high-quality outputs.',
|
|
1348
1348
|
pricing: {
|
|
1349
1349
|
prompt: pricing(`$30.00 / 1M tokens`),
|
|
1350
1350
|
output: pricing(`$60.00 / 1M tokens`),
|
|
@@ -1356,7 +1356,7 @@
|
|
|
1356
1356
|
modelVariant: 'CHAT',
|
|
1357
1357
|
modelTitle: 'gpt-4-32k',
|
|
1358
1358
|
modelName: 'gpt-4-32k',
|
|
1359
|
-
modelDescription: 'Extended context version of GPT-4 with
|
|
1359
|
+
modelDescription: 'Extended context version of GPT-4 with 32K token window. Maintains all capabilities of standard GPT-4 while supporting analysis of very lengthy documents, code bases, and conversations. Features enhanced ability to maintain context over long interactions and process detailed information from large inputs. Ideal for document analysis, legal review, and complex problem-solving.',
|
|
1360
1360
|
pricing: {
|
|
1361
1361
|
prompt: pricing(`$60.00 / 1M tokens`),
|
|
1362
1362
|
output: pricing(`$120.00 / 1M tokens`),
|
|
@@ -1379,7 +1379,7 @@
|
|
|
1379
1379
|
modelVariant: 'CHAT',
|
|
1380
1380
|
modelTitle: 'gpt-4-turbo-2024-04-09',
|
|
1381
1381
|
modelName: 'gpt-4-turbo-2024-04-09',
|
|
1382
|
-
modelDescription: 'Latest stable GPT-4 Turbo
|
|
1382
|
+
modelDescription: 'Latest stable GPT-4 Turbo from April 2024 with 128K context window. Features enhanced reasoning chains, improved factual accuracy with 40% reduction in hallucinations, and better instruction following compared to earlier versions. Includes advanced function calling capabilities and knowledge up to April 2024. Provides optimal performance for enterprise applications requiring reliability.',
|
|
1383
1383
|
pricing: {
|
|
1384
1384
|
prompt: pricing(`$10.00 / 1M tokens`),
|
|
1385
1385
|
output: pricing(`$30.00 / 1M tokens`),
|
|
@@ -1391,7 +1391,7 @@
|
|
|
1391
1391
|
modelVariant: 'CHAT',
|
|
1392
1392
|
modelTitle: 'gpt-3.5-turbo-1106',
|
|
1393
1393
|
modelName: 'gpt-3.5-turbo-1106',
|
|
1394
|
-
modelDescription: 'November 2023 version of GPT-3.5 Turbo with improved instruction following and
|
|
1394
|
+
modelDescription: 'November 2023 version of GPT-3.5 Turbo with 16K token context window. Features improved instruction following, more consistent output formatting, and enhanced function calling capabilities. Includes knowledge cutoff from April 2023. Suitable for applications requiring good performance at lower cost than GPT-4 models.',
|
|
1395
1395
|
pricing: {
|
|
1396
1396
|
prompt: pricing(`$1.00 / 1M tokens`),
|
|
1397
1397
|
output: pricing(`$2.00 / 1M tokens`),
|
|
@@ -1403,7 +1403,7 @@
|
|
|
1403
1403
|
modelVariant: 'CHAT',
|
|
1404
1404
|
modelTitle: 'gpt-4-turbo',
|
|
1405
1405
|
modelName: 'gpt-4-turbo',
|
|
1406
|
-
modelDescription: 'More capable
|
|
1406
|
+
modelDescription: 'More capable and cost-efficient version of GPT-4 with 128K token context window. Features improved instruction following, advanced function calling capabilities, and better performance on coding tasks. Maintains superior reasoning and knowledge while offering substantial cost reduction compared to base GPT-4. Ideal for complex applications requiring extensive context processing.',
|
|
1407
1407
|
pricing: {
|
|
1408
1408
|
prompt: pricing(`$10.00 / 1M tokens`),
|
|
1409
1409
|
output: pricing(`$30.00 / 1M tokens`),
|
|
@@ -1415,10 +1415,10 @@
|
|
|
1415
1415
|
modelVariant: 'COMPLETION',
|
|
1416
1416
|
modelTitle: 'gpt-3.5-turbo-instruct-0914',
|
|
1417
1417
|
modelName: 'gpt-3.5-turbo-instruct-0914',
|
|
1418
|
-
modelDescription: 'September 2023 version of GPT-3.5 Turbo
|
|
1418
|
+
modelDescription: 'September 2023 version of GPT-3.5 Turbo Instruct with 4K context window. Optimized for completion-style instruction following with deterministic responses. Better suited than chat models for applications requiring specific formatted outputs without conversation management. Knowledge cutoff from September 2021.',
|
|
1419
1419
|
pricing: {
|
|
1420
1420
|
prompt: pricing(`$1.50 / 1M tokens`),
|
|
1421
|
-
output: pricing(`$2.00 / 1M tokens`),
|
|
1421
|
+
output: pricing(`$2.00 / 1M tokens`),
|
|
1422
1422
|
},
|
|
1423
1423
|
},
|
|
1424
1424
|
/**/
|
|
@@ -1427,7 +1427,7 @@
|
|
|
1427
1427
|
modelVariant: 'COMPLETION',
|
|
1428
1428
|
modelTitle: 'gpt-3.5-turbo-instruct',
|
|
1429
1429
|
modelName: 'gpt-3.5-turbo-instruct',
|
|
1430
|
-
modelDescription: 'Optimized version of GPT-3.5 for completion-style API with
|
|
1430
|
+
modelDescription: 'Optimized version of GPT-3.5 for completion-style API with 4K token context window. Features strong instruction following with single-turn design rather than multi-turn conversation. Provides more consistent, deterministic outputs compared to chat models. Well-suited for templated content generation and structured text transformation tasks.',
|
|
1431
1431
|
pricing: {
|
|
1432
1432
|
prompt: pricing(`$1.50 / 1M tokens`),
|
|
1433
1433
|
output: pricing(`$2.00 / 1M tokens`),
|
|
@@ -1445,7 +1445,7 @@
|
|
|
1445
1445
|
modelVariant: 'CHAT',
|
|
1446
1446
|
modelTitle: 'gpt-3.5-turbo',
|
|
1447
1447
|
modelName: 'gpt-3.5-turbo',
|
|
1448
|
-
modelDescription: 'Latest version of GPT-3.5 Turbo with improved performance
|
|
1448
|
+
modelDescription: 'Latest version of GPT-3.5 Turbo with 4K token default context window (16K available). Features continually improved performance with enhanced instruction following and reduced hallucinations. Offers excellent balance between capability and cost efficiency. Suitable for most general-purpose applications requiring good AI capabilities at reasonable cost.',
|
|
1449
1449
|
pricing: {
|
|
1450
1450
|
prompt: pricing(`$0.50 / 1M tokens`),
|
|
1451
1451
|
output: pricing(`$1.50 / 1M tokens`),
|
|
@@ -1457,7 +1457,7 @@
|
|
|
1457
1457
|
modelVariant: 'CHAT',
|
|
1458
1458
|
modelTitle: 'gpt-3.5-turbo-0301',
|
|
1459
1459
|
modelName: 'gpt-3.5-turbo-0301',
|
|
1460
|
-
modelDescription: 'March 2023 version of GPT-3.5 Turbo with
|
|
1460
|
+
modelDescription: 'March 2023 version of GPT-3.5 Turbo with 4K token context window. Legacy model maintained for backward compatibility with specific application behaviors. Features solid conversational abilities and basic instruction following. Knowledge cutoff from September 2021. Suitable for applications explicitly designed for this version.',
|
|
1461
1461
|
pricing: {
|
|
1462
1462
|
prompt: pricing(`$1.50 / 1M tokens`),
|
|
1463
1463
|
output: pricing(`$2.00 / 1M tokens`),
|
|
@@ -1469,7 +1469,7 @@
|
|
|
1469
1469
|
modelVariant: 'COMPLETION',
|
|
1470
1470
|
modelTitle: 'babbage-002',
|
|
1471
1471
|
modelName: 'babbage-002',
|
|
1472
|
-
modelDescription: 'Efficient legacy completion model with
|
|
1472
|
+
modelDescription: 'Efficient legacy completion model with 4K context window balancing performance and speed. Features moderate reasoning capabilities with focus on straightforward text generation tasks. Significantly more efficient than davinci models while maintaining adequate quality for many applications. Suitable for high-volume, cost-sensitive text generation needs.',
|
|
1473
1473
|
pricing: {
|
|
1474
1474
|
prompt: pricing(`$0.40 / 1M tokens`),
|
|
1475
1475
|
output: pricing(`$0.40 / 1M tokens`),
|
|
@@ -1481,7 +1481,7 @@
|
|
|
1481
1481
|
modelVariant: 'CHAT',
|
|
1482
1482
|
modelTitle: 'gpt-4-1106-preview',
|
|
1483
1483
|
modelName: 'gpt-4-1106-preview',
|
|
1484
|
-
modelDescription: 'November 2023 preview version of GPT-4 Turbo with improved instruction following and
|
|
1484
|
+
modelDescription: 'November 2023 preview version of GPT-4 Turbo with 128K token context window. Features improved instruction following, better function calling capabilities, and enhanced reasoning. Includes knowledge cutoff from April 2023. Suitable for complex applications requiring extensive document understanding and sophisticated interactions.',
|
|
1485
1485
|
pricing: {
|
|
1486
1486
|
prompt: pricing(`$10.00 / 1M tokens`),
|
|
1487
1487
|
output: pricing(`$30.00 / 1M tokens`),
|
|
@@ -1493,7 +1493,7 @@
|
|
|
1493
1493
|
modelVariant: 'CHAT',
|
|
1494
1494
|
modelTitle: 'gpt-4-0125-preview',
|
|
1495
1495
|
modelName: 'gpt-4-0125-preview',
|
|
1496
|
-
modelDescription: 'January 2024 preview version of GPT-4 Turbo with improved reasoning capabilities and
|
|
1496
|
+
modelDescription: 'January 2024 preview version of GPT-4 Turbo with 128K token context window. Features improved reasoning capabilities, enhanced tool use, and more reliable function calling. Includes knowledge cutoff from October 2023. Offers better performance on complex logical tasks and more consistent outputs than previous preview versions.',
|
|
1497
1497
|
pricing: {
|
|
1498
1498
|
prompt: pricing(`$10.00 / 1M tokens`),
|
|
1499
1499
|
output: pricing(`$30.00 / 1M tokens`),
|
|
@@ -1511,7 +1511,7 @@
|
|
|
1511
1511
|
modelVariant: 'CHAT',
|
|
1512
1512
|
modelTitle: 'gpt-3.5-turbo-0125',
|
|
1513
1513
|
modelName: 'gpt-3.5-turbo-0125',
|
|
1514
|
-
modelDescription: 'January 2024 version of GPT-3.5 Turbo with improved reasoning capabilities and
|
|
1514
|
+
modelDescription: 'January 2024 version of GPT-3.5 Turbo with 16K token context window. Features improved reasoning capabilities, better instruction adherence, and reduced hallucinations compared to previous versions. Includes knowledge cutoff from September 2021. Provides good performance for most general applications at reasonable cost.',
|
|
1515
1515
|
pricing: {
|
|
1516
1516
|
prompt: pricing(`$0.50 / 1M tokens`),
|
|
1517
1517
|
output: pricing(`$1.50 / 1M tokens`),
|
|
@@ -1523,7 +1523,7 @@
|
|
|
1523
1523
|
modelVariant: 'CHAT',
|
|
1524
1524
|
modelTitle: 'gpt-4-turbo-preview',
|
|
1525
1525
|
modelName: 'gpt-4-turbo-preview',
|
|
1526
|
-
modelDescription: 'Preview version of GPT-4 Turbo that points to the latest model
|
|
1526
|
+
modelDescription: 'Preview version of GPT-4 Turbo with 128K token context window that points to the latest development model. Features cutting-edge improvements to instruction following, knowledge representation, and tool use capabilities. Provides access to newest features but may have occasional behavior changes. Best for non-critical applications wanting latest capabilities.',
|
|
1527
1527
|
pricing: {
|
|
1528
1528
|
prompt: pricing(`$10.00 / 1M tokens`),
|
|
1529
1529
|
output: pricing(`$30.00 / 1M tokens`),
|
|
@@ -1535,11 +1535,10 @@
|
|
|
1535
1535
|
modelVariant: 'EMBEDDING',
|
|
1536
1536
|
modelTitle: 'text-embedding-3-large',
|
|
1537
1537
|
modelName: 'text-embedding-3-large',
|
|
1538
|
-
modelDescription: "OpenAI's most capable text embedding model
|
|
1538
|
+
modelDescription: "OpenAI's most capable text embedding model generating 3072-dimensional vectors. Designed for high-quality embeddings for complex similarity tasks, clustering, and information retrieval. Features enhanced cross-lingual capabilities and significantly improved performance on retrieval and classification benchmarks. Ideal for sophisticated RAG systems and semantic search applications.",
|
|
1539
1539
|
pricing: {
|
|
1540
1540
|
prompt: pricing(`$0.13 / 1M tokens`),
|
|
1541
|
-
|
|
1542
|
-
output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
|
|
1541
|
+
output: 0,
|
|
1543
1542
|
},
|
|
1544
1543
|
},
|
|
1545
1544
|
/**/
|
|
@@ -1548,11 +1547,10 @@
|
|
|
1548
1547
|
modelVariant: 'EMBEDDING',
|
|
1549
1548
|
modelTitle: 'text-embedding-3-small',
|
|
1550
1549
|
modelName: 'text-embedding-3-small',
|
|
1551
|
-
modelDescription: 'Cost-effective embedding model
|
|
1550
|
+
modelDescription: 'Cost-effective embedding model generating 1536-dimensional vectors. Balances quality and efficiency for simpler tasks while maintaining good performance on text similarity and retrieval applications. Offers 20% better quality than ada-002 at significantly lower cost. Ideal for production embedding applications with cost constraints.',
|
|
1552
1551
|
pricing: {
|
|
1553
1552
|
prompt: pricing(`$0.02 / 1M tokens`),
|
|
1554
|
-
|
|
1555
|
-
output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
|
|
1553
|
+
output: 0,
|
|
1556
1554
|
},
|
|
1557
1555
|
},
|
|
1558
1556
|
/**/
|
|
@@ -1561,7 +1559,7 @@
|
|
|
1561
1559
|
modelVariant: 'CHAT',
|
|
1562
1560
|
modelTitle: 'gpt-3.5-turbo-0613',
|
|
1563
1561
|
modelName: 'gpt-3.5-turbo-0613',
|
|
1564
|
-
modelDescription:
|
|
1562
|
+
modelDescription: "June 2023 version of GPT-3.5 Turbo with 4K token context window. Features function calling capabilities for structured data extraction and API interaction. Includes knowledge cutoff from September 2021. Maintained for applications specifically designed for this version's behaviors and capabilities.",
|
|
1565
1563
|
pricing: {
|
|
1566
1564
|
prompt: pricing(`$1.50 / 1M tokens`),
|
|
1567
1565
|
output: pricing(`$2.00 / 1M tokens`),
|
|
@@ -1573,11 +1571,10 @@
|
|
|
1573
1571
|
modelVariant: 'EMBEDDING',
|
|
1574
1572
|
modelTitle: 'text-embedding-ada-002',
|
|
1575
1573
|
modelName: 'text-embedding-ada-002',
|
|
1576
|
-
modelDescription: 'Legacy text embedding model suitable for text similarity and retrieval
|
|
1574
|
+
modelDescription: 'Legacy text embedding model generating 1536-dimensional vectors suitable for text similarity and retrieval applications. Processes up to 8K tokens per request with consistent embedding quality. While superseded by newer embedding-3 models, still maintains adequate performance for many semantic search and classification tasks.',
|
|
1577
1575
|
pricing: {
|
|
1578
1576
|
prompt: pricing(`$0.1 / 1M tokens`),
|
|
1579
|
-
|
|
1580
|
-
output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
|
|
1577
|
+
output: 0,
|
|
1581
1578
|
},
|
|
1582
1579
|
},
|
|
1583
1580
|
/**/
|
|
@@ -1604,7 +1601,7 @@
|
|
|
1604
1601
|
modelVariant: 'CHAT',
|
|
1605
1602
|
modelTitle: 'gpt-4o-2024-05-13',
|
|
1606
1603
|
modelName: 'gpt-4o-2024-05-13',
|
|
1607
|
-
modelDescription: 'May 2024 version of GPT-4o with enhanced multimodal capabilities
|
|
1604
|
+
modelDescription: 'May 2024 version of GPT-4o with 128K context window. Features enhanced multimodal capabilities including superior image understanding (up to 20MP), audio processing, and improved reasoning. Optimized for 2x lower latency than GPT-4 Turbo while maintaining high performance. Includes knowledge up to October 2023. Ideal for production applications requiring reliable multimodal capabilities.',
|
|
1608
1605
|
pricing: {
|
|
1609
1606
|
prompt: pricing(`$5.00 / 1M tokens`),
|
|
1610
1607
|
output: pricing(`$15.00 / 1M tokens`),
|
|
@@ -1616,7 +1613,7 @@
|
|
|
1616
1613
|
modelVariant: 'CHAT',
|
|
1617
1614
|
modelTitle: 'gpt-4o',
|
|
1618
1615
|
modelName: 'gpt-4o',
|
|
1619
|
-
modelDescription: "OpenAI's most advanced multimodal model
|
|
1616
|
+
modelDescription: "OpenAI's most advanced general-purpose multimodal model with 128K context window. Optimized for balanced performance, speed, and cost with 2x faster responses than GPT-4 Turbo. Features excellent vision processing, audio understanding, reasoning, and text generation quality. Represents optimal balance of capability and efficiency for most advanced applications.",
|
|
1620
1617
|
pricing: {
|
|
1621
1618
|
prompt: pricing(`$5.00 / 1M tokens`),
|
|
1622
1619
|
output: pricing(`$15.00 / 1M tokens`),
|
|
@@ -1628,7 +1625,7 @@
|
|
|
1628
1625
|
modelVariant: 'CHAT',
|
|
1629
1626
|
modelTitle: 'gpt-4o-mini',
|
|
1630
1627
|
modelName: 'gpt-4o-mini',
|
|
1631
|
-
modelDescription: 'Smaller, more cost-effective version of GPT-4o with
|
|
1628
|
+
modelDescription: 'Smaller, more cost-effective version of GPT-4o with 128K context window. Maintains impressive capabilities across text, vision, and audio tasks while operating at significantly lower cost. Features 3x faster inference than GPT-4o with good performance on general tasks. Excellent for applications requiring good quality multimodal capabilities at scale.',
|
|
1632
1629
|
pricing: {
|
|
1633
1630
|
prompt: pricing(`$0.15 / 1M tokens`),
|
|
1634
1631
|
output: pricing(`$0.60 / 1M tokens`),
|
|
@@ -1640,7 +1637,7 @@
|
|
|
1640
1637
|
modelVariant: 'CHAT',
|
|
1641
1638
|
modelTitle: 'o1-preview',
|
|
1642
1639
|
modelName: 'o1-preview',
|
|
1643
|
-
modelDescription: 'Advanced reasoning model with
|
|
1640
|
+
modelDescription: 'Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Features exceptional step-by-step problem-solving capabilities, advanced mathematical and scientific reasoning, and superior performance on STEM-focused problems. Significantly outperforms GPT-4 on quantitative reasoning benchmarks. Ideal for professional and specialized applications.',
|
|
1644
1641
|
pricing: {
|
|
1645
1642
|
prompt: pricing(`$15.00 / 1M tokens`),
|
|
1646
1643
|
output: pricing(`$60.00 / 1M tokens`),
|
|
@@ -1652,8 +1649,7 @@
|
|
|
1652
1649
|
modelVariant: 'CHAT',
|
|
1653
1650
|
modelTitle: 'o1-preview-2024-09-12',
|
|
1654
1651
|
modelName: 'o1-preview-2024-09-12',
|
|
1655
|
-
modelDescription: 'September 2024 version of O1 preview with specialized reasoning capabilities for
|
|
1656
|
-
// <- TODO: [💩] Some better system to organize these date suffixes and versions
|
|
1652
|
+
modelDescription: 'September 2024 version of O1 preview with 128K context window. Features specialized reasoning capabilities with 30% improvement on mathematical and scientific accuracy over previous versions. Includes enhanced support for formal logic, statistical analysis, and technical domains. Optimized for professional applications requiring precise analytical thinking and rigorous methodologies.',
|
|
1657
1653
|
pricing: {
|
|
1658
1654
|
prompt: pricing(`$15.00 / 1M tokens`),
|
|
1659
1655
|
output: pricing(`$60.00 / 1M tokens`),
|
|
@@ -1665,7 +1661,7 @@
|
|
|
1665
1661
|
modelVariant: 'CHAT',
|
|
1666
1662
|
modelTitle: 'o1-mini',
|
|
1667
1663
|
modelName: 'o1-mini',
|
|
1668
|
-
modelDescription: 'Smaller, cost-effective version of the O1 model with good performance on
|
|
1664
|
+
modelDescription: 'Smaller, cost-effective version of the O1 model with 128K context window. Maintains strong analytical reasoning abilities while reducing computational requirements by 70%. Features good performance on mathematical, logical, and scientific tasks at significantly lower cost than full O1. Excellent for everyday analytical applications that benefit from reasoning focus.',
|
|
1669
1665
|
pricing: {
|
|
1670
1666
|
prompt: pricing(`$3.00 / 1M tokens`),
|
|
1671
1667
|
output: pricing(`$12.00 / 1M tokens`),
|
|
@@ -1677,7 +1673,7 @@
|
|
|
1677
1673
|
modelVariant: 'CHAT',
|
|
1678
1674
|
modelTitle: 'o1',
|
|
1679
1675
|
modelName: 'o1',
|
|
1680
|
-
modelDescription: "OpenAI's advanced reasoning model
|
|
1676
|
+
modelDescription: "OpenAI's advanced reasoning model with 128K context window focusing on logical problem-solving and analytical thinking. Features exceptional performance on quantitative tasks, step-by-step deduction, and complex technical problems. Maintains 95%+ of o1-preview capabilities with production-ready stability. Ideal for scientific computing, financial analysis, and professional applications.",
|
|
1681
1677
|
pricing: {
|
|
1682
1678
|
prompt: pricing(`$15.00 / 1M tokens`),
|
|
1683
1679
|
output: pricing(`$60.00 / 1M tokens`),
|
|
@@ -1689,11 +1685,10 @@
|
|
|
1689
1685
|
modelVariant: 'CHAT',
|
|
1690
1686
|
modelTitle: 'o3-mini',
|
|
1691
1687
|
modelName: 'o3-mini',
|
|
1692
|
-
modelDescription: 'Cost-effective reasoning model optimized for academic and scientific problem-solving.
|
|
1688
|
+
modelDescription: 'Cost-effective reasoning model with 128K context window optimized for academic and scientific problem-solving. Features efficient performance on STEM tasks with specialized capabilities in mathematics, physics, chemistry, and computer science. Offers 80% of O1 performance on technical domains at significantly lower cost. Ideal for educational applications and research support.',
|
|
1693
1689
|
pricing: {
|
|
1694
1690
|
prompt: pricing(`$3.00 / 1M tokens`),
|
|
1695
1691
|
output: pricing(`$12.00 / 1M tokens`),
|
|
1696
|
-
// <- TODO: !! Unsure, check the pricing
|
|
1697
1692
|
},
|
|
1698
1693
|
},
|
|
1699
1694
|
/**/
|
|
@@ -1702,7 +1697,7 @@
|
|
|
1702
1697
|
modelVariant: 'CHAT',
|
|
1703
1698
|
modelTitle: 'o1-mini-2024-09-12',
|
|
1704
1699
|
modelName: 'o1-mini-2024-09-12',
|
|
1705
|
-
modelDescription: "September 2024 version of O1-mini with balanced reasoning capabilities and cost-efficiency.
|
|
1700
|
+
modelDescription: "September 2024 version of O1-mini with 128K context window featuring balanced reasoning capabilities and cost-efficiency. Includes 25% improvement in mathematical accuracy and enhanced performance on coding tasks compared to previous versions. Maintains efficient resource utilization while delivering improved results for analytical applications that don't require the full O1 model.",
|
|
1706
1701
|
pricing: {
|
|
1707
1702
|
prompt: pricing(`$3.00 / 1M tokens`),
|
|
1708
1703
|
output: pricing(`$12.00 / 1M tokens`),
|
|
@@ -1714,7 +1709,7 @@
|
|
|
1714
1709
|
modelVariant: 'CHAT',
|
|
1715
1710
|
modelTitle: 'gpt-3.5-turbo-16k-0613',
|
|
1716
1711
|
modelName: 'gpt-3.5-turbo-16k-0613',
|
|
1717
|
-
modelDescription:
|
|
1712
|
+
modelDescription: "June 2023 version of GPT-3.5 Turbo with extended 16K token context window. Features good handling of longer conversations and documents with improved memory management across extended contexts. Includes knowledge cutoff from September 2021. Maintained for applications specifically designed for this version's behaviors and capabilities.",
|
|
1718
1713
|
pricing: {
|
|
1719
1714
|
prompt: pricing(`$3.00 / 1M tokens`),
|
|
1720
1715
|
output: pricing(`$4.00 / 1M tokens`),
|
|
@@ -2249,7 +2244,7 @@
|
|
|
2249
2244
|
});
|
|
2250
2245
|
const rawRequest = {
|
|
2251
2246
|
// TODO: [👨👨👧👧] ...modelSettings,
|
|
2252
|
-
// TODO: [👨👨👧👧][🧠] What about system message for assistants, does it make
|
|
2247
|
+
// TODO: [👨👨👧👧][🧠] What about system message for assistants, does it make sense - combination of OpenAI assistants with Promptbook Personas
|
|
2253
2248
|
assistant_id: this.assistantId,
|
|
2254
2249
|
thread: {
|
|
2255
2250
|
messages: [
|
|
@@ -2337,7 +2332,7 @@
|
|
|
2337
2332
|
}
|
|
2338
2333
|
}
|
|
2339
2334
|
/**
|
|
2340
|
-
* TODO: [🧠][🧙♂️] Maybe there can be some
|
|
2335
|
+
* TODO: [🧠][🧙♂️] Maybe there can be some wizard for those who want to use just OpenAI
|
|
2341
2336
|
* TODO: Maybe make custom OpenAiError
|
|
2342
2337
|
* TODO: [🧠][🈁] Maybe use `isDeterministic` from options
|
|
2343
2338
|
* TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
|
|
@@ -2530,7 +2525,7 @@
|
|
|
2530
2525
|
* Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
|
|
2531
2526
|
*
|
|
2532
2527
|
* @public exported from `@promptbook/openai`
|
|
2533
|
-
* @public exported from `@promptbook/
|
|
2528
|
+
* @public exported from `@promptbook/wizard`
|
|
2534
2529
|
* @public exported from `@promptbook/cli`
|
|
2535
2530
|
*/
|
|
2536
2531
|
const _OpenAiRegistration = $llmToolsRegister.register(createOpenAiExecutionTools);
|
|
@@ -2540,7 +2535,7 @@
|
|
|
2540
2535
|
* Note: [🏐] Configurations registrations are done in register-constructor.ts BUT constructor register-constructor.ts
|
|
2541
2536
|
*
|
|
2542
2537
|
* @public exported from `@promptbook/openai`
|
|
2543
|
-
* @public exported from `@promptbook/
|
|
2538
|
+
* @public exported from `@promptbook/wizard`
|
|
2544
2539
|
* @public exported from `@promptbook/cli`
|
|
2545
2540
|
*/
|
|
2546
2541
|
const _OpenAiAssistantRegistration = $llmToolsRegister.register(createOpenAiAssistantExecutionTools);
|