@botpress/cognitive 0.1.42 → 0.1.43

This diff compares the contents of publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
@@ -1,28 +1,28 @@
 
- > @botpress/cognitive@0.1.42 build /home/runner/work/botpress/botpress/packages/cognitive
+ > @botpress/cognitive@0.1.43 build /home/runner/work/botpress/botpress/packages/cognitive
  > pnpm build:type && pnpm build:neutral && size-limit
 
 
- > @botpress/cognitive@0.1.42 build:type /home/runner/work/botpress/botpress/packages/cognitive
+ > @botpress/cognitive@0.1.43 build:type /home/runner/work/botpress/botpress/packages/cognitive
  > tsup --tsconfig tsconfig.build.json ./src/index.ts --dts-resolve --dts-only --clean
 
  CLI Building entry: ./src/index.ts
  CLI Using tsconfig: tsconfig.build.json
  CLI tsup v8.0.2
  DTS Build start
- DTS ⚡️ Build success in 9802ms
- DTS dist/index.d.ts 625.60 KB
+ DTS ⚡️ Build success in 9146ms
+ DTS dist/index.d.ts 625.78 KB
 
- > @botpress/cognitive@0.1.42 build:neutral /home/runner/work/botpress/botpress/packages/cognitive
+ > @botpress/cognitive@0.1.43 build:neutral /home/runner/work/botpress/botpress/packages/cognitive
  > ts-node -T ./build.ts --neutral
 
  Done
 
  dist/index.cjs
  Size limit: 50 kB
- Size: 14.24 kB brotlied
+ Size: 14.49 kB brotlied
 
  dist/index.mjs
  Size limit: 50 kB
- Size: 14.09 kB brotlied
+ Size: 14.37 kB brotlied
 
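The size-limit step above enforces a 50 kB budget on both bundles, which come in around 14 kB brotli-compressed. The package's actual size-limit configuration is not part of this diff; the snippet below is only a sketch of how such a budget is commonly declared for the size-limit tool, with the file paths and limit taken from the log above.

```js
// Hypothetical .size-limit.cjs for illustration — the real config of
// @botpress/cognitive is not shown in this diff.
module.exports = [
  // `brotli: true` is assumed because the log reports brotli-compressed sizes.
  { path: 'dist/index.cjs', limit: '50 kB', brotli: true },
  { path: 'dist/index.mjs', limit: '50 kB', brotli: true },
]
```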
package/dist/index.cjs CHANGED
@@ -1327,6 +1327,96 @@ var models = {
  tags: ["low-cost", "general-purpose"],
  lifecycle: "live"
  },
+ "xai:grok-code-fast-1": {
+ id: "xai:grok-code-fast-1",
+ name: "Grok Code Fast 1",
+ description: "Fast coding-optimized Grok model with large context window.",
+ input: {
+ maxTokens: 256e3,
+ costPer1MTokens: 0.2
+ },
+ output: {
+ maxTokens: 32768,
+ costPer1MTokens: 1.5
+ },
+ tags: ["coding", "general-purpose", "low-cost"],
+ lifecycle: "live"
+ },
+ "xai:grok-4-fast-reasoning": {
+ id: "xai:grok-4-fast-reasoning",
+ name: "Grok 4 Fast (Reasoning)",
+ description: "Advanced fast Grok model with reasoning and very large context.",
+ input: {
+ maxTokens: 2e6,
+ costPer1MTokens: 0.2
+ },
+ output: {
+ maxTokens: 128e3,
+ costPer1MTokens: 0.5
+ },
+ tags: ["reasoning", "recommended", "general-purpose"],
+ lifecycle: "live"
+ },
+ "xai:grok-4-fast-non-reasoning": {
+ id: "xai:grok-4-fast-non-reasoning",
+ name: "Grok 4 Fast (Non-Reasoning)",
+ description: "Fast, cost-effective Grok model for non-reasoning tasks.",
+ input: {
+ maxTokens: 2e6,
+ costPer1MTokens: 0.2
+ },
+ output: {
+ maxTokens: 128e3,
+ costPer1MTokens: 0.5
+ },
+ tags: ["low-cost", "recommended", "general-purpose"],
+ lifecycle: "live"
+ },
+ "xai:grok-4-0709": {
+ id: "xai:grok-4-0709",
+ name: "Grok 4 (0709)",
+ description: "Comprehensive Grok 4 model for general-purpose tasks.",
+ input: {
+ maxTokens: 256e3,
+ costPer1MTokens: 3
+ },
+ output: {
+ maxTokens: 32768,
+ costPer1MTokens: 15
+ },
+ tags: ["reasoning", "general-purpose"],
+ lifecycle: "live"
+ },
+ "xai:grok-3-mini": {
+ id: "xai:grok-3-mini",
+ name: "Grok 3 Mini",
+ description: "Lightweight Grok model for cost-sensitive workloads.",
+ input: {
+ maxTokens: 131072,
+ costPer1MTokens: 0.3
+ },
+ output: {
+ maxTokens: 16384,
+ costPer1MTokens: 0.5
+ },
+ tags: ["low-cost", "general-purpose"],
+ lifecycle: "live"
+ },
+ "xai:grok-3": {
+ id: "xai:grok-3",
+ name: "Grok 3",
+ description: "Enterprise-grade Grok model for general-purpose tasks.",
+ input: {
+ maxTokens: 131072,
+ costPer1MTokens: 3
+ },
+ output: {
+ maxTokens: 16384,
+ costPer1MTokens: 15
+ },
+ tags: ["general-purpose"],
+ lifecycle: "live"
+ },
  "openrouter:gpt-oss-120b": {
  id: "openrouter:gpt-oss-120b",
  name: "GPT-OSS 120B (Preview)",
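The hunk above adds six xAI Grok entries to the bundled `models` map, each keyed by a `provider:model` id and carrying per-direction token limits and per-million-token prices. As a rough illustration of how those fields combine, the sketch below looks up one of the new entries and estimates the cost of a single call; the token counts are made up for the example, and only the `models` map and its fields come from the code above.

```js
// Cost estimate using the fields shown above; `models` refers to the object
// defined in dist/index.cjs, which this hunk extends.
const grok = models['xai:grok-4-fast-reasoning']

const inputTokens = 120_000 // hypothetical prompt size (input.maxTokens allows up to 2e6)
const outputTokens = 2_000  // hypothetical completion size (output.maxTokens allows up to 128e3)

// Prices are per one million tokens, so scale the counts by 1e6.
const costUsd =
  (inputTokens / 1e6) * grok.input.costPer1MTokens +  // 0.12 * 0.2 = 0.024
  (outputTokens / 1e6) * grok.output.costPer1MTokens  // 0.002 * 0.5 = 0.001

console.log(costUsd.toFixed(3)) // "0.025"
```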
@@ -1342,8 +1432,8 @@ var models = {
  tags: ["preview", "general-purpose", "reasoning"],
  lifecycle: "live"
  },
- "fireworks:gpt-oss-20b": {
- id: "fireworks:gpt-oss-20b",
+ "fireworks-ai:gpt-oss-20b": {
+ id: "fireworks-ai:gpt-oss-20b",
  name: "GPT-OSS 20B",
  description: "gpt-oss-20b is a compact, open-weight language model optimized for low-latency. It shares the same training foundation and capabilities as the GPT-OSS 120B model, with faster responses and lower cost.",
  input: {
@@ -1358,8 +1448,8 @@ var models = {
  lifecycle: "live",
  aliases: ["accounts/fireworks/models/gpt-oss-20b"]
  },
- "fireworks:gpt-oss-120b": {
- id: "fireworks:gpt-oss-120b",
+ "fireworks-ai:gpt-oss-120b": {
+ id: "fireworks-ai:gpt-oss-120b",
  name: "GPT-OSS 120B",
  description: "gpt-oss-120b is a high-performance, open-weight language model designed for production-grade, general-purpose use cases. It excels at complex reasoning and supports configurable reasoning effort, full chain-of-thought transparency for easier debugging and trust, and native agentic capabilities for function calling, tool use, and structured outputs.",
  input: {
@@ -1374,8 +1464,8 @@ var models = {
  lifecycle: "live",
  aliases: ["accounts/fireworks/models/gpt-oss-120b"]
  },
- "fireworks:deepseek-r1-0528": {
- id: "fireworks:deepseek-r1-0528",
+ "fireworks-ai:deepseek-r1-0528": {
+ id: "fireworks-ai:deepseek-r1-0528",
  name: "DeepSeek R1 0528",
  description: "The updated DeepSeek R1 0528 model delivers major improvements in reasoning, inference, and accuracy through enhanced post-training optimization and greater computational resources. It now performs at a level approaching top-tier models like OpenAI o3 and Gemini 2.5 Pro, with notable gains in complex tasks such as math and programming. The update also reduces hallucinations, improves function calling, and enhances the coding experience.",
  input: {
@@ -1390,8 +1480,8 @@ var models = {
  lifecycle: "live",
  aliases: ["accounts/fireworks/models/deepseek-r1-0528"]
  },
- "fireworks:deepseek-v3-0324": {
- id: "fireworks:deepseek-v3-0324",
+ "fireworks-ai:deepseek-v3-0324": {
+ id: "fireworks-ai:deepseek-v3-0324",
  name: "DeepSeek V3 0324",
  description: "DeepSeek V3, a 685B-parameter, mixture-of-experts model, is the latest iteration of the flagship chat model family from the DeepSeek team. It succeeds the DeepSeek V3 model and performs really well on a variety of tasks.",
  input: {
@@ -1406,8 +1496,8 @@ var models = {
  lifecycle: "live",
  aliases: ["accounts/fireworks/models/deepseek-v3-0324"]
  },
- "fireworks:llama4-maverick-instruct-basic": {
- id: "fireworks:llama4-maverick-instruct-basic",
+ "fireworks-ai:llama4-maverick-instruct-basic": {
+ id: "fireworks-ai:llama4-maverick-instruct-basic",
  name: "Llama 4 Maverick Instruct (Basic)",
  description: "Llama 4 Maverick 17B Instruct (128E) is a high-capacity multimodal language model from Meta, built on a mixture-of-experts (MoE) architecture with 128 experts and 17 billion active parameters per forward pass (400B total). It supports multilingual text and image input, and produces multilingual text and code output across 12 supported languages. Optimized for vision-language tasks, Maverick is instruction-tuned for assistant-like behavior, image reasoning, and general-purpose multimodal interaction, and suited for research and commercial applications requiring advanced multimodal understanding and high model throughput.",
  input: {
@@ -1422,8 +1512,8 @@ var models = {
  lifecycle: "live",
  aliases: ["accounts/fireworks/models/llama4-maverick-instruct-basic"]
  },
- "fireworks:llama4-scout-instruct-basic": {
- id: "fireworks:llama4-scout-instruct-basic",
+ "fireworks-ai:llama4-scout-instruct-basic": {
+ id: "fireworks-ai:llama4-scout-instruct-basic",
  name: "Llama 4 Scout Instruct (Basic)",
  description: "Llama 4 Scout 17B Instruct (16E) is a mixture-of-experts (MoE) language model developed by Meta, uses 16 experts per forward pass, activating 17 billion parameters out of a total of 109B. It supports native multimodal input (text and image) and multilingual output (text and code) across 12 supported languages. Designed for assistant-style interaction and visual reasoning, it is instruction-tuned for use in multilingual chat, captioning, and image understanding tasks.",
  input: {
@@ -1438,8 +1528,8 @@ var models = {
  lifecycle: "live",
  aliases: ["accounts/fireworks/models/llama4-scout-instruct-basic"]
  },
- "fireworks:llama-v3p3-70b-instruct": {
- id: "fireworks:llama-v3p3-70b-instruct",
+ "fireworks-ai:llama-v3p3-70b-instruct": {
+ id: "fireworks-ai:llama-v3p3-70b-instruct",
  name: "Llama 3.3 70B Instruct",
  description: "Llama 3.3 70B Instruct is the December update of Llama 3.1 70B. The model improves upon Llama 3.1 70B (released July 2024) with advances in tool calling, multilingual text support, math and coding. The model achieves industry leading results in reasoning, math and instruction following and provides similar performance as 3.1 405B but with significant speed and cost improvements.",
  input: {
@@ -1454,8 +1544,8 @@ var models = {
  lifecycle: "live",
  aliases: ["accounts/fireworks/models/llama-v3p3-70b-instruct"]
  },
- "fireworks:deepseek-r1": {
- id: "fireworks:deepseek-r1",
+ "fireworks-ai:deepseek-r1": {
+ id: "fireworks-ai:deepseek-r1",
  name: "DeepSeek R1 (Fast)",
  description: "This version of the R1 model has a perfect balance between speed and cost-efficiency for real-time interactive experiences, with speeds up to 90 tokens per second.\n\nDeepSeek-R1 is a state-of-the-art large language model optimized with reinforcement learning and cold-start data for exceptional reasoning, math, and code performance. **Note**: This model will always use a temperature of 0.6 as recommended by DeepSeek.",
  input: {
@@ -1470,8 +1560,8 @@ var models = {
  lifecycle: "live",
  aliases: ["accounts/fireworks/models/deepseek-r1"]
  },
- "fireworks:deepseek-r1-basic": {
- id: "fireworks:deepseek-r1-basic",
+ "fireworks-ai:deepseek-r1-basic": {
+ id: "fireworks-ai:deepseek-r1-basic",
  name: "DeepSeek R1 (Basic)",
  description: 'This version of the R1 model is optimized for throughput and cost-effectiveness and has a lower cost but slightly higher latency than the "Fast" version of the model.\n\nDeepSeek-R1 is a state-of-the-art large language model optimized with reinforcement learning and cold-start data for exceptional reasoning, math, and code performance. **Note**: This model will always use a temperature of 0.6 as recommended by DeepSeek.',
  input: {
@@ -1486,8 +1576,8 @@ var models = {
  lifecycle: "live",
  aliases: ["accounts/fireworks/models/deepseek-r1-basic"]
  },
- "fireworks:deepseek-v3": {
- id: "fireworks:deepseek-v3",
+ "fireworks-ai:deepseek-v3": {
+ id: "fireworks-ai:deepseek-v3",
  name: "DeepSeek V3",
  description: "A a strong Mixture-of-Experts (MoE) language model with 671B total parameters with 37B activated for each token from Deepseek.",
  input: {
@@ -1502,8 +1592,8 @@ var models = {
  lifecycle: "deprecated",
  aliases: ["accounts/fireworks/models/deepseek-v3"]
  },
- "fireworks:llama-v3p1-405b-instruct": {
- id: "fireworks:llama-v3p1-405b-instruct",
+ "fireworks-ai:llama-v3p1-405b-instruct": {
+ id: "fireworks-ai:llama-v3p1-405b-instruct",
  name: "Llama 3.1 405B Instruct",
  description: "The Meta Llama 3.1 collection of multilingual large language models (LLMs) is a collection of pretrained and instruction tuned generative models in 8B, 70B and 405B sizes. The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.",
  input: {
@@ -1518,8 +1608,8 @@ var models = {
  lifecycle: "deprecated",
  aliases: ["accounts/fireworks/models/llama-v3p1-405b-instruct"]
  },
- "fireworks:llama-v3p1-70b-instruct": {
- id: "fireworks:llama-v3p1-70b-instruct",
+ "fireworks-ai:llama-v3p1-70b-instruct": {
+ id: "fireworks-ai:llama-v3p1-70b-instruct",
  name: "Llama 3.1 70B Instruct",
  description: "The Meta Llama 3.1 collection of multilingual large language models (LLMs) is a collection of pretrained and instruction tuned generative models in 8B, 70B and 405B sizes. The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.",
  input: {
@@ -1534,8 +1624,8 @@ var models = {
  lifecycle: "deprecated",
  aliases: ["accounts/fireworks/models/llama-v3p1-70b-instruct"]
  },
- "fireworks:llama-v3p1-8b-instruct": {
- id: "fireworks:llama-v3p1-8b-instruct",
+ "fireworks-ai:llama-v3p1-8b-instruct": {
+ id: "fireworks-ai:llama-v3p1-8b-instruct",
  name: "Llama 3.1 8B Instruct",
  description: "The Meta Llama 3.1 collection of multilingual large language models (LLMs) is a collection of pretrained and instruction tuned generative models in 8B, 70B and 405B sizes. The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.",
  input: {
@@ -1550,8 +1640,8 @@ var models = {
  lifecycle: "live",
  aliases: ["accounts/fireworks/models/llama-v3p1-8b-instruct"]
  },
- "fireworks:mixtral-8x22b-instruct": {
- id: "fireworks:mixtral-8x22b-instruct",
+ "fireworks-ai:mixtral-8x22b-instruct": {
+ id: "fireworks-ai:mixtral-8x22b-instruct",
  name: "Mixtral MoE 8x22B Instruct",
  description: "Mistral MoE 8x22B Instruct v0.1 model with Sparse Mixture of Experts. Fine tuned for instruction following.",
  input: {
@@ -1566,8 +1656,8 @@ var models = {
  lifecycle: "live",
  aliases: ["accounts/fireworks/models/mixtral-8x22b-instruct"]
  },
- "fireworks:mixtral-8x7b-instruct": {
- id: "fireworks:mixtral-8x7b-instruct",
+ "fireworks-ai:mixtral-8x7b-instruct": {
+ id: "fireworks-ai:mixtral-8x7b-instruct",
  name: "Mixtral MoE 8x7B Instruct",
  description: "Mistral MoE 8x7B Instruct v0.1 model with Sparse Mixture of Experts. Fine tuned for instruction following",
  input: {
@@ -1582,8 +1672,8 @@ var models = {
  lifecycle: "live",
  aliases: ["accounts/fireworks/models/mixtral-8x7b-instruct"]
  },
- "fireworks:mythomax-l2-13b": {
- id: "fireworks:mythomax-l2-13b",
+ "fireworks-ai:mythomax-l2-13b": {
+ id: "fireworks-ai:mythomax-l2-13b",
  name: "MythoMax L2 13b",
  description: "MythoMax L2 is designed to excel at both roleplaying and storytelling, and is an improved variant of the previous MythoMix model, combining the MythoLogic-L2 and Huginn models.",
  input: {
@@ -1598,8 +1688,8 @@ var models = {
  lifecycle: "live",
  aliases: ["accounts/fireworks/models/mythomax-l2-13b"]
  },
- "fireworks:gemma2-9b-it": {
- id: "fireworks:gemma2-9b-it",
+ "fireworks-ai:gemma2-9b-it": {
+ id: "fireworks-ai:gemma2-9b-it",
  name: "Gemma 2 9B Instruct",
  description: "Redesigned for outsized performance and unmatched efficiency, Gemma 2 optimizes for blazing-fast inference on diverse hardware. Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models. They are text-to-text, decoder-only large language models, available in English, with open weights, pre-trained variants, and instruction-tuned variants. Gemma models are well-suited for a variety of text generation tasks, including question answering, summarization, and reasoning.",
  input: {
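The hunks from `fireworks:gpt-oss-20b` onward are a mechanical rename: every Fireworks model key and `id` moves from the `fireworks:` prefix to `fireworks-ai:`, while names, prices, descriptions, and the `accounts/fireworks/models/...` aliases stay the same. Callers that pinned one of the old ids would need to map it forward; the helper below is a hypothetical sketch of that mapping, not an API shipped by @botpress/cognitive — only the prefix change itself comes from the diff.

```js
// Hypothetical migration helper: rewrite old "fireworks:" model ids to the
// "fireworks-ai:" prefix used in 0.1.43. Not part of the package's public API.
function migrateFireworksId(modelId) {
  return modelId.startsWith('fireworks:')
    ? modelId.replace(/^fireworks:/, 'fireworks-ai:')
    : modelId
}

console.log(migrateFireworksId('fireworks:deepseek-r1')) // "fireworks-ai:deepseek-r1"
console.log(migrateFireworksId('xai:grok-3'))            // unchanged: "xai:grok-3"
```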