@mariozechner/pi-ai 0.5.40 → 0.5.42

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. package/README.md +74 -15
  2. package/dist/agent/agent-loop.d.ts +6 -0
  3. package/dist/agent/agent-loop.d.ts.map +1 -0
  4. package/dist/agent/{agent.js → agent-loop.js} +16 -9
  5. package/dist/agent/agent-loop.js.map +1 -0
  6. package/dist/agent/index.d.ts +1 -1
  7. package/dist/agent/index.d.ts.map +1 -1
  8. package/dist/agent/index.js +1 -1
  9. package/dist/agent/index.js.map +1 -1
  10. package/dist/agent/types.d.ts +1 -1
  11. package/dist/agent/types.d.ts.map +1 -1
  12. package/dist/agent/types.js.map +1 -1
  13. package/dist/index.d.ts +1 -1
  14. package/dist/index.d.ts.map +1 -1
  15. package/dist/index.js +1 -1
  16. package/dist/index.js.map +1 -1
  17. package/dist/models.generated.d.ts +68 -0
  18. package/dist/models.generated.d.ts.map +1 -1
  19. package/dist/models.generated.js +120 -52
  20. package/dist/models.generated.js.map +1 -1
  21. package/dist/providers/anthropic.d.ts.map +1 -1
  22. package/dist/providers/anthropic.js +10 -7
  23. package/dist/providers/anthropic.js.map +1 -1
  24. package/dist/providers/google.d.ts.map +1 -1
  25. package/dist/providers/google.js +16 -7
  26. package/dist/providers/google.js.map +1 -1
  27. package/dist/providers/openai-completions.d.ts.map +1 -1
  28. package/dist/providers/openai-completions.js +12 -8
  29. package/dist/providers/openai-completions.js.map +1 -1
  30. package/dist/providers/openai-responses.d.ts.map +1 -1
  31. package/dist/providers/openai-responses.js +13 -16
  32. package/dist/providers/openai-responses.js.map +1 -1
  33. package/dist/types.d.ts +7 -7
  34. package/dist/types.d.ts.map +1 -1
  35. package/dist/types.js.map +1 -1
  36. package/dist/{event-stream.d.ts → utils/event-stream.d.ts} +1 -1
  37. package/dist/utils/event-stream.d.ts.map +1 -0
  38. package/dist/{event-stream.js → utils/event-stream.js} +1 -1
  39. package/dist/utils/event-stream.js.map +1 -0
  40. package/dist/utils/json-parse.d.ts.map +1 -0
  41. package/dist/utils/json-parse.js.map +1 -0
  42. package/dist/utils/typebox-helpers.d.ts.map +1 -0
  43. package/dist/utils/typebox-helpers.js.map +1 -0
  44. package/dist/{validation.d.ts → utils/validation.d.ts} +1 -1
  45. package/dist/utils/validation.d.ts.map +1 -0
  46. package/dist/utils/validation.js.map +1 -0
  47. package/package.json +1 -1
  48. package/dist/agent/agent.d.ts +0 -6
  49. package/dist/agent/agent.d.ts.map +0 -1
  50. package/dist/agent/agent.js.map +0 -1
  51. package/dist/event-stream.d.ts.map +0 -1
  52. package/dist/event-stream.js.map +0 -1
  53. package/dist/json-parse.d.ts.map +0 -1
  54. package/dist/json-parse.js.map +0 -1
  55. package/dist/typebox-helpers.d.ts.map +0 -1
  56. package/dist/typebox-helpers.js.map +0 -1
  57. package/dist/validation.d.ts.map +0 -1
  58. package/dist/validation.js.map +0 -1
  59. package/dist/{json-parse.d.ts → utils/json-parse.d.ts} +0 -0
  60. package/dist/{json-parse.js → utils/json-parse.js} +0 -0
  61. package/dist/{typebox-helpers.d.ts → utils/typebox-helpers.d.ts} +0 -0
  62. package/dist/{typebox-helpers.js → utils/typebox-helpers.js} +0 -0
  63. package/dist/{validation.js → utils/validation.js} +0 -0
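
Note on the models.generated.js hunks below: every entry in the exported MODELS record carries the same fields visible in the diff (id, name, api, provider, baseUrl, reasoning, input, cost, contextWindow, maxTokens). Long decimals such as 0.44999999999999996 and 0.09999999999999999 look like floating-point rounding noise around 0.45 and 0.10 rather than deliberate price points. As a minimal sketch of how this metadata could be consumed, assuming the cost fields are USD per million tokens (consistent with OpenRouter's published pricing, but not stated in this diff), a hypothetical estimateCost helper, which is not part of the package's API, might look like this:

// Sketch only: field meanings are inferred from the diff; cost units are assumed
// to be USD per 1M tokens. estimateCost is a hypothetical helper, not package API.
interface ModelCost {
  input: number;      // assumed: USD per 1M input tokens
  output: number;     // assumed: USD per 1M output tokens
  cacheRead: number;
  cacheWrite: number;
}

interface ModelEntry {
  id: string;
  name: string;
  api: string;            // e.g. "openai-completions"
  provider: string;       // e.g. "openrouter"
  baseUrl: string;
  reasoning: boolean;
  input: string[];        // accepted input modalities, e.g. ["text", "image"]
  cost: ModelCost;
  contextWindow: number;
  maxTokens: number;
}

// Estimate the dollar cost of a single request from per-million-token rates.
function estimateCost(model: ModelEntry, inputTokens: number, outputTokens: number): number {
  return (inputTokens * model.cost.input + outputTokens * model.cost.output) / 1_000_000;
}

// Example with the newly added "qwen/qwen3-coder-plus" entry (input: 1, output: 5):
// 10,000 input + 2,000 output tokens ≈ (10000 * 1 + 2000 * 5) / 1e6 = $0.02
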
@@ -1410,6 +1410,57 @@ export const MODELS = {
  },
  },
  openrouter: {
+ "alibaba/tongyi-deepresearch-30b-a3b": {
+ id: "alibaba/tongyi-deepresearch-30b-a3b",
+ name: "Tongyi DeepResearch 30B A3B",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: true,
+ input: ["text"],
+ cost: {
+ input: 0.09,
+ output: 0.44999999999999996,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 131072,
+ maxTokens: 131072,
+ },
+ "qwen/qwen3-coder-flash": {
+ id: "qwen/qwen3-coder-flash",
+ name: "Qwen: Qwen3 Coder Flash",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 0.3,
+ output: 1.5,
+ cacheRead: 0.08,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 65536,
+ },
+ "qwen/qwen3-coder-plus": {
+ id: "qwen/qwen3-coder-plus",
+ name: "Qwen: Qwen3 Coder Plus",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 1,
+ output: 5,
+ cacheRead: 0.09999999999999999,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 65536,
+ },
  "qwen/qwen3-next-80b-a3b-thinking": {
  id: "qwen/qwen3-next-80b-a3b-thinking",
  name: "Qwen: Qwen3 Next 80B A3B Thinking",
@@ -1419,8 +1470,8 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.09782604,
- output: 0.391304304,
+ input: 0.09999999999999999,
+ output: 0.7999999999999999,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -1436,8 +1487,8 @@ export const MODELS = {
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.09782604,
- output: 0.391304304,
+ input: 0.09999999999999999,
+ output: 0.7999999999999999,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -1453,13 +1504,13 @@ export const MODELS = {
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.15,
- output: 0.75,
+ input: 0.12,
+ output: 0.6,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 131072,
- maxTokens: 131072,
+ maxTokens: 4096,
  },
  "qwen/qwen-plus-2025-07-28": {
  id: "qwen/qwen-plus-2025-07-28",
@@ -1589,8 +1640,8 @@ export const MODELS = {
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.38043459999999996,
- output: 1.52173896,
+ input: 0.38,
+ output: 1.52,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -1640,8 +1691,8 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.08967387,
- output: 0.358695612,
+ input: 0.08,
+ output: 0.29,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -1657,8 +1708,8 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.127173852,
- output: 0.5086955952000001,
+ input: 0.11,
+ output: 0.38,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -1696,7 +1747,7 @@ export const MODELS = {
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 32768,
+ contextWindow: 163840,
  maxTokens: 4096,
  },
  "deepseek/deepseek-chat-v3.1": {
@@ -1816,7 +1867,7 @@ export const MODELS = {
  cacheWrite: 0,
  },
  contextWindow: 262144,
- maxTokens: 262144,
+ maxTokens: 4096,
  },
  "qwen/qwen3-30b-a3b-instruct-2507": {
  id: "qwen/qwen3-30b-a3b-instruct-2507",
@@ -1827,8 +1878,8 @@ export const MODELS = {
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.07065213999999999,
- output: 0.282608664,
+ input: 0.07,
+ output: 0.28,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -1844,8 +1895,8 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.41249980199999997,
- output: 1.6499998152000002,
+ input: 0.41,
+ output: 1.6500000000000001,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -1895,8 +1946,8 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.0974999532,
- output: 0.38999995632,
+ input: 0.09999999999999999,
+ output: 0.39,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -1946,8 +1997,8 @@ export const MODELS = {
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.24999987999999998,
- output: 0.999999888,
+ input: 0.22,
+ output: 0.95,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -1963,13 +2014,13 @@ export const MODELS = {
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.0974999532,
- output: 0.38999995632,
+ input: 0.09999999999999999,
+ output: 0.09999999999999999,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 262144,
- maxTokens: 4096,
+ maxTokens: 262144,
  },
  "moonshotai/kimi-k2:free": {
  id: "moonshotai/kimi-k2:free",
@@ -2082,8 +2133,8 @@ export const MODELS = {
  reasoning: false,
  input: ["text", "image"],
  cost: {
- input: 0.049999999999999996,
- output: 0.09999999999999999,
+ input: 0.075,
+ output: 0.19999999999999998,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -2167,8 +2218,8 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.24999987999999998,
- output: 0.999999888,
+ input: 0.39999999999999997,
+ output: 1.75,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -2201,8 +2252,8 @@ export const MODELS = {
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.035869548,
- output: 0.14347824480000002,
+ input: 0.04,
+ output: 0.14,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -2303,8 +2354,8 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.035869548,
- output: 0.14347824480000002,
+ input: 0.06,
+ output: 0.22,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -2337,8 +2388,8 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.0322825932,
- output: 0.12913042032,
+ input: 0.03,
+ output: 0.13,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -2371,8 +2422,8 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.13,
- output: 0.6,
+ input: 0.18,
+ output: 0.54,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -2507,14 +2558,31 @@ export const MODELS = {
  reasoning: false,
  input: ["text", "image"],
  cost: {
- input: 0.03804346,
- output: 0.152173896,
+ input: 0.04,
+ output: 0.15,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 131072,
  maxTokens: 96000,
  },
+ "microsoft/phi-4-multimodal-instruct": {
+ id: "microsoft/phi-4-multimodal-instruct",
+ name: "Microsoft: Phi 4 Multimodal Instruct",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 0.049999999999999996,
+ output: 0.09999999999999999,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 131072,
+ maxTokens: 4096,
+ },
  "qwen/qwq-32b": {
  id: "qwen/qwq-32b",
  name: "Qwen: QwQ 32B",
@@ -2609,8 +2677,8 @@ export const MODELS = {
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.03804346,
- output: 0.152173896,
+ input: 0.04,
+ output: 0.15,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -2626,8 +2694,8 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.03260868,
- output: 0.130434768,
+ input: 0.03,
+ output: 0.13,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -2915,8 +2983,8 @@ export const MODELS = {
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.06521736,
- output: 0.260869536,
+ input: 0.07,
+ output: 0.26,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -3017,12 +3085,12 @@ export const MODELS = {
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.015,
- output: 0.02,
+ input: 0.02,
+ output: 0.03,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 131072,
+ contextWindow: 16384,
  maxTokens: 16384,
  },
  "meta-llama/llama-3.1-405b-instruct": {
@@ -3068,13 +3136,13 @@ export const MODELS = {
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.017934774,
- output: 0.07173912240000001,
+ input: 0.02,
+ output: 0.04,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 131072,
- maxTokens: 128000,
+ maxTokens: 16384,
  },
  "mistralai/mistral-7b-instruct:free": {
  id: "mistralai/mistral-7b-instruct:free",