@mariozechner/pi-ai 0.9.2 → 0.9.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -104,6 +104,23 @@ export const MODELS = {
  contextWindow: 200000,
  maxTokens: 8192,
  },
+ "claude-opus-4-5": {
+ id: "claude-opus-4-5",
+ name: "Claude Opus 4.5",
+ api: "anthropic-messages",
+ provider: "anthropic",
+ baseUrl: "https://api.anthropic.com",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 5,
+ output: 25,
+ cacheRead: 1.5,
+ cacheWrite: 18.75,
+ },
+ contextWindow: 200000,
+ maxTokens: 64000,
+ },
  "claude-3-opus-20240229": {
  id: "claude-3-opus-20240229",
  name: "Claude Opus 3",
@@ -1937,6 +1954,23 @@ export const MODELS = {
  },
  },
  openrouter: {
+ "anthropic/claude-opus-4.5": {
+ id: "anthropic/claude-opus-4.5",
+ name: "Anthropic: Claude Opus 4.5",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 5,
+ output: 25,
+ cacheRead: 0.5,
+ cacheWrite: 6.25,
+ },
+ contextWindow: 200000,
+ maxTokens: 32000,
+ },
  "allenai/olmo-3-7b-instruct": {
  id: "allenai/olmo-3-7b-instruct",
  name: "AllenAI: Olmo 3 7B Instruct",
@@ -4980,34 +5014,34 @@ export const MODELS = {
  contextWindow: 32768,
  maxTokens: 4096,
  },
- "cohere/command-r-08-2024": {
- id: "cohere/command-r-08-2024",
- name: "Cohere: Command R (08-2024)",
+ "cohere/command-r-plus-08-2024": {
+ id: "cohere/command-r-plus-08-2024",
+ name: "Cohere: Command R+ (08-2024)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.15,
- output: 0.6,
+ input: 2.5,
+ output: 10,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 128000,
  maxTokens: 4000,
  },
- "cohere/command-r-plus-08-2024": {
- id: "cohere/command-r-plus-08-2024",
- name: "Cohere: Command R+ (08-2024)",
+ "cohere/command-r-08-2024": {
+ id: "cohere/command-r-08-2024",
+ name: "Cohere: Command R (08-2024)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 2.5,
- output: 10,
+ input: 0.15,
+ output: 0.6,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -5082,6 +5116,23 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 16384,
  },
+ "meta-llama/llama-3.1-405b-instruct": {
+ id: "meta-llama/llama-3.1-405b-instruct",
+ name: "Meta: Llama 3.1 405B Instruct",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 3.5,
+ output: 3.5,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 130815,
+ maxTokens: 4096,
+ },
  "meta-llama/llama-3.1-70b-instruct": {
  id: "meta-llama/llama-3.1-70b-instruct",
  name: "Meta: Llama 3.1 70B Instruct",
@@ -5116,23 +5167,6 @@ export const MODELS = {
  contextWindow: 131072,
  maxTokens: 16384,
  },
- "meta-llama/llama-3.1-405b-instruct": {
- id: "meta-llama/llama-3.1-405b-instruct",
- name: "Meta: Llama 3.1 405B Instruct",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: false,
- input: ["text"],
- cost: {
- input: 3.5,
- output: 3.5,
- cacheRead: 0,
- cacheWrite: 0,
- },
- contextWindow: 130815,
- maxTokens: 4096,
- },
  "mistralai/mistral-nemo": {
  id: "mistralai/mistral-nemo",
  name: "Mistral: Mistral Nemo",
@@ -5439,38 +5473,38 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 4096,
  },
- "openai/gpt-4-turbo-preview": {
- id: "openai/gpt-4-turbo-preview",
- name: "OpenAI: GPT-4 Turbo Preview",
+ "openai/gpt-3.5-turbo-0613": {
+ id: "openai/gpt-3.5-turbo-0613",
+ name: "OpenAI: GPT-3.5 Turbo (older v0613)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 10,
- output: 30,
+ input: 1,
+ output: 2,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 128000,
+ contextWindow: 4095,
  maxTokens: 4096,
  },
- "openai/gpt-3.5-turbo-0613": {
- id: "openai/gpt-3.5-turbo-0613",
- name: "OpenAI: GPT-3.5 Turbo (older v0613)",
+ "openai/gpt-4-turbo-preview": {
+ id: "openai/gpt-4-turbo-preview",
+ name: "OpenAI: GPT-4 Turbo Preview",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 1,
- output: 2,
+ input: 10,
+ output: 30,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 4095,
+ contextWindow: 128000,
  maxTokens: 4096,
  },
  "mistralai/mistral-small": {
@@ -5558,26 +5592,26 @@ export const MODELS = {
  contextWindow: 16385,
  maxTokens: 4096,
  },
- "openai/gpt-4-0314": {
- id: "openai/gpt-4-0314",
- name: "OpenAI: GPT-4 (older v0314)",
+ "openai/gpt-3.5-turbo": {
+ id: "openai/gpt-3.5-turbo",
+ name: "OpenAI: GPT-3.5 Turbo",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 30,
- output: 60,
+ input: 0.5,
+ output: 1.5,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 8191,
+ contextWindow: 16385,
  maxTokens: 4096,
  },
- "openai/gpt-4": {
- id: "openai/gpt-4",
- name: "OpenAI: GPT-4",
+ "openai/gpt-4-0314": {
+ id: "openai/gpt-4-0314",
+ name: "OpenAI: GPT-4 (older v0314)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
@@ -5592,21 +5626,21 @@ export const MODELS = {
  contextWindow: 8191,
  maxTokens: 4096,
  },
- "openai/gpt-3.5-turbo": {
- id: "openai/gpt-3.5-turbo",
- name: "OpenAI: GPT-3.5 Turbo",
+ "openai/gpt-4": {
+ id: "openai/gpt-4",
+ name: "OpenAI: GPT-4",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.5,
- output: 1.5,
+ input: 30,
+ output: 60,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 16385,
+ contextWindow: 8191,
  maxTokens: 4096,
  },
  "openrouter/auto": {