@mariozechner/pi-ai 0.5.47 → 0.5.48

This diff shows the changes between the two published versions of the package as they appear in the public registry, and is provided for informational purposes only.
@@ -3805,7 +3805,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "cohere/command-r-08-2024": {
+ readonly "cohere/command-r-plus-08-2024": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -3822,7 +3822,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "cohere/command-r-plus-08-2024": {
+ readonly "cohere/command-r-08-2024": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -3907,7 +3907,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "meta-llama/llama-3.1-70b-instruct": {
+ readonly "meta-llama/llama-3.1-405b-instruct": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -3924,7 +3924,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "meta-llama/llama-3.1-405b-instruct": {
+ readonly "meta-llama/llama-3.1-70b-instruct": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -4060,7 +4060,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "meta-llama/llama-3-70b-instruct": {
+ readonly "meta-llama/llama-3-8b-instruct": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -4077,7 +4077,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "meta-llama/llama-3-8b-instruct": {
+ readonly "meta-llama/llama-3-70b-instruct": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -4128,7 +4128,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "mistralai/mistral-tiny": {
+ readonly "mistralai/mistral-small": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -4145,7 +4145,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "mistralai/mistral-small": {
+ readonly "mistralai/mistral-tiny": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -1912,8 +1912,8 @@ export const MODELS = {
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.39999999999999997,
- output: 2,
+ input: 0.5700000000000001,
+ output: 2.2800000000000002,
  cacheRead: 0,
  cacheWrite: 0,
  },
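
For scale, a minimal sketch of what the new rates in the hunk above mean per request — assuming, as model registries conventionally price, that cost.input and cost.output are USD per million tokens (the unit is not stated anywhere in this diff):

    // Hypothetical helper; the per-1M-token unit is an assumption, not confirmed by the diff.
    const cost = { input: 0.5700000000000001, output: 2.2800000000000002 };

    function estimateUsd(inputTokens: number, outputTokens: number): number {
        return (inputTokens / 1_000_000) * cost.input + (outputTokens / 1_000_000) * cost.output;
    }

    // 10k prompt tokens + 1k completion tokens ≈ $0.0080 under the new rates,
    // up from ≈ $0.0060 under the old ones (0.39999999999999997 / 2).
    estimateUsd(10_000, 1_000);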
@@ -1980,8 +1980,8 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.5,
- output: 1.75,
+ input: 0.44999999999999996,
+ output: 2.0999999999999996,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -3550,7 +3550,7 @@ export const MODELS = {
  cacheWrite: 0,
  },
  contextWindow: 131072,
- maxTokens: 2048,
+ maxTokens: 4096,
  },
  "meta-llama/llama-3.3-70b-instruct": {
  id: "meta-llama/llama-3.3-70b-instruct",
@@ -3807,34 +3807,34 @@ export const MODELS = {
  contextWindow: 32768,
  maxTokens: 4096,
  },
- "cohere/command-r-08-2024": {
- id: "cohere/command-r-08-2024",
- name: "Cohere: Command R (08-2024)",
+ "cohere/command-r-plus-08-2024": {
+ id: "cohere/command-r-plus-08-2024",
+ name: "Cohere: Command R+ (08-2024)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.15,
- output: 0.6,
+ input: 2.5,
+ output: 10,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 128000,
  maxTokens: 4000,
  },
- "cohere/command-r-plus-08-2024": {
- id: "cohere/command-r-plus-08-2024",
- name: "Cohere: Command R+ (08-2024)",
+ "cohere/command-r-08-2024": {
+ id: "cohere/command-r-08-2024",
+ name: "Cohere: Command R (08-2024)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 2.5,
- output: 10,
+ input: 0.15,
+ output: 0.6,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -3909,39 +3909,39 @@ export const MODELS = {
  contextWindow: 16384,
  maxTokens: 16384,
  },
- "meta-llama/llama-3.1-70b-instruct": {
- id: "meta-llama/llama-3.1-70b-instruct",
- name: "Meta: Llama 3.1 70B Instruct",
+ "meta-llama/llama-3.1-405b-instruct": {
+ id: "meta-llama/llama-3.1-405b-instruct",
+ name: "Meta: Llama 3.1 405B Instruct",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.39999999999999997,
- output: 0.39999999999999997,
+ input: 0.7999999999999999,
+ output: 0.7999999999999999,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 131072,
- maxTokens: 4096,
+ contextWindow: 32768,
+ maxTokens: 16384,
  },
- "meta-llama/llama-3.1-405b-instruct": {
- id: "meta-llama/llama-3.1-405b-instruct",
- name: "Meta: Llama 3.1 405B Instruct",
+ "meta-llama/llama-3.1-70b-instruct": {
+ id: "meta-llama/llama-3.1-70b-instruct",
+ name: "Meta: Llama 3.1 70B Instruct",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.7999999999999999,
- output: 0.7999999999999999,
+ input: 0.39999999999999997,
+ output: 0.39999999999999997,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 32768,
- maxTokens: 16384,
+ contextWindow: 131072,
+ maxTokens: 4096,
  },
  "mistralai/mistral-nemo": {
  id: "mistralai/mistral-nemo",
@@ -4062,34 +4062,34 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 4096,
  },
- "meta-llama/llama-3-70b-instruct": {
- id: "meta-llama/llama-3-70b-instruct",
- name: "Meta: Llama 3 70B Instruct",
+ "meta-llama/llama-3-8b-instruct": {
+ id: "meta-llama/llama-3-8b-instruct",
+ name: "Meta: Llama 3 8B Instruct",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.3,
- output: 0.39999999999999997,
+ input: 0.03,
+ output: 0.06,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 8192,
  maxTokens: 16384,
  },
- "meta-llama/llama-3-8b-instruct": {
- id: "meta-llama/llama-3-8b-instruct",
- name: "Meta: Llama 3 8B Instruct",
+ "meta-llama/llama-3-70b-instruct": {
+ id: "meta-llama/llama-3-70b-instruct",
+ name: "Meta: Llama 3 70B Instruct",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.03,
- output: 0.06,
+ input: 0.3,
+ output: 0.39999999999999997,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -4130,34 +4130,34 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 4096,
  },
- "mistralai/mistral-tiny": {
- id: "mistralai/mistral-tiny",
- name: "Mistral Tiny",
+ "mistralai/mistral-small": {
+ id: "mistralai/mistral-small",
+ name: "Mistral Small",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.25,
- output: 0.25,
+ input: 0.19999999999999998,
+ output: 0.6,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 32768,
  maxTokens: 4096,
  },
- "mistralai/mistral-small": {
- id: "mistralai/mistral-small",
- name: "Mistral Small",
+ "mistralai/mistral-tiny": {
+ id: "mistralai/mistral-tiny",
+ name: "Mistral Tiny",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.19999999999999998,
- output: 0.6,
+ input: 0.25,
+ output: 0.25,
  cacheRead: 0,
  cacheWrite: 0,
  },
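
Note that the paired key swaps above are pure reorderings: each model keeps its own pricing and metadata in both versions, only its position in the object changes. Because consumers address MODELS by key, the reordering is invisible at lookup time (only iteration order over Object.keys changes); the observable differences in 0.5.48 are the updated cost figures and the one maxTokens bump. A minimal consumer-side sketch, assuming the MODELS export shown in the declarations is reachable from the package root:

    import { MODELS } from "@mariozechner/pi-ai";

    // Values as of 0.5.48, taken from the hunks above; keyed lookup is
    // unaffected by the entry reordering between 0.5.47 and 0.5.48.
    const llama70b = MODELS["meta-llama/llama-3.1-70b-instruct"];

    console.log(llama70b.name);          // "Meta: Llama 3.1 70B Instruct"
    console.log(llama70b.cost.input);    // 0.39999999999999997 (USD per 1M tokens, assumed unit)
    console.log(llama70b.contextWindow); // 131072
    console.log(llama70b.maxTokens);     // 4096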