@mariozechner/pi-ai 0.19.0 → 0.19.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3605,23 +3605,6 @@ export const MODELS = {
 		contextWindow: 262144,
 		maxTokens: 4096,
 	},
-	"meituan/longcat-flash-chat:free": {
-		id: "meituan/longcat-flash-chat:free",
-		name: "Meituan: LongCat Flash Chat (free)",
-		api: "openai-completions",
-		provider: "openrouter",
-		baseUrl: "https://openrouter.ai/api/v1",
-		reasoning: false,
-		input: ["text"],
-		cost: {
-			input: 0,
-			output: 0,
-			cacheRead: 0,
-			cacheWrite: 0,
-		},
-		contextWindow: 131072,
-		maxTokens: 131072,
-	},
 	"qwen/qwen-plus-2025-07-28": {
 		id: "qwen/qwen-plus-2025-07-28",
 		name: "Qwen: Qwen Plus 0728",
@@ -6070,34 +6053,34 @@ export const MODELS = {
 		contextWindow: 128000,
 		maxTokens: 64000,
 	},
-	"meta-llama/llama-3-70b-instruct": {
-		id: "meta-llama/llama-3-70b-instruct",
-		name: "Meta: Llama 3 70B Instruct",
+	"meta-llama/llama-3-8b-instruct": {
+		id: "meta-llama/llama-3-8b-instruct",
+		name: "Meta: Llama 3 8B Instruct",
 		api: "openai-completions",
 		provider: "openrouter",
 		baseUrl: "https://openrouter.ai/api/v1",
 		reasoning: false,
 		input: ["text"],
 		cost: {
-			input: 0.3,
-			output: 0.39999999999999997,
+			input: 0.03,
+			output: 0.06,
 			cacheRead: 0,
 			cacheWrite: 0,
 		},
 		contextWindow: 8192,
 		maxTokens: 16384,
 	},
-	"meta-llama/llama-3-8b-instruct": {
-		id: "meta-llama/llama-3-8b-instruct",
-		name: "Meta: Llama 3 8B Instruct",
+	"meta-llama/llama-3-70b-instruct": {
+		id: "meta-llama/llama-3-70b-instruct",
+		name: "Meta: Llama 3 70B Instruct",
 		api: "openai-completions",
 		provider: "openrouter",
 		baseUrl: "https://openrouter.ai/api/v1",
 		reasoning: false,
 		input: ["text"],
 		cost: {
-			input: 0.03,
-			output: 0.06,
+			input: 0.3,
+			output: 0.39999999999999997,
 			cacheRead: 0,
 			cacheWrite: 0,
 		},
@@ -6189,38 +6172,38 @@ export const MODELS = {
 		contextWindow: 128000,
 		maxTokens: 4096,
 	},
-	"openai/gpt-3.5-turbo-0613": {
-		id: "openai/gpt-3.5-turbo-0613",
-		name: "OpenAI: GPT-3.5 Turbo (older v0613)",
+	"openai/gpt-4-turbo-preview": {
+		id: "openai/gpt-4-turbo-preview",
+		name: "OpenAI: GPT-4 Turbo Preview",
 		api: "openai-completions",
 		provider: "openrouter",
 		baseUrl: "https://openrouter.ai/api/v1",
 		reasoning: false,
 		input: ["text"],
 		cost: {
-			input: 1,
-			output: 2,
+			input: 10,
+			output: 30,
 			cacheRead: 0,
 			cacheWrite: 0,
 		},
-		contextWindow: 4095,
+		contextWindow: 128000,
 		maxTokens: 4096,
 	},
-	"openai/gpt-4-turbo-preview": {
-		id: "openai/gpt-4-turbo-preview",
-		name: "OpenAI: GPT-4 Turbo Preview",
+	"openai/gpt-3.5-turbo-0613": {
+		id: "openai/gpt-3.5-turbo-0613",
+		name: "OpenAI: GPT-3.5 Turbo (older v0613)",
 		api: "openai-completions",
 		provider: "openrouter",
 		baseUrl: "https://openrouter.ai/api/v1",
 		reasoning: false,
 		input: ["text"],
 		cost: {
-			input: 10,
-			output: 30,
+			input: 1,
+			output: 2,
 			cacheRead: 0,
 			cacheWrite: 0,
 		},
-		contextWindow: 128000,
+		contextWindow: 4095,
 		maxTokens: 4096,
 	},
 	"mistralai/mistral-tiny": {
@@ -6308,38 +6291,38 @@ export const MODELS = {
 		contextWindow: 8191,
 		maxTokens: 4096,
 	},
-	"openai/gpt-3.5-turbo": {
-		id: "openai/gpt-3.5-turbo",
-		name: "OpenAI: GPT-3.5 Turbo",
+	"openai/gpt-4-0314": {
+		id: "openai/gpt-4-0314",
+		name: "OpenAI: GPT-4 (older v0314)",
 		api: "openai-completions",
 		provider: "openrouter",
 		baseUrl: "https://openrouter.ai/api/v1",
 		reasoning: false,
 		input: ["text"],
 		cost: {
-			input: 0.5,
-			output: 1.5,
+			input: 30,
+			output: 60,
 			cacheRead: 0,
 			cacheWrite: 0,
 		},
-		contextWindow: 16385,
+		contextWindow: 8191,
 		maxTokens: 4096,
 	},
-	"openai/gpt-4-0314": {
-		id: "openai/gpt-4-0314",
-		name: "OpenAI: GPT-4 (older v0314)",
+	"openai/gpt-3.5-turbo": {
+		id: "openai/gpt-3.5-turbo",
+		name: "OpenAI: GPT-3.5 Turbo",
 		api: "openai-completions",
 		provider: "openrouter",
 		baseUrl: "https://openrouter.ai/api/v1",
 		reasoning: false,
 		input: ["text"],
 		cost: {
-			input: 30,
-			output: 60,
+			input: 0.5,
+			output: 1.5,
 			cacheRead: 0,
 			cacheWrite: 0,
 		},
-		contextWindow: 8191,
+		contextWindow: 16385,
 		maxTokens: 4096,
 	},
 	"openrouter/auto": {