@mariozechner/pi-ai 0.52.10 → 0.52.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1086,7 +1086,7 @@ const response = await complete(model, {
1086
1086
 
1087
1087
  ### Provider Notes
1088
1088
 
1089
- **OpenAI Codex**: Requires a ChatGPT Plus or Pro subscription. Provides access to GPT-5.x Codex models with extended context windows and reasoning capabilities. The library automatically handles session-based prompt caching when `sessionId` is provided in stream options.
1089
+ **OpenAI Codex**: Requires a ChatGPT Plus or Pro subscription. Provides access to GPT-5.x Codex models with extended context windows and reasoning capabilities. The library automatically handles session-based prompt caching when `sessionId` is provided in stream options. You can set `transport` in stream options to `"sse"`, `"websocket"`, or `"auto"` for Codex Responses transport selection. When using WebSocket with a `sessionId`, connections are reused per session and expire after 5 minutes of inactivity.
1090
1090
 
1091
1091
 **Azure OpenAI (Responses)**: Uses the Responses API only. Set `AZURE_OPENAI_API_KEY` and either `AZURE_OPENAI_BASE_URL` or `AZURE_OPENAI_RESOURCE_NAME`. Use `AZURE_OPENAI_API_VERSION` (defaults to `v1`) to override the API version if needed. Deployment names are treated as model IDs by default; override with `azureDeploymentName` or `AZURE_OPENAI_DEPLOYMENT_NAME_MAP` using comma-separated `model-id=deployment` pairs (for example `gpt-4o-mini=my-deployment,gpt-4o=prod`). Legacy deployment-based URLs are intentionally unsupported.
1092
1092
 
@@ -1258,6 +1258,40 @@ export declare const MODELS: {
1258
1258
  contextWindow: number;
1259
1259
  maxTokens: number;
1260
1260
  };
1261
+ readonly "writer.palmyra-x4-v1:0": {
1262
+ id: string;
1263
+ name: string;
1264
+ api: "bedrock-converse-stream";
1265
+ provider: string;
1266
+ baseUrl: string;
1267
+ reasoning: true;
1268
+ input: "text"[];
1269
+ cost: {
1270
+ input: number;
1271
+ output: number;
1272
+ cacheRead: number;
1273
+ cacheWrite: number;
1274
+ };
1275
+ contextWindow: number;
1276
+ maxTokens: number;
1277
+ };
1278
+ readonly "writer.palmyra-x5-v1:0": {
1279
+ id: string;
1280
+ name: string;
1281
+ api: "bedrock-converse-stream";
1282
+ provider: string;
1283
+ baseUrl: string;
1284
+ reasoning: true;
1285
+ input: "text"[];
1286
+ cost: {
1287
+ input: number;
1288
+ output: number;
1289
+ cacheRead: number;
1290
+ cacheWrite: number;
1291
+ };
1292
+ contextWindow: number;
1293
+ maxTokens: number;
1294
+ };
1261
1295
  readonly "zai.glm-4.7": {
1262
1296
  id: string;
1263
1297
  name: string;
@@ -4208,6 +4242,23 @@ export declare const MODELS: {
4208
4242
  contextWindow: number;
4209
4243
  maxTokens: number;
4210
4244
  };
4245
+ readonly "MiniMax-M2.5": {
4246
+ id: string;
4247
+ name: string;
4248
+ api: "anthropic-messages";
4249
+ provider: string;
4250
+ baseUrl: string;
4251
+ reasoning: true;
4252
+ input: "text"[];
4253
+ cost: {
4254
+ input: number;
4255
+ output: number;
4256
+ cacheRead: number;
4257
+ cacheWrite: number;
4258
+ };
4259
+ contextWindow: number;
4260
+ maxTokens: number;
4261
+ };
4211
4262
  };
4212
4263
  readonly "minimax-cn": {
4213
4264
  readonly "MiniMax-M2": {
@@ -4244,6 +4295,23 @@ export declare const MODELS: {
4244
4295
  contextWindow: number;
4245
4296
  maxTokens: number;
4246
4297
  };
4298
+ readonly "MiniMax-M2.5": {
4299
+ id: string;
4300
+ name: string;
4301
+ api: "anthropic-messages";
4302
+ provider: string;
4303
+ baseUrl: string;
4304
+ reasoning: true;
4305
+ input: "text"[];
4306
+ cost: {
4307
+ input: number;
4308
+ output: number;
4309
+ cacheRead: number;
4310
+ cacheWrite: number;
4311
+ };
4312
+ contextWindow: number;
4313
+ maxTokens: number;
4314
+ };
4247
4315
  };
4248
4316
  readonly mistral: {
4249
4317
  readonly "codestral-latest": {
@@ -8878,6 +8946,23 @@ export declare const MODELS: {
8878
8946
  contextWindow: number;
8879
8947
  maxTokens: number;
8880
8948
  };
8949
+ readonly "qwen/qwen3-4b": {
8950
+ id: string;
8951
+ name: string;
8952
+ api: "openai-completions";
8953
+ provider: string;
8954
+ baseUrl: string;
8955
+ reasoning: true;
8956
+ input: "text"[];
8957
+ cost: {
8958
+ input: number;
8959
+ output: number;
8960
+ cacheRead: number;
8961
+ cacheWrite: number;
8962
+ };
8963
+ contextWindow: number;
8964
+ maxTokens: number;
8965
+ };
8881
8966
  readonly "qwen/qwen3-4b:free": {
8882
8967
  id: string;
8883
8968
  name: string;
@@ -9405,23 +9490,6 @@ export declare const MODELS: {
9405
9490
  contextWindow: number;
9406
9491
  maxTokens: number;
9407
9492
  };
9408
- readonly "tngtech/tng-r1t-chimera:free": {
9409
- id: string;
9410
- name: string;
9411
- api: "openai-completions";
9412
- provider: string;
9413
- baseUrl: string;
9414
- reasoning: true;
9415
- input: "text"[];
9416
- cost: {
9417
- input: number;
9418
- output: number;
9419
- cacheRead: number;
9420
- cacheWrite: number;
9421
- };
9422
- contextWindow: number;
9423
- maxTokens: number;
9424
- };
9425
9493
  readonly "upstage/solar-pro-3:free": {
9426
9494
  id: string;
9427
9495
  name: string;