@mariozechner/pi-ai 0.9.3 → 0.9.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1986,23 +1986,6 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "x-ai/grok-4.1-fast": {
-        id: string;
-        name: string;
-        api: "openai-completions";
-        provider: string;
-        baseUrl: string;
-        reasoning: true;
-        input: ("image" | "text")[];
-        cost: {
-            input: number;
-            output: number;
-            cacheRead: number;
-            cacheWrite: number;
-        };
-        contextWindow: number;
-        maxTokens: number;
-    };
     readonly "x-ai/grok-4.1-fast:free": {
         id: string;
         name: string;
@@ -4825,7 +4808,7 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "anthropic/claude-3.5-haiku
+    readonly "anthropic/claude-3.5-haiku": {
         id: string;
         name: string;
         api: "openai-completions";
@@ -4842,7 +4825,7 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "anthropic/claude-3.5-haiku": {
+    readonly "anthropic/claude-3.5-haiku-20241022": {
         id: string;
         name: string;
         api: "openai-completions";
@@ -4910,23 +4893,6 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "qwen/qwen-2.5-7b-instruct": {
-        id: string;
-        name: string;
-        api: "openai-completions";
-        provider: string;
-        baseUrl: string;
-        reasoning: false;
-        input: "text"[];
-        cost: {
-            input: number;
-            output: number;
-            cacheRead: number;
-            cacheWrite: number;
-        };
-        contextWindow: number;
-        maxTokens: number;
-    };
     readonly "nvidia/llama-3.1-nemotron-70b-instruct": {
         id: string;
         name: string;
@@ -5012,7 +4978,7 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "cohere/command-r-
+    readonly "cohere/command-r-08-2024": {
         id: string;
         name: string;
         api: "openai-completions";
@@ -5029,7 +4995,7 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "cohere/command-r-08-2024": {
+    readonly "cohere/command-r-plus-08-2024": {
         id: string;
         name: string;
         api: "openai-completions";
@@ -5114,7 +5080,7 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "meta-llama/llama-3.1-
+    readonly "meta-llama/llama-3.1-8b-instruct": {
         id: string;
         name: string;
         api: "openai-completions";
@@ -5131,7 +5097,7 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "meta-llama/llama-3.1-
+    readonly "meta-llama/llama-3.1-405b-instruct": {
         id: string;
         name: string;
         api: "openai-completions";
@@ -5148,7 +5114,7 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "meta-llama/llama-3.1-
+    readonly "meta-llama/llama-3.1-70b-instruct": {
         id: string;
         name: string;
         api: "openai-completions";
@@ -5182,7 +5148,7 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "openai/gpt-4o-mini
+    readonly "openai/gpt-4o-mini": {
         id: string;
         name: string;
         api: "openai-completions";
@@ -5199,7 +5165,7 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "openai/gpt-4o-mini": {
+    readonly "openai/gpt-4o-mini-2024-07-18": {
         id: string;
         name: string;
         api: "openai-completions";
@@ -5301,7 +5267,7 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "openai/gpt-4o
+    readonly "openai/gpt-4o": {
         id: string;
         name: string;
         api: "openai-completions";
@@ -5318,7 +5284,7 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "openai/gpt-4o": {
+    readonly "openai/gpt-4o:extended": {
         id: string;
         name: string;
         api: "openai-completions";
@@ -5335,7 +5301,7 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "openai/gpt-4o
+    readonly "openai/gpt-4o-2024-05-13": {
         id: string;
         name: string;
         api: "openai-completions";
@@ -5352,7 +5318,7 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "meta-llama/llama-3-
+    readonly "meta-llama/llama-3-8b-instruct": {
         id: string;
         name: string;
         api: "openai-completions";
@@ -5369,7 +5335,7 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "meta-llama/llama-3-
+    readonly "meta-llama/llama-3-70b-instruct": {
         id: string;
         name: string;
         api: "openai-completions";
@@ -5471,7 +5437,7 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "openai/gpt-
+    readonly "openai/gpt-4-turbo-preview": {
         id: string;
         name: string;
         api: "openai-completions";
@@ -5488,7 +5454,7 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "openai/gpt-
+    readonly "openai/gpt-3.5-turbo-0613": {
         id: string;
         name: string;
         api: "openai-completions";
@@ -5590,7 +5556,7 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "openai/gpt-
+    readonly "openai/gpt-4-0314": {
         id: string;
         name: string;
         api: "openai-completions";
@@ -5607,7 +5573,7 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "openai/gpt-4
+    readonly "openai/gpt-4": {
         id: string;
         name: string;
         api: "openai-completions";
@@ -5624,7 +5590,7 @@ export declare const MODELS: {
         contextWindow: number;
         maxTokens: number;
     };
-    readonly "openai/gpt-
+    readonly "openai/gpt-3.5-turbo": {
         id: string;
         name: string;
         api: "openai-completions";
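Every entry in MODELS follows the shape shown in the removed "x-ai/grok-4.1-fast" block in the first hunk: id, name, api, provider, baseUrl, reasoning, input, cost (input, output, cacheRead, cacheWrite), contextWindow, and maxTokens. A minimal consumer-side sketch of reading one entry, assuming MODELS is importable from the package root and that "x-ai/grok-4.1-fast:free" remains a key in 0.9.4 (both are assumptions this diff does not confirm):

// Usage sketch only; the import path and chosen key are illustrative assumptions.
import { MODELS } from "@mariozechner/pi-ai";

const model = MODELS["x-ai/grok-4.1-fast:free"];

// Each entry bundles routing data (api, provider, baseUrl), capability flags
// (reasoning, input modalities), pricing, and context/output limits.
console.log(model.name, model.contextWindow, model.maxTokens);
console.log("input cost:", model.cost.input, "output cost:", model.cost.output);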