tokencostauto 0.1.402__tar.gz → 0.1.406__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {tokencostauto-0.1.402/tokencostauto.egg-info → tokencostauto-0.1.406}/PKG-INFO +1 -1
- {tokencostauto-0.1.402 → tokencostauto-0.1.406}/pyproject.toml +1 -1
- {tokencostauto-0.1.402 → tokencostauto-0.1.406}/tokencostauto/model_prices.json +468 -4
- {tokencostauto-0.1.402 → tokencostauto-0.1.406/tokencostauto.egg-info}/PKG-INFO +1 -1
- {tokencostauto-0.1.402 → tokencostauto-0.1.406}/LICENSE +0 -0
- {tokencostauto-0.1.402 → tokencostauto-0.1.406}/MANIFEST.in +0 -0
- {tokencostauto-0.1.402 → tokencostauto-0.1.406}/README.md +0 -0
- {tokencostauto-0.1.402 → tokencostauto-0.1.406}/setup.cfg +0 -0
- {tokencostauto-0.1.402 → tokencostauto-0.1.406}/tests/test_costs.py +0 -0
- {tokencostauto-0.1.402 → tokencostauto-0.1.406}/tokencostauto/__init__.py +0 -0
- {tokencostauto-0.1.402 → tokencostauto-0.1.406}/tokencostauto/constants.py +0 -0
- {tokencostauto-0.1.402 → tokencostauto-0.1.406}/tokencostauto/costs.py +0 -0
- {tokencostauto-0.1.402 → tokencostauto-0.1.406}/tokencostauto.egg-info/SOURCES.txt +0 -0
- {tokencostauto-0.1.402 → tokencostauto-0.1.406}/tokencostauto.egg-info/dependency_links.txt +0 -0
- {tokencostauto-0.1.402 → tokencostauto-0.1.406}/tokencostauto.egg-info/requires.txt +0 -0
- {tokencostauto-0.1.402 → tokencostauto-0.1.406}/tokencostauto.egg-info/top_level.txt +0 -0
{tokencostauto-0.1.402/tokencostauto.egg-info → tokencostauto-0.1.406}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tokencostauto
-Version: 0.1.402
+Version: 0.1.406
 Summary: To calculate token and translated USD cost of string and message calls to OpenAI, for example when used by AI agents
 Author-email: Trisha Pan <trishaepan@gmail.com>, Alex Reibman <areibman@gmail.com>, Pratyush Shukla <ps4534@nyu.edu>, Thiago MadPin <madpin@gmail.com>
 Project-URL: Homepage, https://github.com/madpin/tokencostaudo
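The Summary line above is the whole point of the package: turning token counts into USD using the per-token rates shipped in tokencostauto/model_prices.json. As a rough orientation, here is a minimal sketch of that lookup; the function name, signature, and hard-coded path are illustrative assumptions, not the package's documented API.

```python
# Illustrative sketch only: look up a model's per-token rates in
# model_prices.json and convert token counts to USD. The helper name and
# file path are assumptions, not tokencostauto's actual public API.
import json

def usd_cost(model: str, prompt_tokens: int, completion_tokens: int,
             prices_path: str = "tokencostauto/model_prices.json") -> float:
    with open(prices_path) as f:
        prices = json.load(f)
    entry = prices[model]
    return (prompt_tokens * entry["input_cost_per_token"]
            + completion_tokens * entry["output_cost_per_token"])

# For a model priced at 3e-06 USD per input token and 1.5e-05 USD per output
# token, 1,000 prompt + 500 completion tokens cost 0.003 + 0.0075 = 0.0105 USD.
```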
{tokencostauto-0.1.402 → tokencostauto-0.1.406}/tokencostauto/model_prices.json

@@ -9781,8 +9781,8 @@
         "input_cost_per_token": 3e-06,
         "litellm_provider": "anthropic",
         "max_input_tokens": 200000,
-        "max_output_tokens":
-        "max_tokens":
+        "max_output_tokens": 64000,
+        "max_tokens": 64000,
         "mode": "chat",
         "output_cost_per_token": 1.5e-05,
         "search_context_cost_per_query": {
@@ -9809,8 +9809,8 @@
         "input_cost_per_token": 3e-06,
         "litellm_provider": "anthropic",
         "max_input_tokens": 200000,
-        "max_output_tokens":
-        "max_tokens":
+        "max_output_tokens": 64000,
+        "max_tokens": 64000,
         "mode": "chat",
         "output_cost_per_token": 1.5e-05,
         "search_context_cost_per_query": {
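Both Anthropic entries above gain explicit 64,000-token output caps where the fields previously had no value. A consumer of the table would typically use max_output_tokens (falling back to max_tokens) to clamp the completion length it requests; the helper below is an illustrative sketch under that assumption, not an API from this package.

```python
# Illustrative sketch: clamp a requested completion length to the pricing
# table's max_output_tokens, falling back to max_tokens or a caller default.
def clamp_completion(entry: dict, requested: int, default_cap: int = 4096) -> int:
    cap = entry.get("max_output_tokens") or entry.get("max_tokens") or default_cap
    return min(requested, cap)

entry = {"max_output_tokens": 64000, "max_tokens": 64000}  # values from this diff
assert clamp_completion(entry, 100_000) == 64_000
assert clamp_completion({}, 100_000) == 4_096   # entry with no limits recorded
```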
{tokencostauto-0.1.402 → tokencostauto-0.1.406}/tokencostauto/model_prices.json (continued)

@@ -32246,5 +32246,469 @@
         "supports_reasoning": true,
         "supports_tool_choice": true,
         "supports_vision": true
+    },
+    "azure_ai/deepseek-v3.2": {
+        "input_cost_per_token": 5.8e-07,
+        "litellm_provider": "azure_ai",
+        "max_input_tokens": 163840,
+        "max_output_tokens": 163840,
+        "max_tokens": 8192,
+        "mode": "chat",
+        "output_cost_per_token": 1.68e-06,
+        "supports_assistant_prefill": true,
+        "supports_function_calling": true,
+        "supports_prompt_caching": true,
+        "supports_reasoning": true,
+        "supports_tool_choice": true
+    },
+    "azure_ai/deepseek-v3.2-speciale": {
+        "input_cost_per_token": 5.8e-07,
+        "litellm_provider": "azure_ai",
+        "max_input_tokens": 163840,
+        "max_output_tokens": 163840,
+        "max_tokens": 8192,
+        "mode": "chat",
+        "output_cost_per_token": 1.68e-06,
+        "supports_assistant_prefill": true,
+        "supports_function_calling": true,
+        "supports_prompt_caching": true,
+        "supports_reasoning": true,
+        "supports_tool_choice": true
+    },
+    "github_copilot/claude-haiku-4.5": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16000,
+        "max_tokens": 16000,
+        "mode": "chat",
+        "supported_endpoints": [
+            "/chat/completions"
+        ],
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_vision": true
+    },
+    "github_copilot/claude-opus-4.5": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16000,
+        "max_tokens": 16000,
+        "mode": "chat",
+        "supported_endpoints": [
+            "/chat/completions"
+        ],
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_vision": true
+    },
+    "github_copilot/claude-opus-41": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 80000,
+        "max_output_tokens": 16000,
+        "max_tokens": 16000,
+        "mode": "chat",
+        "supported_endpoints": [
+            "/chat/completions"
+        ],
+        "supports_vision": true
+    },
+    "github_copilot/claude-sonnet-4": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16000,
+        "max_tokens": 16000,
+        "mode": "chat",
+        "supported_endpoints": [
+            "/chat/completions"
+        ],
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_vision": true
+    },
+    "github_copilot/claude-sonnet-4.5": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16000,
+        "max_tokens": 16000,
+        "mode": "chat",
+        "supported_endpoints": [
+            "/chat/completions"
+        ],
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_vision": true
+    },
+    "github_copilot/gemini-2.5-pro": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 64000,
+        "max_tokens": 64000,
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_vision": true
+    },
+    "github_copilot/gemini-3-pro-preview": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 64000,
+        "max_tokens": 64000,
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_vision": true
+    },
+    "github_copilot/gpt-3.5-turbo": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 16384,
+        "max_output_tokens": 4096,
+        "max_tokens": 4096,
+        "mode": "chat",
+        "supports_function_calling": true
+    },
+    "github_copilot/gpt-3.5-turbo-0613": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 16384,
+        "max_output_tokens": 4096,
+        "max_tokens": 4096,
+        "mode": "chat",
+        "supports_function_calling": true
+    },
+    "github_copilot/gpt-4": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 32768,
+        "max_output_tokens": 4096,
+        "max_tokens": 4096,
+        "mode": "chat",
+        "supports_function_calling": true
+    },
+    "github_copilot/gpt-4-0613": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 32768,
+        "max_output_tokens": 4096,
+        "max_tokens": 4096,
+        "mode": "chat",
+        "supports_function_calling": true
+    },
+    "github_copilot/gpt-4-o-preview": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 64000,
+        "max_output_tokens": 4096,
+        "max_tokens": 4096,
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true
+    },
+    "github_copilot/gpt-4.1": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "max_tokens": 16384,
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true
+    },
+    "github_copilot/gpt-4.1-2025-04-14": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "max_tokens": 16384,
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true
+    },
+    "github_copilot/gpt-41-copilot": {
+        "litellm_provider": "github_copilot",
+        "mode": "completion"
+    },
+    "github_copilot/gpt-4o": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 64000,
+        "max_output_tokens": 4096,
+        "max_tokens": 4096,
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_vision": true
+    },
+    "github_copilot/gpt-4o-2024-05-13": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 64000,
+        "max_output_tokens": 4096,
+        "max_tokens": 4096,
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_vision": true
+    },
+    "github_copilot/gpt-4o-2024-08-06": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 64000,
+        "max_output_tokens": 16384,
+        "max_tokens": 16384,
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true
+    },
+    "github_copilot/gpt-4o-2024-11-20": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 64000,
+        "max_output_tokens": 16384,
+        "max_tokens": 16384,
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_vision": true
+    },
+    "github_copilot/gpt-4o-mini": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 64000,
+        "max_output_tokens": 4096,
+        "max_tokens": 4096,
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true
+    },
+    "github_copilot/gpt-4o-mini-2024-07-18": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 64000,
+        "max_output_tokens": 4096,
+        "max_tokens": 4096,
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true
+    },
+    "github_copilot/gpt-5": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 128000,
+        "max_tokens": 128000,
+        "mode": "chat",
+        "supported_endpoints": [
+            "/chat/completions",
+            "/responses"
+        ],
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true
+    },
+    "github_copilot/gpt-5-mini": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 64000,
+        "max_tokens": 64000,
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true
+    },
+    "github_copilot/gpt-5.1": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 64000,
+        "max_tokens": 64000,
+        "mode": "chat",
+        "supported_endpoints": [
+            "/chat/completions",
+            "/responses"
+        ],
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true
+    },
+    "github_copilot/gpt-5.1-codex-max": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 128000,
+        "max_tokens": 128000,
+        "mode": "responses",
+        "supported_endpoints": [
+            "/responses"
+        ],
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true
+    },
+    "github_copilot/gpt-5.2": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 64000,
+        "max_tokens": 64000,
+        "mode": "chat",
+        "supported_endpoints": [
+            "/chat/completions",
+            "/responses"
+        ],
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true
+    },
+    "github_copilot/text-embedding-3-small": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 8191,
+        "max_tokens": 8191,
+        "mode": "embedding"
+    },
+    "github_copilot/text-embedding-3-small-inference": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 8191,
+        "max_tokens": 8191,
+        "mode": "embedding"
+    },
+    "github_copilot/text-embedding-ada-002": {
+        "litellm_provider": "github_copilot",
+        "max_input_tokens": 8191,
+        "max_tokens": 8191,
+        "mode": "embedding"
+    },
+    "fireworks_ai/accounts/fireworks/models/": {
+        "max_tokens": 40960,
+        "max_input_tokens": 40960,
+        "max_output_tokens": 40960,
+        "input_cost_per_token": 1e-07,
+        "output_cost_per_token": 0.0,
+        "litellm_provider": "fireworks_ai",
+        "mode": "embedding"
+    },
+    "gpt-4o-transcribe-diarize": {
+        "input_cost_per_audio_token": 6e-06,
+        "input_cost_per_token": 2.5e-06,
+        "litellm_provider": "openai",
+        "max_input_tokens": 16000,
+        "max_output_tokens": 2000,
+        "mode": "audio_transcription",
+        "output_cost_per_token": 1e-05,
+        "supported_endpoints": [
+            "/v1/audio/transcriptions"
+        ]
+    },
+    "gemini/gemini-3-flash-preview": {
+        "cache_read_input_token_cost": 5e-08,
+        "input_cost_per_audio_token": 1e-06,
+        "input_cost_per_token": 5e-07,
+        "litellm_provider": "gemini",
+        "max_audio_length_hours": 8.4,
+        "max_audio_per_prompt": 1,
+        "max_images_per_prompt": 3000,
+        "max_input_tokens": 1048576,
+        "max_output_tokens": 65535,
+        "max_pdf_size_mb": 30,
+        "max_tokens": 65535,
+        "max_video_length": 1,
+        "max_videos_per_prompt": 10,
+        "mode": "chat",
+        "output_cost_per_reasoning_token": 3e-06,
+        "output_cost_per_token": 3e-06,
+        "rpm": 2000,
+        "source": "https://ai.google.dev/pricing/gemini-3",
+        "supported_endpoints": [
+            "/v1/chat/completions",
+            "/v1/completions",
+            "/v1/batch"
+        ],
+        "supported_modalities": [
+            "text",
+            "image",
+            "audio",
+            "video"
+        ],
+        "supported_output_modalities": [
+            "text"
+        ],
+        "supports_audio_output": false,
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_pdf_input": true,
+        "supports_prompt_caching": true,
+        "supports_reasoning": true,
+        "supports_response_schema": true,
+        "supports_system_messages": true,
+        "supports_tool_choice": true,
+        "supports_url_context": true,
+        "supports_vision": true,
+        "supports_web_search": true,
+        "tpm": 800000
+    },
+    "gemini-3-flash-preview": {
+        "cache_read_input_token_cost": 5e-08,
+        "input_cost_per_audio_token": 1e-06,
+        "input_cost_per_token": 5e-07,
+        "litellm_provider": "vertex_ai-language-models",
+        "max_audio_length_hours": 8.4,
+        "max_audio_per_prompt": 1,
+        "max_images_per_prompt": 3000,
+        "max_input_tokens": 1048576,
+        "max_output_tokens": 65535,
+        "max_pdf_size_mb": 30,
+        "max_tokens": 65535,
+        "max_video_length": 1,
+        "max_videos_per_prompt": 10,
+        "mode": "chat",
+        "output_cost_per_reasoning_token": 3e-06,
+        "output_cost_per_token": 3e-06,
+        "source": "https://ai.google.dev/pricing/gemini-3",
+        "supported_endpoints": [
+            "/v1/chat/completions",
+            "/v1/completions",
+            "/v1/batch"
+        ],
+        "supported_modalities": [
+            "text",
+            "image",
+            "audio",
+            "video"
+        ],
+        "supported_output_modalities": [
+            "text"
+        ],
+        "supports_audio_output": false,
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_pdf_input": true,
+        "supports_prompt_caching": true,
+        "supports_reasoning": true,
+        "supports_response_schema": true,
+        "supports_system_messages": true,
+        "supports_tool_choice": true,
+        "supports_url_context": true,
+        "supports_vision": true,
+        "supports_web_search": true
+    },
+    "gpt-image-1.5": {
+        "cache_read_input_image_token_cost": 2e-06,
+        "cache_read_input_token_cost": 1.25e-06,
+        "input_cost_per_token": 5e-06,
+        "litellm_provider": "openai",
+        "mode": "image_generation",
+        "output_cost_per_token": 1e-05,
+        "input_cost_per_image_token": 8e-06,
+        "output_cost_per_image_token": 3.2e-05,
+        "supported_endpoints": [
+            "/v1/images/generations"
+        ],
+        "supports_vision": true
+    },
+    "gpt-image-1.5-2025-12-16": {
+        "cache_read_input_image_token_cost": 2e-06,
+        "cache_read_input_token_cost": 1.25e-06,
+        "input_cost_per_token": 5e-06,
+        "litellm_provider": "openai",
+        "mode": "image_generation",
+        "output_cost_per_token": 1e-05,
+        "input_cost_per_image_token": 8e-06,
+        "output_cost_per_image_token": 3.2e-05,
+        "supported_endpoints": [
+            "/v1/images/generations"
+        ],
+        "supports_vision": true
     }
 }
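Of the new entries, only some carry pricing: the github_copilot/* models list context-window metadata but no per-token rates, while gemini/gemini-3-flash-preview is priced at 5e-07 USD per input token and 3e-06 USD per output token. The snippet below is a worked check of what those Gemini rates imply for a typical call, using only numbers from this hunk.

```python
# Worked example with the gemini/gemini-3-flash-preview rates added above.
input_cost_per_token = 5e-07    # USD per prompt token
output_cost_per_token = 3e-06   # USD per completion token

prompt_tokens, completion_tokens = 10_000, 2_000
cost = prompt_tokens * input_cost_per_token + completion_tokens * output_cost_per_token
print(f"${cost:.4f}")  # 0.005 + 0.006 -> $0.0110
```

A cost calculator reading this table also has to tolerate entries without input_cost_per_token or output_cost_per_token (such as the github_copilot models) and decide whether to report them as zero or as unknown.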
{tokencostauto-0.1.402 → tokencostauto-0.1.406/tokencostauto.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tokencostauto
-Version: 0.1.402
+Version: 0.1.406
 Summary: To calculate token and translated USD cost of string and message calls to OpenAI, for example when used by AI agents
 Author-email: Trisha Pan <trishaepan@gmail.com>, Alex Reibman <areibman@gmail.com>, Pratyush Shukla <ps4534@nyu.edu>, Thiago MadPin <madpin@gmail.com>
 Project-URL: Homepage, https://github.com/madpin/tokencostaudo
All remaining files listed above with +0 -0 are unchanged between 0.1.402 and 0.1.406.