costtracker 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Vishal Verma
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,121 @@
1
+ Metadata-Version: 2.4
2
+ Name: costtracker
3
+ Version: 0.1.0
4
+ Summary: Simple and lightweight Python package to track LLM API cost
5
+ Author: Vishal Verma
6
+ License: MIT
7
+ Requires-Python: >=3.11
8
+ Description-Content-Type: text/markdown
9
+ License-File: LICENSE
10
+ Requires-Dist: python-dotenv
11
+ Requires-Dist: google-genai
12
+ Dynamic: license-file
13
+
14
+ # πŸš€ costtracker
15
+
16
+ <p align="center">
17
+ <b>Track your LLM API cost in real time β€” with one line of code.</b>
18
+ </p>
19
+
20
+ <p align="center">
21
+ <img src="https://img.shields.io/badge/python-3.11+-blue.svg" />
22
+ <img src="https://img.shields.io/badge/status-active-success.svg" />
23
+ <img src="https://img.shields.io/badge/license-MIT-green.svg" />
24
+ <img src="https://img.shields.io/badge/built%20with-GenAI-purple.svg" />
25
+ </p>
26
+
27
+ ---
28
+
29
+ ## ✨ Why costtracker?
30
+
31
+ Building with LLMs is easy.
32
+ **Tracking cost isn’t.**
33
+
34
+ costtracker solves this by giving you:
35
+
36
+ > ⚑ Instant cost visibility for every API call
37
+
38
+ No dashboards. No setup. No complexity.
39
+
40
+ ---
41
+
42
+ ## πŸ”₯ Features
43
+
44
+ - πŸ“Š Real-time token + cost tracking
45
+ - ⚑ One-line integration (`track(response)`)
46
+ - 🧠 Works with OpenAI-compatible APIs for now (Gemini included)
47
+ - πŸͺΆ Lightweight & zero-config
48
+ - 🧱 Built for developers
49
+
50
+ ---
51
+
52
+ ## ⚑ Quick Demo
53
+
54
+ ```python
55
+ from costtracker import track
56
+
57
+ track(response)
58
+ ```
59
+
60
+ ## ⚑ Output
61
+ ```
62
+ Model: gemini-2.5-flash-lite
63
+ Tokens: 13 (input: 4, output: 9)
64
+ Cost: $0.000011
65
+ ```
66
+
67
+ ## πŸ“¦ Installation
68
+ ```
69
+ pip install costtracker
70
+ ```
71
+
72
+ ## βš™οΈ Setup
73
+
74
+ Create a .env file in your root directory:
75
+ ```.env
76
+ GEMINI_API_KEY=your_api_key_here
77
+ ```
78
+
79
+ ## πŸ§ͺ Usage
80
+ ```python
81
+ import os
82
+ from dotenv import load_dotenv
83
+ from openai import OpenAI
84
+ from costtracker import track
85
+
86
+ load_dotenv()
87
+
88
+ client = OpenAI(
89
+ api_key=os.getenv("GEMINI_API_KEY"),
90
+ base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
91
+ )
92
+
93
+ response = client.chat.completions.create(
94
+ model="gemini-2.5-flash-lite",
95
+ messages=[
96
+ {"role": "user", "content": "Hello there!"}
97
+ ]
98
+ )
99
+
100
+ track(response)
101
+ ```
102
+
103
+ ## πŸ’‘ Use Cases
104
+
105
+ - πŸ§‘β€πŸ’» AI app developers tracking API usage
106
+ - πŸš€ Startup teams controlling LLM costs
107
+ - πŸ“Š Experimentation with prompt optimization
108
+ - πŸ§ͺ GenAI project building
109
+
110
+ ## 🀝 Contributing
111
+
112
+ - Contributions are welcome!
113
+ - Feel free to open issues or submit PRs.
114
+
115
+ ## ⭐ If you like this project
116
+
117
+ **Give it a star**
118
+
119
+ ## πŸ“„ License
120
+
121
+ MIT License
@@ -0,0 +1,108 @@
1
+ # πŸš€ costtracker
2
+
3
+ <p align="center">
4
+ <b>Track your LLM API cost in real time β€” with one line of code.</b>
5
+ </p>
6
+
7
+ <p align="center">
8
+ <img src="https://img.shields.io/badge/python-3.11+-blue.svg" />
9
+ <img src="https://img.shields.io/badge/status-active-success.svg" />
10
+ <img src="https://img.shields.io/badge/license-MIT-green.svg" />
11
+ <img src="https://img.shields.io/badge/built%20with-GenAI-purple.svg" />
12
+ </p>
13
+
14
+ ---
15
+
16
+ ## ✨ Why costtracker?
17
+
18
+ Building with LLMs is easy.
19
+ **Tracking cost isn’t.**
20
+
21
+ costtracker solves this by giving you:
22
+
23
+ > ⚑ Instant cost visibility for every API call
24
+
25
+ No dashboards. No setup. No complexity.
26
+
27
+ ---
28
+
29
+ ## πŸ”₯ Features
30
+
31
+ - πŸ“Š Real-time token + cost tracking
32
+ - ⚑ One-line integration (`track(response)`)
33
+ - 🧠 Works with OpenAI-compatible APIs for now (Gemini included)
34
+ - πŸͺΆ Lightweight & zero-config
35
+ - 🧱 Built for developers
36
+
37
+ ---
38
+
39
+ ## ⚑ Quick Demo
40
+
41
+ ```python
42
+ from costtracker import track
43
+
44
+ track(response)
45
+ ```
46
+
47
+ ## ⚑ Output
48
+ ```
49
+ Model: gemini-2.5-flash-lite
50
+ Tokens: 13 (input: 4, output: 9)
51
+ Cost: $0.000011
52
+ ```
53
+
54
+ ## πŸ“¦ Installation
55
+ ```
56
+ pip install costtracker
57
+ ```
58
+
59
+ ## βš™οΈ Setup
60
+
61
+ Create a .env file in your root directory:
62
+ ```.env
63
+ GEMINI_API_KEY=your_api_key_here
64
+ ```
65
+
66
+ ## πŸ§ͺ Usage
67
+ ```python
68
+ import os
69
+ from dotenv import load_dotenv
70
+ from openai import OpenAI
71
+ from costtracker import track
72
+
73
+ load_dotenv()
74
+
75
+ client = OpenAI(
76
+ api_key=os.getenv("GEMINI_API_KEY"),
77
+ base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
78
+ )
79
+
80
+ response = client.chat.completions.create(
81
+ model="gemini-2.5-flash-lite",
82
+ messages=[
83
+ {"role": "user", "content": "Hello there!"}
84
+ ]
85
+ )
86
+
87
+ track(response)
88
+ ```
89
+
90
+ ## πŸ’‘ Use Cases
91
+
92
+ - πŸ§‘β€πŸ’» AI app developers tracking API usage
93
+ - πŸš€ Startup teams controlling LLM costs
94
+ - πŸ“Š Experimentation with prompt optimization
95
+ - πŸ§ͺ GenAI project building
96
+
97
+ ## 🀝 Contributing
98
+
99
+ - Contributions are welcome!
100
+ - Feel free to open issues or submit PRs.
101
+
102
+ ## ⭐ If you like this project
103
+
104
+ **Give it a star**
105
+
106
+ ## πŸ“„ License
107
+
108
+ MIT License
@@ -0,0 +1 @@
1
+ from .tracker import track
@@ -0,0 +1,10 @@
1
def calculate_cost(input_tokens: int, output_tokens: int, pricing: dict) -> dict:
    """Compute the cost of one API call from token counts and per-token rates.

    Args:
        input_tokens: Number of prompt tokens consumed.
        output_tokens: Number of completion tokens produced.
        pricing: Mapping with per-token rates under the keys "input" and
            "output".

    Returns:
        Dict with "input_cost", "output_cost" and "total_cost".
    """
    cost_in = input_tokens * pricing["input"]
    cost_out = output_tokens * pricing["output"]
    return {
        "input_cost": cost_in,
        "output_cost": cost_out,
        "total_cost": cost_in + cost_out,
    }
@@ -0,0 +1,75 @@
1
+ import click
2
+ import runpy
3
+
4
+ from .state import reset_state, get_state, set_budget, get_budget
5
+ from .logger import set_log_file, log_output
6
+ from .config import load_config
7
+
8
@click.group()
def cli():
    """
    costtracker CLI tool.

    Entry point for the ``costtracker`` console script (see
    ``[project.scripts]`` in pyproject.toml); subcommands such as ``run``
    attach themselves via ``@cli.command()``.
    """
    pass
14
+
15
@cli.command()
@click.argument("script")
@click.option("--budget", type=float, default=None, help="Set budget limit")
@click.option("--log-file", type=str, default=None, help="Logs output to file")
@click.option("--provider", type=str, default=None, help="Sets the provider to calculate cost")
def run(script, budget, log_file, provider):
    """
    Run a Python script with cost tracking enabled.

    Settings are resolved with priority CLI option > config file > default.
    After the script finishes, a per-call breakdown and a summary are
    printed, and the total is checked against the budget (if any).
    """
    # Load config file (costtracker.toml in the working directory).
    config = load_config()

    # Merge values (priority: CLI > config > default).
    final_budget = budget if budget is not None else config.get("budget")
    final_log_file = log_file if log_file is not None else config.get("log_file")
    # NOTE: provider resolution actually happens inside track(); the merged
    # value is computed here for future use by this command.
    final_provider = provider if provider is not None else config.get("provider")

    # Reset any state left over from a previous run.
    reset_state()

    # Apply the merged settings exactly once. (The original applied the raw
    # CLI values a second time after the merged ones; that redundant
    # duplication is removed.)
    if final_budget is not None:
        set_budget(final_budget)

    if final_log_file:
        set_log_file(final_log_file)

    # Inform user, then execute the target script in-process so that any
    # track() calls inside it share this process's state.
    log_output(f"\nRunning {script}\n")
    runpy.run_path(script, run_name="__main__")

    # Get final state accumulated by track() calls.
    state = get_state()

    # Print per-call breakdown.
    log_output("\n------------------------------------")
    for i, cost in enumerate(state["calls"], start=1):
        log_output(f"Call {i} -> ${cost:.6f}")

    # Print summary.
    log_output("\n------------------------------------")
    log_output(f"Total Calls: {state['total_calls']}")
    log_output(f"Total Cost: ${state['total_cost']:.6f}")
    log_output("------------------------------------\n")

    # Budget check.
    budget_value = get_budget()
    if budget_value is not None and state['total_cost'] > budget_value:
        log_output(f"\n⚠️ Budget exceeded! Limit: ${budget_value:.6f} | Current: ${state['total_cost']:.6f}")
        log_output("------------------------------------\n")
@@ -0,0 +1,34 @@
1
+ import os
2
+ import tomllib
3
+
4
+ DEFAULT_CONFIG = {
5
+ "budget":None,
6
+ "log_file":None,
7
+ "provider":"gemini"
8
+ }
9
+
10
+ def load_config(config_path:str = "costtracker.toml")->dict:
11
+ """
12
+ Load configuration from TOML file.
13
+ If file does not exist or is invalid, return default config.
14
+ """
15
+
16
+ # Default config
17
+ config=DEFAULT_CONFIG.copy()
18
+
19
+ # Check if config file exists
20
+ if not os.path.exists(config_path):
21
+ return config
22
+
23
+ try:
24
+ with open(config_path, "rb") as f:
25
+ file_config=tomllib.load(f)
26
+
27
+ # Merge file config into default config
28
+ config.update(file_config)
29
+
30
+ except Exception:
31
+ # Fail silently
32
+ pass
33
+
34
+ return config
@@ -0,0 +1,9 @@
1
+ from .tracker import track as track_function
2
+
3
def track(func):
    """
    Decorator: track the cost of every response returned by *func*.

    Wraps an API-calling function so each return value is passed to
    costtracker's track() before being handed back to the caller.
    """
    from functools import wraps  # stdlib; local import keeps module deps minimal

    @wraps(func)  # fix: preserve the wrapped function's name/docstring
    def wrapper(*args, **kwargs):
        response = func(*args, **kwargs)
        track_function(response)
        return response

    return wrapper
@@ -0,0 +1,11 @@
1
class CostTrackerError(Exception):
    """Base exception for all costtracker errors."""
4
+
5
class UnsupportedModelError(CostTrackerError):
    """Raised when pricing for the requested model is unavailable."""
8
+
9
class InvalidResponseError(CostTrackerError):
    """Raised when a response object does not have the expected format."""
@@ -0,0 +1,15 @@
1
+ from .openai_extractor import extract_openai
2
+ from .gemini_extractor import extract_gemini
3
+
4
def extract_usage(response, provider: str):
    """
    Route usage extraction to the extractor for *provider*.

    Supported providers: "openai" and "gemini". Any other value raises
    ValueError.
    """
    if provider == "gemini":
        return extract_gemini(response)
    if provider == "openai":
        return extract_openai(response)
    raise ValueError(f"Unsupported provider: {provider}")
@@ -0,0 +1,12 @@
1
+ from typing import Dict
2
+
3
def standardize_output(input_tokens: int, output_tokens: int, model: str) -> Dict:
    """Build the canonical usage dict that every extractor must return."""
    return dict(
        input_tokens=input_tokens,
        output_tokens=output_tokens,
        model=model,
    )
@@ -0,0 +1,23 @@
1
+ from .base import standardize_output
2
+
3
def extract_gemini(response):
    """
    Pull token usage out of a Gemini response.

    Degrades to zero counts / "unknown" model whenever the expected
    attributes are missing or anything goes wrong, so extraction never
    raises into the caller.
    """
    try:
        model = getattr(response, "model_version", "unknown")
        usage = getattr(response, "usage_metadata", None)

        input_tokens = getattr(usage, "prompt_token_count", 0) if usage else 0
        output_tokens = getattr(usage, "candidates_token_count", 0) if usage else 0

        return standardize_output(input_tokens, output_tokens, model)
    except Exception:
        # Best-effort by design: tracking must not break the API call.
        return standardize_output(0, 0, "unknown")
@@ -0,0 +1,17 @@
1
+ from .base import standardize_output
2
+
3
def extract_openai(response):
    """
    Pull token usage out of an OpenAI-style response.

    Missing usage information degrades to zero counts rather than raising.
    """
    model = getattr(response, "model", "unknown")
    usage = getattr(response, "usage", None)

    if not usage:
        return standardize_output(0, 0, model)

    return standardize_output(
        getattr(usage, "prompt_tokens", 0),
        getattr(usage, "completion_tokens", 0),
        model,
    )
@@ -0,0 +1,7 @@
1
def format_output(data: dict) -> str:
    """Render tracked usage (model, tokens, cost) as a printable summary."""
    lines = [
        "",
        f"Model: {data['model']}",
        f"Tokens: {data['total_tokens']} (input: {data['input_tokens']}, output: {data['output_tokens']})",
        f"Cost: ${data['total_cost']:.6f}",
        "",
    ]
    return "\n".join(lines)
@@ -0,0 +1,22 @@
1
# Module-level log destination; None means console-only output.
LOG_FILE = None


def set_log_file(file_path: str):
    """Direct subsequent log_output() calls to *file_path* as well."""
    global LOG_FILE
    LOG_FILE = file_path


def log_output(message: str):
    """Echo *message* to stdout and append it to LOG_FILE when one is set."""
    print(message)

    if LOG_FILE:
        # Append mode so repeated calls accumulate in the same file.
        with open(LOG_FILE, "a") as fh:
            fh.write(f"{message}\n")
@@ -0,0 +1,16 @@
1
+ from .pricing_loader import fetch_remote_pricing
2
+
3
def get_pricing(model: str):
    """
    Look up per-token pricing for *model* from the remote pricing table.

    Raises:
        ValueError: when the pricing service is unreachable or the model
            has no pricing entry.
    """
    table = fetch_remote_pricing()

    if not table:
        raise ValueError("Pricing service unavailable")

    try:
        return table[model]
    except KeyError:
        raise ValueError(f"Pricing not found for model: {model}") from None
16
+
@@ -0,0 +1,25 @@
1
+ import json
2
+ import urllib.request
3
+
4
PRICING_URL = "https://raw.githubusercontent.com/viishalvermaa/costtrack-pricing-remote/refs/heads/main/pricing.json"

# Process-wide cache of the downloaded pricing table (None = not fetched yet).
_cached_pricing = None


def fetch_remote_pricing():
    """
    Download the pricing table, caching the result for the process.

    Returns {} when the download or JSON parse fails; failures are not
    cached, so a later call can retry the fetch.
    """
    global _cached_pricing

    if _cached_pricing is None:
        try:
            with urllib.request.urlopen(PRICING_URL) as resp:
                _cached_pricing = json.loads(resp.read())
        except Exception:
            # Network or parse failure: report empty pricing to the caller.
            return {}

    return _cached_pricing
@@ -0,0 +1,50 @@
1
# Global state (shared across the entire script execution).

TOTAL_COST = 0.0
TOTAL_CALLS = 0
CALL_LOGS = []
BUDGET = None


def set_budget(value: float):
    """Set the spending limit for this session."""
    global BUDGET
    BUDGET = value


def get_budget():
    """Return the current budget, or None when no limit is set."""
    return BUDGET


def update_state(cost: float):
    """Record one tracked call: accumulate its cost and log it."""
    global TOTAL_COST, TOTAL_CALLS

    TOTAL_COST += cost
    TOTAL_CALLS += 1
    CALL_LOGS.append(cost)  # in-place append; no rebinding needed


def get_state():
    """Return the running totals as a dict snapshot."""
    return {
        "total_cost": TOTAL_COST,
        "total_calls": TOTAL_CALLS,
        "calls": CALL_LOGS,
    }


def reset_state():
    """Zero out all totals before running a new script."""
    global TOTAL_COST, TOTAL_CALLS, CALL_LOGS

    TOTAL_COST = 0.0
    TOTAL_CALLS = 0
    CALL_LOGS = []
@@ -0,0 +1,115 @@
1
+ from .extractor import extract_usage
2
+ from .pricing import get_pricing
3
+ from .calculator import calculate_cost
4
+ from .formatter import format_output
5
+ from .logger import log_output
6
+ from .exceptions import CostTrackerError
7
+ from .state import update_state
8
+ from .config import load_config
9
+ from .utils.provider_detector import detect_provider
10
+
11
def track(response, provider=None):
    """
    Track the cost of a single LLM API response.

    Args:
        response: Provider SDK response object (OpenAI- or Gemini-style).
        provider: Optional explicit provider name ("openai"/"gemini").
            When omitted, the provider is auto-detected from the response
            shape, then falls back to the config file's "provider" key.

    Returns:
        dict with model, input_tokens, output_tokens, total_tokens and
        total_cost, or None when tracking had to be skipped entirely.
    """

    # Any unexpected failure is caught at the bottom so tracking never
    # crashes the caller's application code.
    try:
        config=load_config()

        # explicit provider wins over everything
        if provider:
            final_provider=provider

        # auto-detect first
        else:
            detected=detect_provider(response)

            if detected:
                final_provider=detected

            # fallback to config(future release)
            elif config.get("provider"):
                final_provider=config.get("provider")

            else:
                log_output("⚠️ Could not detect provider. Skipping cost tracking.")
                return None


        # Extract usage (token counts + model name) via the provider router.
        try:
            usage_data=extract_usage(response, final_provider)
        except Exception:
            log_output("⚠️ Failed to extract usage data. Skipping.")
            return None

        input_tokens=usage_data.get("input_tokens", 0)
        output_tokens=usage_data.get("output_tokens", 0)
        model=usage_data.get("model", "unknown")

        # Without a model name pricing cannot be resolved; still record the
        # call (cost 0) so the CLI's call count stays accurate.
        if model == "unknown":
            log_output("⚠️ Model not detected. Skipping cost calculation.")

            update_state(0)

            return {
                "model":model,
                "input_tokens":input_tokens,
                "output_tokens":output_tokens,
                "total_tokens": input_tokens+output_tokens,
                "total_cost": 0
            }


        # Get pricing from the remote pricing table; unknown models are
        # recorded as zero-cost calls rather than raising.
        try:
            pricing=get_pricing(model)
        except Exception:
            log_output(f"⚠️ Pricing not found for model: {model}. Skipping cost.")

            update_state(0)

            return {
                "model":model,
                "input_tokens":input_tokens,
                "output_tokens":output_tokens,
                "total_tokens": input_tokens+output_tokens,
                "total_cost": 0
            }


        # Calculate cost; any failure degrades to a zero-cost record.
        try:
            cost_data=calculate_cost(input_tokens, output_tokens, pricing)
            total_cost=cost_data.get("total_cost", 0)
        except Exception:
            log_output("⚠️ Cost calculation failed. Setting cost to 0.")
            total_cost=0


        # Accumulate into the shared session state (used by the CLI summary).
        update_state(total_cost)

        # Final data returned to the caller.
        final_data={
            "model":model,
            "input_tokens":input_tokens,
            "output_tokens":output_tokens,
            "total_tokens": input_tokens+output_tokens,
            "total_cost": total_cost
        }

        # Format output for display.
        # NOTE(review): `output` is computed but never logged — the
        # log_output(output) call below is commented out, so the success
        # path prints nothing (the README suggests it should). Confirm
        # whether this is intentional.
        try:
            output=format_output(final_data)
        except Exception:
            log_output(f"Tracked: {final_data}")

        # Log output
        # log_output(output)

        return final_data

    except Exception as e:
        # Last-resort guard: report and swallow so the caller keeps running.
        log_output(f"[Unexpected Error] {str(e)}")
        return None
@@ -0,0 +1,15 @@
1
def detect_provider(response):
    """
    Guess which provider produced *response* from its attribute shape.

    Returns "gemini" (has model_version), "openai" (has usage), or None
    when the shape is unrecognized. Gemini is checked first, matching the
    original precedence.
    """
    for attr, name in (("model_version", "gemini"), ("usage", "openai")):
        if hasattr(response, attr):
            return name
    return None
@@ -0,0 +1,121 @@
1
+ Metadata-Version: 2.4
2
+ Name: costtracker
3
+ Version: 0.1.0
4
+ Summary: Simple and lightweight Python package to track LLM API cost
5
+ Author: Vishal Verma
6
+ License: MIT
7
+ Requires-Python: >=3.11
8
+ Description-Content-Type: text/markdown
9
+ License-File: LICENSE
10
+ Requires-Dist: python-dotenv
11
+ Requires-Dist: google-genai
12
+ Dynamic: license-file
13
+
14
+ # πŸš€ costtracker
15
+
16
+ <p align="center">
17
+ <b>Track your LLM API cost in real time β€” with one line of code.</b>
18
+ </p>
19
+
20
+ <p align="center">
21
+ <img src="https://img.shields.io/badge/python-3.11+-blue.svg" />
22
+ <img src="https://img.shields.io/badge/status-active-success.svg" />
23
+ <img src="https://img.shields.io/badge/license-MIT-green.svg" />
24
+ <img src="https://img.shields.io/badge/built%20with-GenAI-purple.svg" />
25
+ </p>
26
+
27
+ ---
28
+
29
+ ## ✨ Why costtracker?
30
+
31
+ Building with LLMs is easy.
32
+ **Tracking cost isn’t.**
33
+
34
+ costtracker solves this by giving you:
35
+
36
+ > ⚑ Instant cost visibility for every API call
37
+
38
+ No dashboards. No setup. No complexity.
39
+
40
+ ---
41
+
42
+ ## πŸ”₯ Features
43
+
44
+ - πŸ“Š Real-time token + cost tracking
45
+ - ⚑ One-line integration (`track(response)`)
46
+ - 🧠 Works with OpenAI-compatible APIs for now (Gemini included)
47
+ - πŸͺΆ Lightweight & zero-config
48
+ - 🧱 Built for developers
49
+
50
+ ---
51
+
52
+ ## ⚑ Quick Demo
53
+
54
+ ```python
55
+ from costtracker import track
56
+
57
+ track(response)
58
+ ```
59
+
60
+ ## ⚑ Output
61
+ ```
62
+ Model: gemini-2.5-flash-lite
63
+ Tokens: 13 (input: 4, output: 9)
64
+ Cost: $0.000011
65
+ ```
66
+
67
+ ## πŸ“¦ Installation
68
+ ```
69
+ pip install costtracker
70
+ ```
71
+
72
+ ## βš™οΈ Setup
73
+
74
+ Create a .env file in your root directory:
75
+ ```.env
76
+ GEMINI_API_KEY=your_api_key_here
77
+ ```
78
+
79
+ ## πŸ§ͺ Usage
80
+ ```python
81
+ import os
82
+ from dotenv import load_dotenv
83
+ from openai import OpenAI
84
+ from costtracker import track
85
+
86
+ load_dotenv()
87
+
88
+ client = OpenAI(
89
+ api_key=os.getenv("GEMINI_API_KEY"),
90
+ base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
91
+ )
92
+
93
+ response = client.chat.completions.create(
94
+ model="gemini-2.5-flash-lite",
95
+ messages=[
96
+ {"role": "user", "content": "Hello there!"}
97
+ ]
98
+ )
99
+
100
+ track(response)
101
+ ```
102
+
103
+ ## πŸ’‘ Use Cases
104
+
105
+ - πŸ§‘β€πŸ’» AI app developers tracking API usage
106
+ - πŸš€ Startup teams controlling LLM costs
107
+ - πŸ“Š Experimentation with prompt optimization
108
+ - πŸ§ͺ GenAI project building
109
+
110
+ ## 🀝 Contributing
111
+
112
+ - Contributions are welcome!
113
+ - Feel free to open issues or submit PRs.
114
+
115
+ ## ⭐ If you like this project
116
+
117
+ **Give it a star**
118
+
119
+ ## πŸ“„ License
120
+
121
+ MIT License
@@ -0,0 +1,26 @@
1
+ LICENSE
2
+ README.md
3
+ pyproject.toml
4
+ costtracker/__init__.py
5
+ costtracker/calculator.py
6
+ costtracker/cli.py
7
+ costtracker/config.py
8
+ costtracker/decorators.py
9
+ costtracker/exceptions.py
10
+ costtracker/formatter.py
11
+ costtracker/logger.py
12
+ costtracker/pricing.py
13
+ costtracker/pricing_loader.py
14
+ costtracker/state.py
15
+ costtracker/tracker.py
16
+ costtracker.egg-info/PKG-INFO
17
+ costtracker.egg-info/SOURCES.txt
18
+ costtracker.egg-info/dependency_links.txt
19
+ costtracker.egg-info/entry_points.txt
20
+ costtracker.egg-info/requires.txt
21
+ costtracker.egg-info/top_level.txt
22
+ costtracker/extractor/__init__.py
23
+ costtracker/extractor/base.py
24
+ costtracker/extractor/gemini_extractor.py
25
+ costtracker/extractor/openai_extractor.py
26
+ costtracker/utils/provider_detector.py
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ costtracker = costtracker.cli:cli
@@ -0,0 +1,2 @@
1
+ python-dotenv
2
+ google-genai
@@ -0,0 +1 @@
1
+ costtracker
@@ -0,0 +1,22 @@
1
+ [build-system]
2
+ requires = ["setuptools>=61.0"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "costtracker"
7
+ version = "0.1.0"
8
+ description = "Simple and lightweight Python package to track LLM API cost"
9
+ authors = [
10
+ {name="Vishal Verma"}
11
+ ]
12
+ readme = "README.md"
13
+ requires-python = ">=3.11"
14
+ license = {text = "MIT"}
15
+
16
+ dependencies = [
17
+ "python-dotenv",
18
+ "google-genai"
19
+ ]
20
+
21
+ [project.scripts]
22
+ costtracker="costtracker.cli:cli"
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+