agentcost 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,29 @@
1
+ # AgentCost SDK - Environment Variables
2
+ # Copy this file to .env and update with your values
3
+ # ⚠️ NEVER commit the actual .env file to version control!
4
+
5
+ # ===========================================
6
+ # LLM API KEYS (for demos/testing)
7
+ # ===========================================
8
+
9
+ # OpenAI (https://platform.openai.com/api-keys)
10
+ OPENAI_API_KEY=sk-your-openai-key-here
11
+
12
+ # Groq (https://console.groq.com/keys) - FREE!
13
+ GROQ_API_KEY=gsk_your-groq-key-here
14
+
15
+ # Anthropic (https://console.anthropic.com/)
16
+ ANTHROPIC_API_KEY=sk-ant-your-anthropic-key-here
17
+
18
+ # ===========================================
19
+ # AgentCost Configuration
20
+ # ===========================================
21
+
22
+ # AgentCost backend URL
23
+ AGENTCOST_API_URL=http://localhost:8000
24
+
25
+ # Project API key (create via backend API or dashboard)
26
+ AGENTCOST_API_KEY=sk_your-agentcost-project-key-here
27
+
28
+ # Project ID (shown when creating a project)
29
+ AGENTCOST_PROJECT_ID=your-project-uuid-here
@@ -0,0 +1,60 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ *.egg-info/
24
+ .installed.cfg
25
+ *.egg
26
+
27
+ # Virtual environments
28
+ venv/
29
+ ENV/
30
+ env/
31
+ .venv/
32
+
33
+ # IDE
34
+ .idea/
35
+ .vscode/
36
+ *.swp
37
+ *.swo
38
+ *~
39
+
40
+ # Environment variables - NEVER COMMIT
41
+ .env
42
+ .env.local
43
+ .env.*.local
44
+
45
+ # Logs
46
+ *.log
47
+ logs/
48
+
49
+ # Testing
50
+ .pytest_cache/
51
+ .coverage
52
+ htmlcov/
53
+ .tox/
54
+
55
+ # mypy
56
+ .mypy_cache/
57
+
58
+ # OS files
59
+ .DS_Store
60
+ Thumbs.db
@@ -0,0 +1,30 @@
1
+ # Contributing to AgentCost
2
+
3
+ Thank you for your interest in contributing!
4
+
5
+ ## How to Contribute
6
+
7
+ 1. Fork the repository
8
+ 2. Create a feature branch (`git checkout -b feature/amazing-feature`)
9
+ 3. Commit your changes (`git commit -m 'Add amazing feature'`)
10
+ 4. Push to the branch (`git push origin feature/amazing-feature`)
11
+ 5. Open a Pull Request
12
+
13
+ ## Development Setup
14
+ ```bash
15
+ git clone https://github.com/agentcost-ai/agentcost-sdk.git
16
+ cd agentcost-sdk
17
+ pip install -r requirements.txt
18
+ pytest # Run tests
19
+ ```
20
+
21
+ ## Code Style
22
+
23
+ We use:
24
+ - Black for code formatting
25
+ - Flake8 for linting
26
+ - Type hints for all functions
27
+
28
+ ## Issues
29
+
30
+ Check existing issues or create a new one before starting work.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 agentcost
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,3 @@
1
+ include README.md
2
+ include LICENSE
3
+ recursive-include agentcost *.py
@@ -0,0 +1,294 @@
1
+ Metadata-Version: 2.4
2
+ Name: agentcost
3
+ Version: 0.1.0
4
+ Summary: Track LLM costs in LangChain applications with zero code changes
5
+ Home-page: https://github.com/agentcost-ai/agentcost-sdk
6
+ Author: Kushagra Agrawal
7
+ Author-email: kushagraagrawal128@gmail.com
8
+ License: MIT
9
+ Project-URL: Bug Tracker, https://github.com/agentcost-ai/agentcost-sdk/issues
10
+ Project-URL: Documentation, https://www.agentcost.tech/docs/sdk
11
+ Keywords: llm,langchain,openai,anthropic,cost-tracking,tokens,ai,monitoring,observability,langchain ai agents cost tracking monitoring
12
+ Classifier: Development Status :: 3 - Alpha
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Operating System :: OS Independent
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.8
18
+ Classifier: Programming Language :: Python :: 3.9
19
+ Classifier: Programming Language :: Python :: 3.10
20
+ Classifier: Programming Language :: Python :: 3.11
21
+ Classifier: Programming Language :: Python :: 3.12
22
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
23
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
24
+ Requires-Python: >=3.8
25
+ Description-Content-Type: text/markdown
26
+ License-File: LICENSE
27
+ Requires-Dist: tiktoken>=0.5.0
28
+ Requires-Dist: requests>=2.28.0
29
+ Requires-Dist: langchain-core>=0.1.0
30
+ Provides-Extra: dev
31
+ Requires-Dist: pytest>=7.0.0; extra == "dev"
32
+ Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
33
+ Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
34
+ Requires-Dist: black>=23.0.0; extra == "dev"
35
+ Requires-Dist: mypy>=1.0.0; extra == "dev"
36
+ Requires-Dist: ruff>=0.1.0; extra == "dev"
37
+ Provides-Extra: langchain
38
+ Requires-Dist: langchain>=0.1.0; extra == "langchain"
39
+ Requires-Dist: langchain-openai>=0.0.5; extra == "langchain"
40
+ Requires-Dist: langchain-anthropic>=0.1.0; extra == "langchain"
41
+ Dynamic: author
42
+ Dynamic: author-email
43
+ Dynamic: classifier
44
+ Dynamic: description
45
+ Dynamic: description-content-type
46
+ Dynamic: home-page
47
+ Dynamic: keywords
48
+ Dynamic: license
49
+ Dynamic: license-file
50
+ Dynamic: project-url
51
+ Dynamic: provides-extra
52
+ Dynamic: requires-dist
53
+ Dynamic: requires-python
54
+ Dynamic: summary
55
+
56
+ # AgentCost SDK
57
+
58
+ **Zero-friction LLM cost tracking for LangChain applications.**
59
+
60
+ ## Installation
61
+
62
+ ```bash
63
+ pip install agentcost
64
+ ```
65
+
66
+ Or install from source:
67
+
68
+ ```bash
69
+ cd agentcost-sdk
70
+ pip install -e .
71
+ ```
72
+
73
+ ## Quick Start
74
+
75
+ ```python
76
+ from agentcost import track_costs
77
+
78
+ # 2 lines to add cost tracking!
79
+ track_costs.init(api_key="your_api_key", project_id="my-project")
80
+
81
+ # Your existing code works unchanged
82
+ from langchain_openai import ChatOpenAI
83
+
84
+ llm = ChatOpenAI(model="gpt-4")
85
+ response = llm.invoke("Hello!") # Automatically tracked
86
+ ```
87
+
88
+ ## Features
89
+
90
+ - **Zero Code Changes**: Monkey patches LangChain - your code works as-is
91
+ - **Automatic Tracking**: Captures all `invoke()`, `ainvoke()`, `stream()`, `astream()` calls
92
+ - **Accurate Tokens**: Uses `tiktoken` for precise token counting
93
+ - **Real-Time Costs**: Calculates costs using up-to-date model pricing
94
+ - **Batched Sending**: Efficient network usage (size-based + time-based batching)
95
+ - **Rate Limiting**: Built-in rate limiter to protect your backend
96
+ - **Local Mode**: Test without a backend
97
+
98
+ ## Configuration
99
+
100
+ ```python
101
+ track_costs.init(
102
+ # Required for cloud mode
103
+ api_key="sk_...",
104
+ project_id="my-project",
105
+
106
+ # Optional settings
107
+ base_url="https://api.agentcost.tech", # Your backend URL
108
+ batch_size=10, # Events before auto-flush
109
+ flush_interval=5.0, # Seconds between flushes
110
+ debug=True, # Enable debug logging
111
+ default_agent_name="my-agent", # Default agent tag
112
+ local_mode=False, # Store locally (no backend)
113
+ enabled=True, # Enable/disable tracking
114
+
115
+ # Custom pricing (overrides defaults)
116
+ custom_pricing={
117
+ "my-custom-model": {"input": 0.001, "output": 0.002}
118
+ },
119
+
120
+ # Global metadata (attached to all events)
121
+ global_metadata={
122
+ "environment": "production",
123
+ "version": "1.0.0"
124
+ }
125
+ )
126
+ ```
127
+
128
+ ## Agent Tagging
129
+
130
+ Tag LLM calls by agent for granular analytics:
131
+
132
+ ```python
133
+ # Option 1: Set default agent
134
+ track_costs.set_agent_name("router-agent")
135
+
136
+ # Option 2: Context manager (recommended)
137
+ with track_costs.agent("technical-agent"):
138
+ llm.invoke("How do I fix this?") # Tagged as "technical-agent"
139
+
140
+ with track_costs.agent("billing-agent"):
141
+ llm.invoke("What's my balance?") # Tagged as "billing-agent"
142
+ ```
143
+
144
+ ## Metadata
145
+
146
+ Attach custom metadata for filtering/grouping:
147
+
148
+ ```python
149
+ # Persistent metadata
150
+ track_costs.add_metadata("user_id", "user_123")
151
+ track_costs.add_metadata("tenant_id", "acme_corp")
152
+
153
+ # Temporary metadata (context manager)
154
+ with track_costs.metadata(conversation_id="conv_456", step="routing"):
155
+ llm.invoke("Route this query")
156
+ ```
157
+
158
+ ## Local Testing
159
+
160
+ Test without running a backend:
161
+
162
+ ```python
163
+ track_costs.init(local_mode=True, debug=True)
164
+
165
+ # Make LLM calls
166
+ llm.invoke("Hello!")
167
+ llm.invoke("World!")
168
+
169
+ # Retrieve captured events
170
+ events = track_costs.get_local_events()
171
+ for event in events:
172
+ print(f"Model: {event['model']}")
173
+ print(f"Tokens: {event['total_tokens']}")
174
+ print(f"Cost: ${event['cost']:.6f}")
175
+ ```
176
+
177
+ ## Streaming Support
178
+
179
+ Streaming calls are automatically tracked:
180
+
181
+ ```python
182
+ # Sync streaming
183
+ for chunk in llm.stream("Tell me a story"):
184
+ print(chunk.content, end="")
185
+ # Event recorded after stream completes
186
+
187
+ # Async streaming
188
+ async for chunk in llm.astream("Tell me a story"):
189
+ print(chunk.content, end="")
190
+ # Event recorded after stream completes
191
+ ```
192
+
193
+ ## Event Structure
194
+
195
+ Each tracked event contains:
196
+
197
+ ```python
198
+ {
199
+ "agent_name": "my-agent",
200
+ "model": "gpt-4",
201
+ "input_tokens": 150,
202
+ "output_tokens": 80,
203
+ "total_tokens": 230,
204
+ "cost": 0.0093, # USD, real-time calculated
205
+ "latency_ms": 1234, # Measured latency
206
+ "timestamp": "2026-01-23T10:30:45.123Z",
207
+ "success": True,
208
+ "error": None,
209
+ "streaming": False,
210
+ "metadata": {"conversation_id": "conv_456"}
211
+ }
212
+ ```
213
+
214
+ ## Dynamic Pricing (Real-Time Updates)
215
+
216
+ The SDK automatically fetches the latest pricing from the backend. This means:
217
+
218
+ - **No code changes** when model prices change
219
+ - Pricing is **cached for 24 hours** (efficient)
220
+ - Falls back to built-in defaults if the backend is unavailable
221
+
222
+ ### How It Works
223
+
224
+ ```python
225
+ # SDK automatically fetches pricing from backend
226
+ track_costs.init(
227
+ api_key="...",
228
+ project_id="...",
229
+ base_url="http://localhost:8000", # Pricing fetched from here
230
+ )
231
+
232
+ # Prices are fetched once and cached
233
+ # GET http://localhost:8000/v1/pricing → {"pricing": {"gpt-4": {"input": 0.03, ...}}}
234
+ ```
235
+
236
+ ### Manually Update Pricing
237
+
238
+ ```python
239
+ from agentcost.cost_calculator import refresh_pricing, update_pricing
240
+
241
+ # Force refresh from backend
242
+ refresh_pricing()
243
+
244
+ # Or manually set pricing (doesn't require backend)
245
+ update_pricing({
246
+ "my-custom-model": {"input": 0.001, "output": 0.002}
247
+ })
248
+ ```
249
+
250
+ ### Backend Pricing API
251
+
252
+ ```bash
253
+ # Get all pricing
254
+ curl http://localhost:8000/v1/pricing
255
+
256
+ # Get specific model
257
+ curl http://localhost:8000/v1/pricing/gpt-4
258
+
259
+ # Update pricing (admin)
260
+ curl -X POST http://localhost:8000/v1/pricing \
261
+ -H "Content-Type: application/json" \
262
+ -d '{"gpt-4": {"input": 0.025, "output": 0.05}}'
263
+ ```
264
+
265
+ ## Supported Models (30+)
266
+
267
+ | Provider | Models |
268
+ | --------- | ------------------------------------------------------------------- |
269
+ | OpenAI | gpt-4, gpt-4-turbo, gpt-4o, gpt-4o-mini, gpt-3.5-turbo, o1, o1-mini |
270
+ | Anthropic | claude-3-opus/sonnet/haiku, claude-3.5-sonnet/haiku |
271
+ | Google | gemini-pro, gemini-1.5-pro/flash, gemini-2.0-flash |
272
+ | Groq | llama-3.1-8b/70b, llama-3.3-70b, mixtral-8x7b |
273
+ | DeepSeek | deepseek-chat, deepseek-coder, deepseek-reasoner |
274
+ | Cohere | command, command-r, command-r-plus |
275
+ | Mistral | mistral-small/medium/large |
276
+
277
+ ## Statistics
278
+
279
+ ```python
280
+ stats = track_costs.get_stats()
281
+ print(f"Events sent: {stats['batcher']['events_sent']}")
282
+ print(f"Batches sent: {stats['batcher']['batches_sent']}")
283
+ ```
284
+
285
+ ## Graceful Shutdown
286
+
287
+ ```python
288
+ track_costs.flush() # Send pending events
289
+ track_costs.shutdown() # Full shutdown
290
+ ```
291
+
292
+ ## License
293
+
294
+ MIT License
@@ -0,0 +1,239 @@
1
+ # AgentCost SDK
2
+
3
+ **Zero-friction LLM cost tracking for LangChain applications.**
4
+
5
+ ## Installation
6
+
7
+ ```bash
8
+ pip install agentcost
9
+ ```
10
+
11
+ Or install from source:
12
+
13
+ ```bash
14
+ cd agentcost-sdk
15
+ pip install -e .
16
+ ```
17
+
18
+ ## Quick Start
19
+
20
+ ```python
21
+ from agentcost import track_costs
22
+
23
+ # 2 lines to add cost tracking!
24
+ track_costs.init(api_key="your_api_key", project_id="my-project")
25
+
26
+ # Your existing code works unchanged
27
+ from langchain_openai import ChatOpenAI
28
+
29
+ llm = ChatOpenAI(model="gpt-4")
30
+ response = llm.invoke("Hello!") # Automatically tracked
31
+ ```
32
+
33
+ ## Features
34
+
35
+ - **Zero Code Changes**: Monkey patches LangChain - your code works as-is
36
+ - **Automatic Tracking**: Captures all `invoke()`, `ainvoke()`, `stream()`, `astream()` calls
37
+ - **Accurate Tokens**: Uses `tiktoken` for precise token counting
38
+ - **Real-Time Costs**: Calculates costs using up-to-date model pricing
39
+ - **Batched Sending**: Efficient network usage (size-based + time-based batching)
40
+ - **Rate Limiting**: Built-in rate limiter to protect your backend
41
+ - **Local Mode**: Test without a backend
42
+
43
+ ## Configuration
44
+
45
+ ```python
46
+ track_costs.init(
47
+ # Required for cloud mode
48
+ api_key="sk_...",
49
+ project_id="my-project",
50
+
51
+ # Optional settings
52
+ base_url="https://api.agentcost.tech", # Your backend URL
53
+ batch_size=10, # Events before auto-flush
54
+ flush_interval=5.0, # Seconds between flushes
55
+ debug=True, # Enable debug logging
56
+ default_agent_name="my-agent", # Default agent tag
57
+ local_mode=False, # Store locally (no backend)
58
+ enabled=True, # Enable/disable tracking
59
+
60
+ # Custom pricing (overrides defaults)
61
+ custom_pricing={
62
+ "my-custom-model": {"input": 0.001, "output": 0.002}
63
+ },
64
+
65
+ # Global metadata (attached to all events)
66
+ global_metadata={
67
+ "environment": "production",
68
+ "version": "1.0.0"
69
+ }
70
+ )
71
+ ```
72
+
73
+ ## Agent Tagging
74
+
75
+ Tag LLM calls by agent for granular analytics:
76
+
77
+ ```python
78
+ # Option 1: Set default agent
79
+ track_costs.set_agent_name("router-agent")
80
+
81
+ # Option 2: Context manager (recommended)
82
+ with track_costs.agent("technical-agent"):
83
+ llm.invoke("How do I fix this?") # Tagged as "technical-agent"
84
+
85
+ with track_costs.agent("billing-agent"):
86
+ llm.invoke("What's my balance?") # Tagged as "billing-agent"
87
+ ```
88
+
89
+ ## Metadata
90
+
91
+ Attach custom metadata for filtering/grouping:
92
+
93
+ ```python
94
+ # Persistent metadata
95
+ track_costs.add_metadata("user_id", "user_123")
96
+ track_costs.add_metadata("tenant_id", "acme_corp")
97
+
98
+ # Temporary metadata (context manager)
99
+ with track_costs.metadata(conversation_id="conv_456", step="routing"):
100
+ llm.invoke("Route this query")
101
+ ```
102
+
103
+ ## Local Testing
104
+
105
+ Test without running a backend:
106
+
107
+ ```python
108
+ track_costs.init(local_mode=True, debug=True)
109
+
110
+ # Make LLM calls
111
+ llm.invoke("Hello!")
112
+ llm.invoke("World!")
113
+
114
+ # Retrieve captured events
115
+ events = track_costs.get_local_events()
116
+ for event in events:
117
+ print(f"Model: {event['model']}")
118
+ print(f"Tokens: {event['total_tokens']}")
119
+ print(f"Cost: ${event['cost']:.6f}")
120
+ ```
121
+
122
+ ## Streaming Support
123
+
124
+ Streaming calls are automatically tracked:
125
+
126
+ ```python
127
+ # Sync streaming
128
+ for chunk in llm.stream("Tell me a story"):
129
+ print(chunk.content, end="")
130
+ # Event recorded after stream completes
131
+
132
+ # Async streaming
133
+ async for chunk in llm.astream("Tell me a story"):
134
+ print(chunk.content, end="")
135
+ # Event recorded after stream completes
136
+ ```
137
+
138
+ ## Event Structure
139
+
140
+ Each tracked event contains:
141
+
142
+ ```python
143
+ {
144
+ "agent_name": "my-agent",
145
+ "model": "gpt-4",
146
+ "input_tokens": 150,
147
+ "output_tokens": 80,
148
+ "total_tokens": 230,
149
+ "cost": 0.0093, # USD, real-time calculated
150
+ "latency_ms": 1234, # Measured latency
151
+ "timestamp": "2026-01-23T10:30:45.123Z",
152
+ "success": True,
153
+ "error": None,
154
+ "streaming": False,
155
+ "metadata": {"conversation_id": "conv_456"}
156
+ }
157
+ ```
158
+
159
+ ## Dynamic Pricing (Real-Time Updates)
160
+
161
+ The SDK automatically fetches the latest pricing from the backend. This means:
162
+
163
+ - **No code changes** when model prices change
164
+ - Pricing is **cached for 24 hours** (efficient)
165
+ - Falls back to built-in defaults if backend is unavailable
166
+
167
+ ### How It Works
168
+
169
+ ```python
170
+ # SDK automatically fetches pricing from backend
171
+ track_costs.init(
172
+ api_key="...",
173
+ project_id="...",
174
+ base_url="http://localhost:8000", # Pricing fetched from here
175
+ )
176
+
177
+ # Prices are fetched once and cached
178
+ # GET http://localhost:8000/v1/pricing → {"pricing": {"gpt-4": {"input": 0.03, ...}}}
179
+ ```
180
+
181
+ ### Manually Update Pricing
182
+
183
+ ```python
184
+ from agentcost.cost_calculator import refresh_pricing, update_pricing
185
+
186
+ # Force refresh from backend
187
+ refresh_pricing()
188
+
189
+ # Or manually set pricing (doesn't require backend)
190
+ update_pricing({
191
+ "my-custom-model": {"input": 0.001, "output": 0.002}
192
+ })
193
+ ```
194
+
195
+ ### Backend Pricing API
196
+
197
+ ```bash
198
+ # Get all pricing
199
+ curl http://localhost:8000/v1/pricing
200
+
201
+ # Get specific model
202
+ curl http://localhost:8000/v1/pricing/gpt-4
203
+
204
+ # Update pricing (admin)
205
+ curl -X POST http://localhost:8000/v1/pricing \
206
+ -H "Content-Type: application/json" \
207
+ -d '{"gpt-4": {"input": 0.025, "output": 0.05}}'
208
+ ```
209
+
210
+ ## Supported Models (30+)
211
+
212
+ | Provider | Models |
213
+ | --------- | ------------------------------------------------------------------- |
214
+ | OpenAI | gpt-4, gpt-4-turbo, gpt-4o, gpt-4o-mini, gpt-3.5-turbo, o1, o1-mini |
215
+ | Anthropic | claude-3-opus/sonnet/haiku, claude-3.5-sonnet/haiku |
216
+ | Google | gemini-pro, gemini-1.5-pro/flash, gemini-2.0-flash |
217
+ | Groq | llama-3.1-8b/70b, llama-3.3-70b, mixtral-8x7b |
218
+ | DeepSeek | deepseek-chat, deepseek-coder, deepseek-reasoner |
219
+ | Cohere | command, command-r, command-r-plus |
220
+ | Mistral | mistral-small/medium/large |
221
+
222
+ ## Statistics
223
+
224
+ ```python
225
+ stats = track_costs.get_stats()
226
+ print(f"Events sent: {stats['batcher']['events_sent']}")
227
+ print(f"Batches sent: {stats['batcher']['batches_sent']}")
228
+ ```
229
+
230
+ ## Graceful Shutdown
231
+
232
+ ```python
233
+ track_costs.flush() # Send pending events
234
+ track_costs.shutdown() # Full shutdown
235
+ ```
236
+
237
+ ## License
238
+
239
+ MIT License
@@ -0,0 +1 @@
1
+ 0.1.0