optexity-browser-use 0.9.5 (optexity_browser_use-0.9.5-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (147)
  1. browser_use/__init__.py +157 -0
  2. browser_use/actor/__init__.py +11 -0
  3. browser_use/actor/element.py +1175 -0
  4. browser_use/actor/mouse.py +134 -0
  5. browser_use/actor/page.py +561 -0
  6. browser_use/actor/playground/flights.py +41 -0
  7. browser_use/actor/playground/mixed_automation.py +54 -0
  8. browser_use/actor/playground/playground.py +236 -0
  9. browser_use/actor/utils.py +176 -0
  10. browser_use/agent/cloud_events.py +282 -0
  11. browser_use/agent/gif.py +424 -0
  12. browser_use/agent/judge.py +170 -0
  13. browser_use/agent/message_manager/service.py +473 -0
  14. browser_use/agent/message_manager/utils.py +52 -0
  15. browser_use/agent/message_manager/views.py +98 -0
  16. browser_use/agent/prompts.py +413 -0
  17. browser_use/agent/service.py +2316 -0
  18. browser_use/agent/system_prompt.md +185 -0
  19. browser_use/agent/system_prompt_flash.md +10 -0
  20. browser_use/agent/system_prompt_no_thinking.md +183 -0
  21. browser_use/agent/views.py +743 -0
  22. browser_use/browser/__init__.py +41 -0
  23. browser_use/browser/cloud/cloud.py +203 -0
  24. browser_use/browser/cloud/views.py +89 -0
  25. browser_use/browser/events.py +578 -0
  26. browser_use/browser/profile.py +1158 -0
  27. browser_use/browser/python_highlights.py +548 -0
  28. browser_use/browser/session.py +3225 -0
  29. browser_use/browser/session_manager.py +399 -0
  30. browser_use/browser/video_recorder.py +162 -0
  31. browser_use/browser/views.py +200 -0
  32. browser_use/browser/watchdog_base.py +260 -0
  33. browser_use/browser/watchdogs/__init__.py +0 -0
  34. browser_use/browser/watchdogs/aboutblank_watchdog.py +253 -0
  35. browser_use/browser/watchdogs/crash_watchdog.py +335 -0
  36. browser_use/browser/watchdogs/default_action_watchdog.py +2729 -0
  37. browser_use/browser/watchdogs/dom_watchdog.py +817 -0
  38. browser_use/browser/watchdogs/downloads_watchdog.py +1277 -0
  39. browser_use/browser/watchdogs/local_browser_watchdog.py +461 -0
  40. browser_use/browser/watchdogs/permissions_watchdog.py +43 -0
  41. browser_use/browser/watchdogs/popups_watchdog.py +143 -0
  42. browser_use/browser/watchdogs/recording_watchdog.py +126 -0
  43. browser_use/browser/watchdogs/screenshot_watchdog.py +62 -0
  44. browser_use/browser/watchdogs/security_watchdog.py +280 -0
  45. browser_use/browser/watchdogs/storage_state_watchdog.py +335 -0
  46. browser_use/cli.py +2359 -0
  47. browser_use/code_use/__init__.py +16 -0
  48. browser_use/code_use/formatting.py +192 -0
  49. browser_use/code_use/namespace.py +665 -0
  50. browser_use/code_use/notebook_export.py +276 -0
  51. browser_use/code_use/service.py +1340 -0
  52. browser_use/code_use/system_prompt.md +574 -0
  53. browser_use/code_use/utils.py +150 -0
  54. browser_use/code_use/views.py +171 -0
  55. browser_use/config.py +505 -0
  56. browser_use/controller/__init__.py +3 -0
  57. browser_use/dom/enhanced_snapshot.py +161 -0
  58. browser_use/dom/markdown_extractor.py +169 -0
  59. browser_use/dom/playground/extraction.py +312 -0
  60. browser_use/dom/playground/multi_act.py +32 -0
  61. browser_use/dom/serializer/clickable_elements.py +200 -0
  62. browser_use/dom/serializer/code_use_serializer.py +287 -0
  63. browser_use/dom/serializer/eval_serializer.py +478 -0
  64. browser_use/dom/serializer/html_serializer.py +212 -0
  65. browser_use/dom/serializer/paint_order.py +197 -0
  66. browser_use/dom/serializer/serializer.py +1170 -0
  67. browser_use/dom/service.py +825 -0
  68. browser_use/dom/utils.py +129 -0
  69. browser_use/dom/views.py +906 -0
  70. browser_use/exceptions.py +5 -0
  71. browser_use/filesystem/__init__.py +0 -0
  72. browser_use/filesystem/file_system.py +619 -0
  73. browser_use/init_cmd.py +376 -0
  74. browser_use/integrations/gmail/__init__.py +24 -0
  75. browser_use/integrations/gmail/actions.py +115 -0
  76. browser_use/integrations/gmail/service.py +225 -0
  77. browser_use/llm/__init__.py +155 -0
  78. browser_use/llm/anthropic/chat.py +242 -0
  79. browser_use/llm/anthropic/serializer.py +312 -0
  80. browser_use/llm/aws/__init__.py +36 -0
  81. browser_use/llm/aws/chat_anthropic.py +242 -0
  82. browser_use/llm/aws/chat_bedrock.py +289 -0
  83. browser_use/llm/aws/serializer.py +257 -0
  84. browser_use/llm/azure/chat.py +91 -0
  85. browser_use/llm/base.py +57 -0
  86. browser_use/llm/browser_use/__init__.py +3 -0
  87. browser_use/llm/browser_use/chat.py +201 -0
  88. browser_use/llm/cerebras/chat.py +193 -0
  89. browser_use/llm/cerebras/serializer.py +109 -0
  90. browser_use/llm/deepseek/chat.py +212 -0
  91. browser_use/llm/deepseek/serializer.py +109 -0
  92. browser_use/llm/exceptions.py +29 -0
  93. browser_use/llm/google/__init__.py +3 -0
  94. browser_use/llm/google/chat.py +542 -0
  95. browser_use/llm/google/serializer.py +120 -0
  96. browser_use/llm/groq/chat.py +229 -0
  97. browser_use/llm/groq/parser.py +158 -0
  98. browser_use/llm/groq/serializer.py +159 -0
  99. browser_use/llm/messages.py +238 -0
  100. browser_use/llm/models.py +271 -0
  101. browser_use/llm/oci_raw/__init__.py +10 -0
  102. browser_use/llm/oci_raw/chat.py +443 -0
  103. browser_use/llm/oci_raw/serializer.py +229 -0
  104. browser_use/llm/ollama/chat.py +97 -0
  105. browser_use/llm/ollama/serializer.py +143 -0
  106. browser_use/llm/openai/chat.py +264 -0
  107. browser_use/llm/openai/like.py +15 -0
  108. browser_use/llm/openai/serializer.py +165 -0
  109. browser_use/llm/openrouter/chat.py +211 -0
  110. browser_use/llm/openrouter/serializer.py +26 -0
  111. browser_use/llm/schema.py +176 -0
  112. browser_use/llm/views.py +48 -0
  113. browser_use/logging_config.py +330 -0
  114. browser_use/mcp/__init__.py +18 -0
  115. browser_use/mcp/__main__.py +12 -0
  116. browser_use/mcp/client.py +544 -0
  117. browser_use/mcp/controller.py +264 -0
  118. browser_use/mcp/server.py +1114 -0
  119. browser_use/observability.py +204 -0
  120. browser_use/py.typed +0 -0
  121. browser_use/sandbox/__init__.py +41 -0
  122. browser_use/sandbox/sandbox.py +637 -0
  123. browser_use/sandbox/views.py +132 -0
  124. browser_use/screenshots/__init__.py +1 -0
  125. browser_use/screenshots/service.py +52 -0
  126. browser_use/sync/__init__.py +6 -0
  127. browser_use/sync/auth.py +357 -0
  128. browser_use/sync/service.py +161 -0
  129. browser_use/telemetry/__init__.py +51 -0
  130. browser_use/telemetry/service.py +112 -0
  131. browser_use/telemetry/views.py +101 -0
  132. browser_use/tokens/__init__.py +0 -0
  133. browser_use/tokens/custom_pricing.py +24 -0
  134. browser_use/tokens/mappings.py +4 -0
  135. browser_use/tokens/service.py +580 -0
  136. browser_use/tokens/views.py +108 -0
  137. browser_use/tools/registry/service.py +572 -0
  138. browser_use/tools/registry/views.py +174 -0
  139. browser_use/tools/service.py +1675 -0
  140. browser_use/tools/utils.py +82 -0
  141. browser_use/tools/views.py +100 -0
  142. browser_use/utils.py +670 -0
  143. optexity_browser_use-0.9.5.dist-info/METADATA +344 -0
  144. optexity_browser_use-0.9.5.dist-info/RECORD +147 -0
  145. optexity_browser_use-0.9.5.dist-info/WHEEL +4 -0
  146. optexity_browser_use-0.9.5.dist-info/entry_points.txt +3 -0
  147. optexity_browser_use-0.9.5.dist-info/licenses/LICENSE +21 -0
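
The hunks below reproduce two of the added files, browser_use/tokens/service.py (+580) and browser_use/tokens/views.py (+108). Assuming the package is published to PyPI under this name (not verified here), the wheel can be installed with:

    pip install optexity-browser-use==0.9.5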
browser_use/tokens/service.py
@@ -0,0 +1,580 @@
+"""
+Token cost service that tracks LLM token usage and costs.
+
+Fetches pricing data from LiteLLM repository and caches it for 1 day.
+Automatically tracks token usage when LLMs are registered and invoked.
+"""
+
+import asyncio
+import logging
+import os
+from datetime import datetime, timedelta
+from pathlib import Path
+from typing import Any
+
+import anyio
+import httpx
+from dotenv import load_dotenv
+
+from browser_use.llm.base import BaseChatModel
+from browser_use.llm.views import ChatInvokeUsage
+from browser_use.tokens.custom_pricing import CUSTOM_MODEL_PRICING
+from browser_use.tokens.mappings import MODEL_TO_LITELLM
+from browser_use.tokens.views import (
+    CachedPricingData,
+    ModelPricing,
+    ModelUsageStats,
+    ModelUsageTokens,
+    TokenCostCalculated,
+    TokenUsageEntry,
+    UsageSummary,
+)
+
+load_dotenv()
+
+from browser_use.config import CONFIG
+
+logger = logging.getLogger(__name__)
+cost_logger = logging.getLogger('cost')
+
+
+def xdg_cache_home() -> Path:
+    default = Path.home() / '.cache'
+    if CONFIG.XDG_CACHE_HOME and (path := Path(CONFIG.XDG_CACHE_HOME)).is_absolute():
+        return path
+    return default
+
+
+class TokenCost:
+    """Service for tracking token usage and calculating costs"""
+
+    CACHE_DIR_NAME = 'browser_use/token_cost'
+    CACHE_DURATION = timedelta(days=1)
+    PRICING_URL = 'https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json'
+
+    def __init__(self, include_cost: bool = False):
+        self.include_cost = include_cost or os.getenv('BROWSER_USE_CALCULATE_COST', 'false').lower() == 'true'
+
+        self.usage_history: list[TokenUsageEntry] = []
+        self.registered_llms: dict[str, BaseChatModel] = {}
+        self._pricing_data: dict[str, Any] | None = None
+        self._initialized = False
+        self._cache_dir = xdg_cache_home() / self.CACHE_DIR_NAME
+
+    async def initialize(self) -> None:
+        """Initialize the service by loading pricing data"""
+        if not self._initialized:
+            if self.include_cost:
+                await self._load_pricing_data()
+            self._initialized = True
+
+    async def _load_pricing_data(self) -> None:
+        """Load pricing data from cache or fetch from GitHub"""
+        # Try to find a valid cache file
+        cache_file = await self._find_valid_cache()
+
+        if cache_file:
+            await self._load_from_cache(cache_file)
+        else:
+            await self._fetch_and_cache_pricing_data()
+
+    async def _find_valid_cache(self) -> Path | None:
+        """Find the most recent valid cache file"""
+        try:
+            # Ensure cache directory exists
+            self._cache_dir.mkdir(parents=True, exist_ok=True)
+
+            # List all JSON files in the cache directory
+            cache_files = list(self._cache_dir.glob('*.json'))
+
+            if not cache_files:
+                return None
+
+            # Sort by modification time (most recent first)
+            cache_files.sort(key=lambda f: f.stat().st_mtime, reverse=True)
+
+            # Check each file until we find a valid one
+            for cache_file in cache_files:
+                if await self._is_cache_valid(cache_file):
+                    return cache_file
+                else:
+                    # Clean up old cache files
+                    try:
+                        os.remove(cache_file)
+                    except Exception:
+                        pass
+
+            return None
+        except Exception:
+            return None
+
+    async def _is_cache_valid(self, cache_file: Path) -> bool:
+        """Check if a specific cache file is valid and not expired"""
+        try:
+            if not cache_file.exists():
+                return False
+
+            # Read the cached data
+            cached = CachedPricingData.model_validate_json(await anyio.Path(cache_file).read_text())
+
+            # Check if cache is still valid
+            return datetime.now() - cached.timestamp < self.CACHE_DURATION
+        except Exception:
+            return False
+
+    async def _load_from_cache(self, cache_file: Path) -> None:
+        """Load pricing data from a specific cache file"""
+        try:
+            content = await anyio.Path(cache_file).read_text()
+            cached = CachedPricingData.model_validate_json(content)
+            self._pricing_data = cached.data
+        except Exception as e:
+            logger.debug(f'Error loading cached pricing data from {cache_file}: {e}')
+            # Fall back to fetching
+            await self._fetch_and_cache_pricing_data()
+
+    async def _fetch_and_cache_pricing_data(self) -> None:
+        """Fetch pricing data from LiteLLM GitHub and cache it with timestamp"""
+        try:
+            async with httpx.AsyncClient() as client:
+                response = await client.get(self.PRICING_URL, timeout=30)
+                response.raise_for_status()
+
+                self._pricing_data = response.json()
+
+            # Create cache object with timestamp
+            cached = CachedPricingData(timestamp=datetime.now(), data=self._pricing_data or {})
+
+            # Ensure cache directory exists
+            self._cache_dir.mkdir(parents=True, exist_ok=True)
+
+            # Create cache file with timestamp in filename
+            timestamp_str = datetime.now().strftime('%Y%m%d_%H%M%S')
+            cache_file = self._cache_dir / f'pricing_{timestamp_str}.json'
+
+            await anyio.Path(cache_file).write_text(cached.model_dump_json(indent=2))
+        except Exception as e:
+            logger.debug(f'Error fetching pricing data: {e}')
+            # Fall back to empty pricing data
+            self._pricing_data = {}
+
+    async def get_model_pricing(self, model_name: str) -> ModelPricing | None:
+        """Get pricing information for a specific model"""
+        # Ensure we're initialized
+        if not self._initialized:
+            await self.initialize()
+
+        # Check custom pricing first
+        if model_name in CUSTOM_MODEL_PRICING:
+            data = CUSTOM_MODEL_PRICING[model_name]
+            return ModelPricing(
+                model=model_name,
+                input_cost_per_token=data.get('input_cost_per_token'),
+                output_cost_per_token=data.get('output_cost_per_token'),
+                max_tokens=data.get('max_tokens'),
+                max_input_tokens=data.get('max_input_tokens'),
+                max_output_tokens=data.get('max_output_tokens'),
+                cache_read_input_token_cost=data.get('cache_read_input_token_cost'),
+                cache_creation_input_token_cost=data.get('cache_creation_input_token_cost'),
+            )
+
+        # Map model name to LiteLLM model name if needed
+        litellm_model_name = MODEL_TO_LITELLM.get(model_name, model_name)
+
+        if not self._pricing_data or litellm_model_name not in self._pricing_data:
+            return None
+
+        data = self._pricing_data[litellm_model_name]
+        return ModelPricing(
+            model=model_name,
+            input_cost_per_token=data.get('input_cost_per_token'),
+            output_cost_per_token=data.get('output_cost_per_token'),
+            max_tokens=data.get('max_tokens'),
+            max_input_tokens=data.get('max_input_tokens'),
+            max_output_tokens=data.get('max_output_tokens'),
+            cache_read_input_token_cost=data.get('cache_read_input_token_cost'),
+            cache_creation_input_token_cost=data.get('cache_creation_input_token_cost'),
+        )
+
+    async def calculate_cost(self, model: str, usage: ChatInvokeUsage) -> TokenCostCalculated | None:
+        if not self.include_cost:
+            return None
+
+        data = await self.get_model_pricing(model)
+        if data is None:
+            return None
+
+        uncached_prompt_tokens = usage.prompt_tokens - (usage.prompt_cached_tokens or 0)
+
+        return TokenCostCalculated(
+            new_prompt_tokens=usage.prompt_tokens,
+            new_prompt_cost=uncached_prompt_tokens * (data.input_cost_per_token or 0),
+            # Cached tokens
+            prompt_read_cached_tokens=usage.prompt_cached_tokens,
+            prompt_read_cached_cost=usage.prompt_cached_tokens * data.cache_read_input_token_cost
+            if usage.prompt_cached_tokens and data.cache_read_input_token_cost
+            else None,
+            # Cache creation tokens
+            prompt_cached_creation_tokens=usage.prompt_cache_creation_tokens,
+            prompt_cache_creation_cost=usage.prompt_cache_creation_tokens * data.cache_creation_input_token_cost
+            if data.cache_creation_input_token_cost and usage.prompt_cache_creation_tokens
+            else None,
+            # Completion tokens
+            completion_tokens=usage.completion_tokens,
+            completion_cost=usage.completion_tokens * float(data.output_cost_per_token or 0),
+        )
+
+    def add_usage(self, model: str, usage: ChatInvokeUsage) -> TokenUsageEntry:
+        """Add token usage entry to history (without calculating cost)"""
+        entry = TokenUsageEntry(
+            model=model,
+            timestamp=datetime.now(),
+            usage=usage,
+        )
+
+        self.usage_history.append(entry)
+
+        return entry
+
+    # async def _log_non_usage_llm(self, llm: BaseChatModel) -> None:
+    #     """Log non-usage to the logger"""
+    #     C_CYAN = '\033[96m'
+    #     C_RESET = '\033[0m'
+
+    #     cost_logger.debug(f'🧠 llm : {C_CYAN}{llm.model}{C_RESET} (no usage found)')
+
+    async def _log_usage(self, model: str, usage: TokenUsageEntry) -> None:
+        """Log usage to the logger"""
+        if not self._initialized:
+            await self.initialize()
+
+        # ANSI color codes
+        C_CYAN = '\033[96m'
+        C_YELLOW = '\033[93m'
+        C_GREEN = '\033[92m'
+        C_BLUE = '\033[94m'
+        C_RESET = '\033[0m'
+
+        # Always get cost breakdown for token details (even if not showing costs)
+        cost = await self.calculate_cost(model, usage.usage)
+
+        # Build input tokens breakdown
+        input_part = self._build_input_tokens_display(usage.usage, cost)
+
+        # Build output tokens display
+        completion_tokens_fmt = self._format_tokens(usage.usage.completion_tokens)
+        if self.include_cost and cost and cost.completion_cost > 0:
+            output_part = f'📤 {C_GREEN}{completion_tokens_fmt} (${cost.completion_cost:.4f}){C_RESET}'
+        else:
+            output_part = f'📤 {C_GREEN}{completion_tokens_fmt}{C_RESET}'
+
+        cost_logger.debug(f'🧠 {C_CYAN}{model}{C_RESET} | {input_part} | {output_part}')
+
+    def _build_input_tokens_display(self, usage: ChatInvokeUsage, cost: TokenCostCalculated | None) -> str:
+        """Build a clear display of input tokens breakdown with emojis and optional costs"""
+        C_YELLOW = '\033[93m'
+        C_BLUE = '\033[94m'
+        C_RESET = '\033[0m'
+
+        parts = []
+
+        # Always show token breakdown if we have cache information, regardless of cost tracking
+        if usage.prompt_cached_tokens or usage.prompt_cache_creation_tokens:
+            # Calculate actual new tokens (non-cached)
+            new_tokens = usage.prompt_tokens - (usage.prompt_cached_tokens or 0)
+
+            if new_tokens > 0:
+                new_tokens_fmt = self._format_tokens(new_tokens)
+                if self.include_cost and cost and cost.new_prompt_cost > 0:
+                    parts.append(f'🆕 {C_YELLOW}{new_tokens_fmt} (${cost.new_prompt_cost:.4f}){C_RESET}')
+                else:
+                    parts.append(f'🆕 {C_YELLOW}{new_tokens_fmt}{C_RESET}')
+
+            if usage.prompt_cached_tokens:
+                cached_tokens_fmt = self._format_tokens(usage.prompt_cached_tokens)
+                if self.include_cost and cost and cost.prompt_read_cached_cost:
+                    parts.append(f'💾 {C_BLUE}{cached_tokens_fmt} (${cost.prompt_read_cached_cost:.4f}){C_RESET}')
+                else:
+                    parts.append(f'💾 {C_BLUE}{cached_tokens_fmt}{C_RESET}')
+
+            if usage.prompt_cache_creation_tokens:
+                creation_tokens_fmt = self._format_tokens(usage.prompt_cache_creation_tokens)
+                if self.include_cost and cost and cost.prompt_cache_creation_cost:
+                    parts.append(f'🔧 {C_BLUE}{creation_tokens_fmt} (${cost.prompt_cache_creation_cost:.4f}){C_RESET}')
+                else:
+                    parts.append(f'🔧 {C_BLUE}{creation_tokens_fmt}{C_RESET}')
+
+        if not parts:
+            # Fallback to simple display when no cache information available
+            total_tokens_fmt = self._format_tokens(usage.prompt_tokens)
+            if self.include_cost and cost and cost.new_prompt_cost > 0:
+                parts.append(f'📥 {C_YELLOW}{total_tokens_fmt} (${cost.new_prompt_cost:.4f}){C_RESET}')
+            else:
+                parts.append(f'📥 {C_YELLOW}{total_tokens_fmt}{C_RESET}')
+
+        return ' + '.join(parts)
+
+    def register_llm(self, llm: BaseChatModel) -> BaseChatModel:
+        """
+        Register an LLM to automatically track its token usage
+
+        @dev Guarantees that the same instance is not registered multiple times
+        """
+        # Use instance ID as key to avoid collisions between multiple instances
+        instance_id = str(id(llm))
+
+        # Check if this exact instance is already registered
+        if instance_id in self.registered_llms:
+            logger.debug(f'LLM instance {instance_id} ({llm.provider}_{llm.model}) is already registered')
+            return llm
+
+        self.registered_llms[instance_id] = llm
+
+        # Store the original method
+        original_ainvoke = llm.ainvoke
+        # Store reference to self for use in the closure
+        token_cost_service = self
+
+        # Create a wrapped version that tracks usage
+        async def tracked_ainvoke(messages, output_format=None, **kwargs):
+            # Call the original method, passing through any additional kwargs
+            result = await original_ainvoke(messages, output_format, **kwargs)
+
+            # Track usage if available (no await needed since add_usage is now sync)
+            # Use llm.model instead of llm.name for consistency with get_usage_tokens_for_model()
+            if result.usage:
+                usage = token_cost_service.add_usage(llm.model, result.usage)
+
+                logger.debug(f'Token cost service: {usage}')
+
+                asyncio.create_task(token_cost_service._log_usage(llm.model, usage))
+
+            # else:
+            #     await token_cost_service._log_non_usage_llm(llm)
+
+            return result
+
+        # Replace the method with our tracked version
+        # Using setattr to avoid type checking issues with overloaded methods
+        setattr(llm, 'ainvoke', tracked_ainvoke)
+
+        return llm
+
+    def get_usage_tokens_for_model(self, model: str) -> ModelUsageTokens:
+        """Get usage tokens for a specific model"""
+        filtered_usage = [u for u in self.usage_history if u.model == model]
+
+        return ModelUsageTokens(
+            model=model,
+            prompt_tokens=sum(u.usage.prompt_tokens for u in filtered_usage),
+            prompt_cached_tokens=sum(u.usage.prompt_cached_tokens or 0 for u in filtered_usage),
+            completion_tokens=sum(u.usage.completion_tokens for u in filtered_usage),
+            total_tokens=sum(u.usage.prompt_tokens + u.usage.completion_tokens for u in filtered_usage),
+        )
+
+    async def get_usage_summary(self, model: str | None = None, since: datetime | None = None) -> UsageSummary:
+        """Get summary of token usage and costs (costs calculated on-the-fly)"""
+        filtered_usage = self.usage_history
+
+        if model:
+            filtered_usage = [u for u in filtered_usage if u.model == model]
+
+        if since:
+            filtered_usage = [u for u in filtered_usage if u.timestamp >= since]
+
+        if not filtered_usage:
+            return UsageSummary(
+                total_prompt_tokens=0,
+                total_prompt_cost=0.0,
+                total_prompt_cached_tokens=0,
+                total_prompt_cached_cost=0.0,
+                total_completion_tokens=0,
+                total_completion_cost=0.0,
+                total_tokens=0,
+                total_cost=0.0,
+                entry_count=0,
+            )
+
+        # Calculate totals
+        total_prompt = sum(u.usage.prompt_tokens for u in filtered_usage)
+        total_completion = sum(u.usage.completion_tokens for u in filtered_usage)
+        total_tokens = total_prompt + total_completion
+        total_prompt_cached = sum(u.usage.prompt_cached_tokens or 0 for u in filtered_usage)
+        models = list({u.model for u in filtered_usage})
+
+        # Calculate per-model stats with record-by-record cost calculation
+        model_stats: dict[str, ModelUsageStats] = {}
+        total_prompt_cost = 0.0
+        total_completion_cost = 0.0
+        total_prompt_cached_cost = 0.0
+
+        for entry in filtered_usage:
+            if entry.model not in model_stats:
+                model_stats[entry.model] = ModelUsageStats(model=entry.model)
+
+            stats = model_stats[entry.model]
+            stats.prompt_tokens += entry.usage.prompt_tokens
+            stats.completion_tokens += entry.usage.completion_tokens
+            stats.total_tokens += entry.usage.prompt_tokens + entry.usage.completion_tokens
+            stats.invocations += 1
+
+            if self.include_cost:
+                # Calculate cost record by record using the updated calculate_cost function
+                cost = await self.calculate_cost(entry.model, entry.usage)
+                if cost:
+                    stats.cost += cost.total_cost
+                    total_prompt_cost += cost.prompt_cost
+                    total_completion_cost += cost.completion_cost
+                    total_prompt_cached_cost += cost.prompt_read_cached_cost or 0
+
+        # Calculate averages
+        for stats in model_stats.values():
+            if stats.invocations > 0:
+                stats.average_tokens_per_invocation = stats.total_tokens / stats.invocations
+
+        return UsageSummary(
+            total_prompt_tokens=total_prompt,
+            total_prompt_cost=total_prompt_cost,
+            total_prompt_cached_tokens=total_prompt_cached,
+            total_prompt_cached_cost=total_prompt_cached_cost,
+            total_completion_tokens=total_completion,
+            total_completion_cost=total_completion_cost,
+            total_tokens=total_tokens,
+            total_cost=total_prompt_cost + total_completion_cost + total_prompt_cached_cost,
+            entry_count=len(filtered_usage),
+            by_model=model_stats,
+        )
+
+    def _format_tokens(self, tokens: int) -> str:
+        """Format token count with k suffix for thousands"""
+        if tokens >= 1000000000:
+            return f'{tokens / 1000000000:.1f}B'
+        if tokens >= 1000000:
+            return f'{tokens / 1000000:.1f}M'
+        if tokens >= 1000:
+            return f'{tokens / 1000:.1f}k'
+        return str(tokens)
+
+    async def log_usage_summary(self) -> None:
+        """Log a comprehensive usage summary per model with colors and nice formatting"""
+        if not self.usage_history:
+            return
+
+        summary = await self.get_usage_summary()
+
+        if summary.entry_count == 0:
+            return
+
+        # ANSI color codes
+        C_CYAN = '\033[96m'
+        C_YELLOW = '\033[93m'
+        C_GREEN = '\033[92m'
+        C_BLUE = '\033[94m'
+        C_MAGENTA = '\033[95m'
+        C_RESET = '\033[0m'
+        C_BOLD = '\033[1m'
+
+        # Log overall summary
+        total_tokens_fmt = self._format_tokens(summary.total_tokens)
+        prompt_tokens_fmt = self._format_tokens(summary.total_prompt_tokens)
+        completion_tokens_fmt = self._format_tokens(summary.total_completion_tokens)
+
+        # Format cost breakdowns for input and output (only if cost tracking is enabled)
+        if self.include_cost and summary.total_cost > 0:
+            total_cost_part = f' (${C_MAGENTA}{summary.total_cost:.4f}{C_RESET})'
+            prompt_cost_part = f' (${summary.total_prompt_cost:.4f})'
+            completion_cost_part = f' (${summary.total_completion_cost:.4f})'
+        else:
+            total_cost_part = ''
+            prompt_cost_part = ''
+            completion_cost_part = ''
+
+        if len(summary.by_model) > 1:
+            cost_logger.debug(
+                f'💲 {C_BOLD}Total Usage Summary{C_RESET}: {C_BLUE}{total_tokens_fmt} tokens{C_RESET}{total_cost_part} | '
+                f'⬅️ {C_YELLOW}{prompt_tokens_fmt}{prompt_cost_part}{C_RESET} | ➡️ {C_GREEN}{completion_tokens_fmt}{completion_cost_part}{C_RESET}'
+            )
+
+        for model, stats in summary.by_model.items():
+            # Format tokens
+            model_total_fmt = self._format_tokens(stats.total_tokens)
+            model_prompt_fmt = self._format_tokens(stats.prompt_tokens)
+            model_completion_fmt = self._format_tokens(stats.completion_tokens)
+            avg_tokens_fmt = self._format_tokens(int(stats.average_tokens_per_invocation))
+
+            # Format cost display (only if cost tracking is enabled)
+            if self.include_cost:
+                # Calculate per-model costs on-the-fly
+                total_model_cost = 0.0
+                model_prompt_cost = 0.0
+                model_completion_cost = 0.0
+
+                # Calculate costs for this model
+                for entry in self.usage_history:
+                    if entry.model == model:
+                        cost = await self.calculate_cost(entry.model, entry.usage)
+                        if cost:
+                            model_prompt_cost += cost.prompt_cost
+                            model_completion_cost += cost.completion_cost
+
+                total_model_cost = model_prompt_cost + model_completion_cost
+
+                if total_model_cost > 0:
+                    cost_part = f' (${C_MAGENTA}{total_model_cost:.4f}{C_RESET})'
+                    prompt_part = f'{C_YELLOW}{model_prompt_fmt} (${model_prompt_cost:.4f}){C_RESET}'
+                    completion_part = f'{C_GREEN}{model_completion_fmt} (${model_completion_cost:.4f}){C_RESET}'
+                else:
+                    cost_part = ''
+                    prompt_part = f'{C_YELLOW}{model_prompt_fmt}{C_RESET}'
+                    completion_part = f'{C_GREEN}{model_completion_fmt}{C_RESET}'
+            else:
+                cost_part = ''
+                prompt_part = f'{C_YELLOW}{model_prompt_fmt}{C_RESET}'
+                completion_part = f'{C_GREEN}{model_completion_fmt}{C_RESET}'
+
+            cost_logger.debug(
+                f' 🤖 {C_CYAN}{model}{C_RESET}: {C_BLUE}{model_total_fmt} tokens{C_RESET}{cost_part} | '
+                f'⬅️ {prompt_part} | ➡️ {completion_part} | '
+                f'📞 {stats.invocations} calls | 📈 {avg_tokens_fmt}/call'
+            )
+
+    async def get_cost_by_model(self) -> dict[str, ModelUsageStats]:
+        """Get cost breakdown by model"""
+        summary = await self.get_usage_summary()
+        return summary.by_model
+
+    def clear_history(self) -> None:
+        """Clear usage history"""
+        self.usage_history = []
+
+    async def refresh_pricing_data(self) -> None:
+        """Force refresh of pricing data from GitHub"""
+        if self.include_cost:
+            await self._fetch_and_cache_pricing_data()
+
+    async def clean_old_caches(self, keep_count: int = 3) -> None:
+        """Clean up old cache files, keeping only the most recent ones"""
+        try:
+            # List all JSON files in the cache directory
+            cache_files = list(self._cache_dir.glob('*.json'))
+
+            if len(cache_files) <= keep_count:
+                return
+
+            # Sort by modification time (oldest first)
+            cache_files.sort(key=lambda f: f.stat().st_mtime)
+
+            # Remove all but the most recent files
+            for cache_file in cache_files[:-keep_count]:
+                try:
+                    os.remove(cache_file)
+                except Exception:
+                    pass
+        except Exception as e:
+            logger.debug(f'Error cleaning old cache files: {e}')
+
+    async def ensure_pricing_loaded(self) -> None:
+        """Ensure pricing data is loaded in the background. Call this after creating the service."""
+        if not self._initialized and self.include_cost:
+            # This will run in the background and won't block
+            await self.initialize()
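
For orientation before the next hunk: the intended entry points of the service above are register_llm(), which wraps an LLM's ainvoke to record usage automatically, and get_usage_summary(). The sketch below is hypothetical, not code from this package; it records one entry by hand via add_usage(), and it assumes ChatInvokeUsage (defined in browser_use/llm/views.py, not shown in this diff) accepts the field names that service.py reads; the real model may declare additional fields.

    import asyncio

    from browser_use.llm.views import ChatInvokeUsage
    from browser_use.tokens.service import TokenCost


    async def main() -> None:
        # Enable cost calculation (equivalent to BROWSER_USE_CALCULATE_COST=true)
        tc = TokenCost(include_cost=True)
        await tc.initialize()  # loads the LiteLLM pricing table from cache or GitHub

        # Normally tc.register_llm(llm) wraps llm.ainvoke so this happens
        # automatically; here one entry is recorded by hand for illustration.
        usage = ChatInvokeUsage(
            prompt_tokens=1200,
            prompt_cached_tokens=800,
            prompt_cache_creation_tokens=None,
            prompt_image_tokens=None,  # assumed field; check browser_use/llm/views.py
            completion_tokens=150,
            total_tokens=1350,
        )
        tc.add_usage('gpt-4o-mini', usage)

        summary = await tc.get_usage_summary()
        print(f'{summary.total_tokens} tokens across {summary.entry_count} calls, ${summary.total_cost:.4f}')


    asyncio.run(main())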
browser_use/tokens/views.py
@@ -0,0 +1,108 @@
+from datetime import datetime
+from typing import Any, TypeVar
+
+from pydantic import BaseModel, Field
+
+from browser_use.llm.views import ChatInvokeUsage
+
+T = TypeVar('T', bound=BaseModel)
+
+
+class TokenUsageEntry(BaseModel):
+    """Single token usage entry"""
+
+    model: str
+    timestamp: datetime
+    usage: ChatInvokeUsage
+
+
+class TokenCostCalculated(BaseModel):
+    """Token cost"""
+
+    new_prompt_tokens: int
+    new_prompt_cost: float
+
+    prompt_read_cached_tokens: int | None
+    prompt_read_cached_cost: float | None
+
+    prompt_cached_creation_tokens: int | None
+    prompt_cache_creation_cost: float | None
+    """Anthropic only: The cost of creating the cache."""
+
+    completion_tokens: int
+    completion_cost: float
+
+    @property
+    def prompt_cost(self) -> float:
+        return self.new_prompt_cost + (self.prompt_read_cached_cost or 0) + (self.prompt_cache_creation_cost or 0)
+
+    @property
+    def total_cost(self) -> float:
+        return (
+            self.new_prompt_cost
+            + (self.prompt_read_cached_cost or 0)
+            + (self.prompt_cache_creation_cost or 0)
+            + self.completion_cost
+        )
+
+
+class ModelPricing(BaseModel):
+    """Pricing information for a model"""
+
+    model: str
+    input_cost_per_token: float | None
+    output_cost_per_token: float | None
+
+    cache_read_input_token_cost: float | None
+    cache_creation_input_token_cost: float | None
+
+    max_tokens: int | None
+    max_input_tokens: int | None
+    max_output_tokens: int | None
+
+
+class CachedPricingData(BaseModel):
+    """Cached pricing data with timestamp"""
+
+    timestamp: datetime
+    data: dict[str, Any]
+
+
+class ModelUsageStats(BaseModel):
+    """Usage statistics for a single model"""
+
+    model: str
+    prompt_tokens: int = 0
+    completion_tokens: int = 0
+    total_tokens: int = 0
+    cost: float = 0.0
+    invocations: int = 0
+    average_tokens_per_invocation: float = 0.0
+
+
+class ModelUsageTokens(BaseModel):
+    """Usage tokens for a single model"""
+
+    model: str
+    prompt_tokens: int
+    prompt_cached_tokens: int
+    completion_tokens: int
+    total_tokens: int
+
+
+class UsageSummary(BaseModel):
+    """Summary of token usage and costs"""
+
+    total_prompt_tokens: int
+    total_prompt_cost: float
+
+    total_prompt_cached_tokens: int
+    total_prompt_cached_cost: float
+
+    total_completion_tokens: int
+    total_completion_cost: float
+    total_tokens: int
+    total_cost: float
+    entry_count: int
+
+    by_model: dict[str, ModelUsageStats] = Field(default_factory=dict)
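
As a quick sanity check of the two properties on TokenCostCalculated, here is a small hypothetical breakdown (the per-token rates are invented for illustration):

    from browser_use.tokens.views import TokenCostCalculated

    cost = TokenCostCalculated(
        new_prompt_tokens=1000,
        new_prompt_cost=0.0025,  # 1000 uncached prompt tokens at an assumed $2.50/M
        prompt_read_cached_tokens=4000,
        prompt_read_cached_cost=0.0005,  # 4000 cached tokens at an assumed $0.125/M
        prompt_cached_creation_tokens=None,
        prompt_cache_creation_cost=None,
        completion_tokens=200,
        completion_cost=0.0020,  # 200 completion tokens at an assumed $10.00/M
    )

    assert abs(cost.prompt_cost - 0.0030) < 1e-12  # new + cached read + cache creation
    assert abs(cost.total_cost - 0.0050) < 1e-12  # prompt_cost + completion_cost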