lm-deluge 0.0.55__tar.gz → 0.0.56__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lm-deluge might be problematic. See the package registry's advisory page for more details.

Files changed (80)
  1. {lm_deluge-0.0.55/src/lm_deluge.egg-info → lm_deluge-0.0.56}/PKG-INFO +1 -1
  2. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/pyproject.toml +1 -1
  3. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/client.py +3 -0
  4. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/tracker.py +62 -1
  5. {lm_deluge-0.0.55 → lm_deluge-0.0.56/src/lm_deluge.egg-info}/PKG-INFO +1 -1
  6. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/LICENSE +0 -0
  7. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/README.md +0 -0
  8. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/setup.cfg +0 -0
  9. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/__init__.py +0 -0
  10. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/agent.py +0 -0
  11. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/api_requests/__init__.py +0 -0
  12. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/api_requests/anthropic.py +0 -0
  13. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/api_requests/base.py +0 -0
  14. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/api_requests/bedrock.py +0 -0
  15. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/api_requests/common.py +0 -0
  16. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
  17. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
  18. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
  19. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
  20. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
  21. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/api_requests/gemini.py +0 -0
  22. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/api_requests/mistral.py +0 -0
  23. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/api_requests/openai.py +0 -0
  24. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/api_requests/response.py +0 -0
  25. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/batches.py +0 -0
  26. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
  27. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
  28. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
  29. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
  30. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/built_in_tools/base.py +0 -0
  31. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/built_in_tools/openai.py +0 -0
  32. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/cache.py +0 -0
  33. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/cli.py +0 -0
  34. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/config.py +0 -0
  35. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/embed.py +0 -0
  36. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/errors.py +0 -0
  37. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/file.py +0 -0
  38. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/gemini_limits.py +0 -0
  39. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/image.py +0 -0
  40. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/llm_tools/__init__.py +0 -0
  41. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/llm_tools/classify.py +0 -0
  42. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/llm_tools/extract.py +0 -0
  43. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/llm_tools/locate.py +0 -0
  44. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/llm_tools/ocr.py +0 -0
  45. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/llm_tools/score.py +0 -0
  46. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/llm_tools/translate.py +0 -0
  47. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/models/__init__.py +0 -0
  48. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/models/anthropic.py +0 -0
  49. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/models/bedrock.py +0 -0
  50. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/models/cerebras.py +0 -0
  51. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/models/cohere.py +0 -0
  52. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/models/deepseek.py +0 -0
  53. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/models/fireworks.py +0 -0
  54. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/models/google.py +0 -0
  55. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/models/grok.py +0 -0
  56. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/models/groq.py +0 -0
  57. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/models/meta.py +0 -0
  58. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/models/mistral.py +0 -0
  59. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/models/openai.py +0 -0
  60. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/models/openrouter.py +0 -0
  61. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/models/together.py +0 -0
  62. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/presets/cerebras.py +0 -0
  63. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/presets/meta.py +0 -0
  64. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/prompt.py +0 -0
  65. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/request_context.py +0 -0
  66. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/rerank.py +0 -0
  67. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/tool.py +0 -0
  68. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/usage.py +0 -0
  69. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/util/harmony.py +0 -0
  70. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/util/json.py +0 -0
  71. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/util/logprobs.py +0 -0
  72. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/util/spatial.py +0 -0
  73. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/util/validation.py +0 -0
  74. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge/util/xml.py +0 -0
  75. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge.egg-info/SOURCES.txt +0 -0
  76. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
  77. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge.egg-info/requires.txt +0 -0
  78. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/src/lm_deluge.egg-info/top_level.txt +0 -0
  79. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/tests/test_builtin_tools.py +0 -0
  80. {lm_deluge-0.0.55 → lm_deluge-0.0.56}/tests/test_native_mcp_server.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: lm_deluge
3
- Version: 0.0.55
3
+ Version: 0.0.56
4
4
  Summary: Python utility for using LLM API models.
5
5
  Author-email: Benjamin Anderson <ben@trytaylor.ai>
6
6
  Requires-Python: >=3.10
@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]
3
3
 
4
4
  [project]
5
5
  name = "lm_deluge"
6
- version = "0.0.55"
6
+ version = "0.0.56"
7
7
  authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
8
8
  description = "Python utility for using LLM API models."
9
9
  readme = "README.md"
@@ -295,6 +295,7 @@ class _LLMClient(BaseModel):
295
295
  # Handle successful response
296
296
  if not response.is_error:
297
297
  context.status_tracker.task_succeeded(context.task_id)
298
+ context.status_tracker.track_usage(response)
298
299
  # Cache successful responses immediately
299
300
  if self.cache and response.completion:
300
301
  # print(f"DEBUG: Caching successful response")
@@ -333,6 +334,8 @@ class _LLMClient(BaseModel):
333
334
 
334
335
  # No retries left or no retry queue - final failure
335
336
  context.status_tracker.task_failed(context.task_id)
337
+ # Track usage even for failed requests if they made an API call
338
+ context.status_tracker.track_usage(response)
336
339
  context.maybe_callback(response, context.status_tracker)
337
340
 
338
341
  # Print final error message
@@ -33,6 +33,13 @@ class StatusTracker:
33
33
  total_requests: int = 0
34
34
  retry_queue: asyncio.Queue = field(default_factory=asyncio.Queue)
35
35
 
36
+ # Cumulative usage tracking
37
+ total_cost: float = 0.0
38
+ total_input_tokens: int = 0 # non-cached input tokens
39
+ total_cache_read_tokens: int = 0
40
+ total_cache_write_tokens: int = 0
41
+ total_output_tokens: int = 0
42
+
36
43
  # Progress bar configuration
37
44
  use_progress_bar: bool = True
38
45
  progress_bar_total: int | None = None
@@ -131,6 +138,25 @@ class StatusTracker:
131
138
  self.num_tasks_in_progress -= 1
132
139
  self.num_tasks_failed += 1
133
140
 
141
+ def track_usage(self, response):
142
+ """Accumulate usage statistics from a completed request.
143
+
144
+ Args:
145
+ response: APIResponse object containing usage and cost information
146
+ """
147
+ if response.cost:
148
+ self.total_cost += response.cost
149
+
150
+ if response.usage:
151
+ self.total_output_tokens += response.usage.output_tokens
152
+ self.total_input_tokens += response.usage.input_tokens
153
+
154
+ if response.usage.cache_read_tokens:
155
+ self.total_cache_read_tokens += response.usage.cache_read_tokens
156
+
157
+ if response.usage.cache_write_tokens:
158
+ self.total_cache_write_tokens += response.usage.cache_write_tokens
159
+
134
160
  def log_final_status(self):
135
161
  # Close progress bar before printing final status
136
162
  self.close_progress_bar()
@@ -144,6 +170,22 @@ class StatusTracker:
144
170
  f"{self.num_rate_limit_errors} rate limit errors received. Consider running at a lower rate."
145
171
  )
146
172
 
173
+ # Display cumulative usage stats if available
174
+ if self.total_cost > 0 or self.total_input_tokens > 0 or self.total_output_tokens > 0:
175
+ usage_parts = []
176
+ if self.total_cost > 0:
177
+ usage_parts.append(f"Cost: ${self.total_cost:.4f}")
178
+ if self.total_input_tokens > 0 or self.total_output_tokens > 0:
179
+ usage_parts.append(
180
+ f"Tokens: {self.total_input_tokens:,} in / {self.total_output_tokens:,} out"
181
+ )
182
+ if self.total_cache_read_tokens > 0:
183
+ usage_parts.append(f"Cache: {self.total_cache_read_tokens:,} read")
184
+ if self.total_cache_write_tokens > 0:
185
+ usage_parts.append(f"{self.total_cache_write_tokens:,} write")
186
+
187
+ print(" | ".join(usage_parts))
188
+
147
189
  @property
148
190
  def pbar(self) -> tqdm | None:
149
191
  """Backward compatibility property to access progress bar."""
@@ -229,7 +271,26 @@ class StatusTracker:
229
271
  f" [gold3]Capacity:[/gold3] {tokens_info} • {reqs_info}"
230
272
  )
231
273
 
232
- display = Group(self._rich_progress, in_progress, capacity_text)
274
+ # Format usage stats
275
+ usage_parts = []
276
+ if self.total_cost > 0:
277
+ usage_parts.append(f"${self.total_cost:.4f}")
278
+ if self.total_input_tokens > 0 or self.total_output_tokens > 0:
279
+ input_k = self.total_input_tokens / 1000
280
+ output_k = self.total_output_tokens / 1000
281
+ usage_parts.append(f"{input_k:.1f}k in • {output_k:.1f}k out")
282
+ if self.total_cache_read_tokens > 0:
283
+ cache_k = self.total_cache_read_tokens / 1000
284
+ usage_parts.append(f"{cache_k:.1f}k cached")
285
+
286
+ usage_text = ""
287
+ if usage_parts:
288
+ usage_text = f" [gold3]Usage:[/gold3] {' • '.join(usage_parts)}"
289
+
290
+ if usage_text:
291
+ display = Group(self._rich_progress, in_progress, capacity_text, usage_text)
292
+ else:
293
+ display = Group(self._rich_progress, in_progress, capacity_text)
233
294
  live.update(display)
234
295
 
235
296
  await asyncio.sleep(0.1)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: lm_deluge
3
- Version: 0.0.55
3
+ Version: 0.0.56
4
4
  Summary: Python utility for using LLM API models.
5
5
  Author-email: Benjamin Anderson <ben@trytaylor.ai>
6
6
  Requires-Python: >=3.10
File without changes
File without changes
File without changes