lm-deluge 0.0.42__tar.gz → 0.0.44__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lm-deluge might be problematic. Click here for more details.

Files changed (78)
  1. {lm_deluge-0.0.42/src/lm_deluge.egg-info → lm_deluge-0.0.44}/PKG-INFO +1 -1
  2. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/pyproject.toml +1 -1
  3. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/client.py +4 -11
  4. {lm_deluge-0.0.42 → lm_deluge-0.0.44/src/lm_deluge.egg-info}/PKG-INFO +1 -1
  5. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/LICENSE +0 -0
  6. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/README.md +0 -0
  7. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/setup.cfg +0 -0
  8. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/__init__.py +0 -0
  9. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/agent.py +0 -0
  10. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/api_requests/__init__.py +0 -0
  11. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/api_requests/anthropic.py +0 -0
  12. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/api_requests/base.py +0 -0
  13. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/api_requests/bedrock.py +0 -0
  14. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/api_requests/common.py +0 -0
  15. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
  16. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
  17. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
  18. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
  19. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
  20. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/api_requests/gemini.py +0 -0
  21. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/api_requests/mistral.py +0 -0
  22. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/api_requests/openai.py +0 -0
  23. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/api_requests/response.py +0 -0
  24. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/batches.py +0 -0
  25. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
  26. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
  27. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
  28. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
  29. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/built_in_tools/base.py +0 -0
  30. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/built_in_tools/openai.py +0 -0
  31. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/cache.py +0 -0
  32. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/cli.py +0 -0
  33. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/config.py +0 -0
  34. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/embed.py +0 -0
  35. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/errors.py +0 -0
  36. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/file.py +0 -0
  37. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/gemini_limits.py +0 -0
  38. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/image.py +0 -0
  39. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/llm_tools/__init__.py +0 -0
  40. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/llm_tools/classify.py +0 -0
  41. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/llm_tools/extract.py +0 -0
  42. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/llm_tools/locate.py +0 -0
  43. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/llm_tools/ocr.py +0 -0
  44. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/llm_tools/score.py +0 -0
  45. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/llm_tools/translate.py +0 -0
  46. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/models/__init__.py +0 -0
  47. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/models/anthropic.py +0 -0
  48. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/models/bedrock.py +0 -0
  49. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/models/cerebras.py +0 -0
  50. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/models/cohere.py +0 -0
  51. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/models/deepseek.py +0 -0
  52. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/models/fireworks.py +0 -0
  53. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/models/google.py +0 -0
  54. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/models/grok.py +0 -0
  55. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/models/groq.py +0 -0
  56. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/models/meta.py +0 -0
  57. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/models/mistral.py +0 -0
  58. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/models/openai.py +0 -0
  59. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/models/openrouter.py +0 -0
  60. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/models/together.py +0 -0
  61. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/prompt.py +0 -0
  62. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/request_context.py +0 -0
  63. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/rerank.py +0 -0
  64. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/tool.py +0 -0
  65. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/tracker.py +0 -0
  66. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/usage.py +0 -0
  67. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/util/harmony.py +0 -0
  68. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/util/json.py +0 -0
  69. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/util/logprobs.py +0 -0
  70. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/util/spatial.py +0 -0
  71. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/util/validation.py +0 -0
  72. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge/util/xml.py +0 -0
  73. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge.egg-info/SOURCES.txt +0 -0
  74. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
  75. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge.egg-info/requires.txt +0 -0
  76. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/src/lm_deluge.egg-info/top_level.txt +0 -0
  77. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/tests/test_builtin_tools.py +0 -0
  78. {lm_deluge-0.0.42 → lm_deluge-0.0.44}/tests/test_native_mcp_server.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: lm_deluge
3
- Version: 0.0.42
3
+ Version: 0.0.44
4
4
  Summary: Python utility for using LLM API models.
5
5
  Author-email: Benjamin Anderson <ben@trytaylor.ai>
6
6
  Requires-Python: >=3.10
@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]
3
3
 
4
4
  [project]
5
5
  name = "lm_deluge"
6
- version = "0.0.42"
6
+ version = "0.0.44"
7
7
  authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
8
8
  description = "Python utility for using LLM API models."
9
9
  readme = "README.md"
@@ -1,5 +1,4 @@
1
1
  import asyncio
2
- import random
3
2
  from typing import Any, AsyncGenerator, Callable, Literal, Self, Sequence, overload
4
3
 
5
4
  import numpy as np
@@ -105,14 +104,8 @@ class _LLMClient(BaseModel):
105
104
 
106
105
  def _get_tracker(self) -> StatusTracker:
107
106
  if self._tracker is None:
108
- self._tracker = StatusTracker(
109
- max_requests_per_minute=self.max_requests_per_minute,
110
- max_tokens_per_minute=self.max_tokens_per_minute,
111
- max_concurrent_requests=self.max_concurrent_requests,
112
- use_progress_bar=False,
113
- progress_bar_disable=True,
114
- progress_style=self.progress,
115
- )
107
+ self.open()
108
+ assert self._tracker, "should have tracker now"
116
109
  return self._tracker
117
110
 
118
111
  @property
@@ -225,16 +218,16 @@ class _LLMClient(BaseModel):
225
218
  ):
226
219
  while True:
227
220
  async with self._capacity_lock:
228
- tracker.update_capacity()
229
221
  if tracker.check_capacity(num_tokens, retry=retry):
230
222
  tracker.set_limiting_factor(None)
231
223
  return
232
224
  seconds_to_pause = tracker.seconds_to_pause
233
225
 
234
226
  if seconds_to_pause > 0:
227
+ print(f"Pausing for {seconds_to_pause} seconds to cool down.")
235
228
  await asyncio.sleep(seconds_to_pause)
236
229
  else:
237
- await asyncio.sleep(random.random())
230
+ await asyncio.sleep(30.0 / self.max_requests_per_minute)
238
231
 
239
232
  async def _execute_request(self, context: RequestContext) -> APIResponse:
240
233
  """Create and send a single API request using the provided context."""
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: lm_deluge
3
- Version: 0.0.42
3
+ Version: 0.0.44
4
4
  Summary: Python utility for using LLM API models.
5
5
  Author-email: Benjamin Anderson <ben@trytaylor.ai>
6
6
  Requires-Python: >=3.10
Remaining files: without changes.