lm-deluge 0.0.39__tar.gz → 0.0.40__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lm-deluge has been flagged as potentially problematic.

Files changed (78)
  1. {lm_deluge-0.0.39/src/lm_deluge.egg-info → lm_deluge-0.0.40}/PKG-INFO +7 -4
  2. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/README.md +6 -3
  3. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/pyproject.toml +1 -1
  4. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/client.py +37 -1
  5. {lm_deluge-0.0.39 → lm_deluge-0.0.40/src/lm_deluge.egg-info}/PKG-INFO +7 -4
  6. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/LICENSE +0 -0
  7. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/setup.cfg +0 -0
  8. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/__init__.py +0 -0
  9. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/agent.py +0 -0
  10. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/__init__.py +0 -0
  11. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/anthropic.py +0 -0
  12. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/base.py +0 -0
  13. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/bedrock.py +0 -0
  14. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/common.py +0 -0
  15. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
  16. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
  17. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
  18. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
  19. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
  20. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/gemini.py +0 -0
  21. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/mistral.py +0 -0
  22. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/openai.py +0 -0
  23. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/response.py +0 -0
  24. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/batches.py +0 -0
  25. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
  26. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
  27. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
  28. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
  29. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/built_in_tools/base.py +0 -0
  30. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/built_in_tools/openai.py +0 -0
  31. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/cache.py +0 -0
  32. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/cli.py +0 -0
  33. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/config.py +0 -0
  34. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/embed.py +0 -0
  35. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/errors.py +0 -0
  36. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/file.py +0 -0
  37. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/gemini_limits.py +0 -0
  38. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/image.py +0 -0
  39. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/__init__.py +0 -0
  40. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/classify.py +0 -0
  41. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/extract.py +0 -0
  42. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/locate.py +0 -0
  43. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/ocr.py +0 -0
  44. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/score.py +0 -0
  45. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/translate.py +0 -0
  46. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/__init__.py +0 -0
  47. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/anthropic.py +0 -0
  48. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/bedrock.py +0 -0
  49. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/cerebras.py +0 -0
  50. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/cohere.py +0 -0
  51. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/deepseek.py +0 -0
  52. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/fireworks.py +0 -0
  53. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/google.py +0 -0
  54. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/grok.py +0 -0
  55. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/groq.py +0 -0
  56. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/meta.py +0 -0
  57. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/mistral.py +0 -0
  58. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/openai.py +0 -0
  59. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/openrouter.py +0 -0
  60. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/together.py +0 -0
  61. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/prompt.py +0 -0
  62. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/request_context.py +0 -0
  63. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/rerank.py +0 -0
  64. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/tool.py +0 -0
  65. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/tracker.py +0 -0
  66. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/usage.py +0 -0
  67. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/util/harmony.py +0 -0
  68. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/util/json.py +0 -0
  69. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/util/logprobs.py +0 -0
  70. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/util/spatial.py +0 -0
  71. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/util/validation.py +0 -0
  72. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/util/xml.py +0 -0
  73. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge.egg-info/SOURCES.txt +0 -0
  74. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
  75. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge.egg-info/requires.txt +0 -0
  76. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge.egg-info/top_level.txt +0 -0
  77. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/tests/test_builtin_tools.py +0 -0
  78. {lm_deluge-0.0.39 → lm_deluge-0.0.40}/tests/test_native_mcp_server.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lm_deluge
- Version: 0.0.39
+ Version: 0.0.40
  Summary: Python utility for using LLM API models.
  Author-email: Benjamin Anderson <ben@trytaylor.ai>
  Requires-Python: >=3.10
@@ -111,14 +111,17 @@ await client.process_prompts_async(

  ### Queueing individual prompts

- You can queue prompts one at a time and track progress explicitly:
+ You can queue prompts one at a time and track progress explicitly. Iterate over
+ results as they finish with `as_completed` (or gather them all at once with
+ `wait_for_all`):

  ```python
  client = LLMClient("gpt-4.1-mini", progress="tqdm")
  client.open()
- task_id = client.start_nowait("hello there")
+ client.start_nowait("hello there")
  # ... queue more tasks ...
- results = await client.wait_for_all()
+ async for task_id, result in client.as_completed():
+     print(task_id, result.completion)
  client.close()
  ```

@@ -84,14 +84,17 @@ await client.process_prompts_async(

  ### Queueing individual prompts

- You can queue prompts one at a time and track progress explicitly:
+ You can queue prompts one at a time and track progress explicitly. Iterate over
+ results as they finish with `as_completed` (or gather them all at once with
+ `wait_for_all`):

  ```python
  client = LLMClient("gpt-4.1-mini", progress="tqdm")
  client.open()
- task_id = client.start_nowait("hello there")
+ client.start_nowait("hello there")
  # ... queue more tasks ...
- results = await client.wait_for_all()
+ async for task_id, result in client.as_completed():
+     print(task_id, result.completion)
  client.close()
  ```

@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]
3
3
 
4
4
  [project]
5
5
  name = "lm_deluge"
6
- version = "0.0.39"
6
+ version = "0.0.40"
7
7
  authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
8
8
  description = "Python utility for using LLM API models."
9
9
  readme = "README.md"
@@ -1,6 +1,6 @@
  import asyncio
  import random
- from typing import Any, Callable, Literal, Self, Sequence, overload
+ from typing import Any, AsyncGenerator, Callable, Literal, Self, Sequence, overload

  import numpy as np
  import yaml
@@ -557,6 +557,42 @@ class _LLMClient(BaseModel):
          task_ids = list(self._tasks.keys())
          return [await self.wait_for(tid) for tid in task_ids]

+     async def as_completed(
+         self, task_ids: Sequence[int] | None = None
+     ) -> AsyncGenerator[tuple[int, APIResponse | None], None]:
+         """Yield ``(task_id, result)`` pairs as tasks complete.
+
+         Args:
+             task_ids: Optional sequence of task IDs to wait on. If ``None``,
+                 all queued tasks are watched.
+
+         Yields:
+             Tuples of task ID and ``APIResponse`` as each task finishes.
+         """
+
+         if task_ids is None:
+             tasks_map: dict[asyncio.Task, int] = {
+                 task: tid for tid, task in self._tasks.items()
+             }
+         else:
+             tasks_map = {
+                 self._tasks[tid]: tid for tid in task_ids if tid in self._tasks
+             }
+
+         # Yield any tasks that have already completed
+         for task in list(tasks_map.keys()):
+             if task.done():
+                 tid = tasks_map.pop(task)
+                 yield tid, self._results.get(tid, await task)
+
+         while tasks_map:
+             done, _ = await asyncio.wait(
+                 set(tasks_map.keys()), return_when=asyncio.FIRST_COMPLETED
+             )
+             for task in done:
+                 tid = tasks_map.pop(task)
+                 yield tid, self._results.get(tid, await task)
+
      async def stream(
          self,
          prompt: str | Conversation,
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lm_deluge
- Version: 0.0.39
+ Version: 0.0.40
  Summary: Python utility for using LLM API models.
  Author-email: Benjamin Anderson <ben@trytaylor.ai>
  Requires-Python: >=3.10
@@ -111,14 +111,17 @@ await client.process_prompts_async(

  ### Queueing individual prompts

- You can queue prompts one at a time and track progress explicitly:
+ You can queue prompts one at a time and track progress explicitly. Iterate over
+ results as they finish with `as_completed` (or gather them all at once with
+ `wait_for_all`):

  ```python
  client = LLMClient("gpt-4.1-mini", progress="tqdm")
  client.open()
- task_id = client.start_nowait("hello there")
+ client.start_nowait("hello there")
  # ... queue more tasks ...
- results = await client.wait_for_all()
+ async for task_id, result in client.as_completed():
+     print(task_id, result.completion)
  client.close()
  ```
