lm-deluge 0.0.13__py3-none-any.whl → 0.0.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of lm-deluge might be problematic.

lm_deluge/client.py CHANGED
@@ -6,6 +6,7 @@ import yaml
 from pydantic import BaseModel
 from pydantic.functional_validators import model_validator
 
+from lm_deluge.api_requests.openai import stream_chat
 from lm_deluge.batches import (
     submit_batches_anthropic,
     submit_batches_oa,
@@ -34,6 +35,12 @@ class LLMClient(BaseModel):
     """
 
     model_names: list[str] = ["gpt-4.1-mini"]
+
+    def __init__(self, model_name: str | list[str] | None = None, **kwargs):
+        if model_name is not None:
+            kwargs["model_names"] = model_name
+        super().__init__(**kwargs)
+
     max_requests_per_minute: int = 1_000
     max_tokens_per_minute: int = 100_000
     max_concurrent_requests: int = 225
@@ -81,7 +88,7 @@ class LLMClient(BaseModel):
     @model_validator(mode="before")
     @classmethod
     def fix_lists(cls, data) -> "LLMClient":
-        if isinstance(data["model_names"], str):
+        if isinstance(data.get("model_names"), str):
             data["model_names"] = [data["model_names"]]
         if "sampling_params" not in data or len(data.get("sampling_params", [])) == 0:
             data["sampling_params"] = [
@@ -162,6 +169,11 @@ class LLMClient(BaseModel):
             kwargs["model_names"] = model
         return cls(**kwargs)
 
+    def _select_model(self):
+        assert isinstance(self.model_weights, list)
+        model_idx = np.random.choice(range(len(self.models)), p=self.model_weights)
+        return self.models[model_idx], self.sampling_params[model_idx]
+
     @overload
     async def process_prompts_async(
         self,
@@ -249,41 +261,6 @@ class LLMClient(BaseModel):
         if len(cache_hit_ids) > 0:
             tracker.update_pbar(len(cache_hit_ids))
 
-        # api_task = asyncio.create_task(
-        #     process_api_prompts_async(
-        #         ids,
-        #         prompts,  # type: ignore -- fix later for dry running conversations
-        #         self.models,
-        #         self.model_weights,  # type: ignore
-        #         self.sampling_params,  # type: ignore
-        #         max_attempts=self.max_attempts,
-        #         max_concurrent_requests=self.max_concurrent_requests,
-        #         request_timeout=self.request_timeout,
-        #         status_tracker=tracker,
-        #         tools=tools,
-        #         cache=cache,
-        #         computer_use=computer_use,
-        #         display_width=display_width,
-        #         display_height=display_height,
-        #         use_responses_api=use_responses_api,
-        #     )
-        # )
-        # async def process_api_prompts_async(
-
-        #     models: str | list[str],
-        #     model_weights: list[float],
-        #     sampling_params: list[SamplingParams],
-        #     max_attempts: int = 5,
-        #     max_concurrent_requests: int = 1_000,
-        #     request_timeout: int = 30,
-        #     status_tracker: StatusTracker | None = None,
-        #     tools: list[Tool] | None = None,
-        #     cache: CachePattern | None = None,
-        #     computer_use: bool = False,
-        #     display_width: int = 1024,
-        #     display_height: int = 768,
-        #     use_responses_api: bool = False,
-        # ):
         if isinstance(ids, np.ndarray):
             ids = ids.tolist()  # pyright: ignore
 
@@ -296,28 +273,28 @@ class LLMClient(BaseModel):
         assert tracker.retry_queue, "retry queue not initialized"
         while True:
             # get next request (if one is not already waiting for capacity)
+            retry_request = False
             if next_request is None:
                 if not tracker.retry_queue.empty():
                     next_request = tracker.retry_queue.get_nowait()
+                    retry_request = True
                     print(f"Retrying request {next_request.task_id}.")
                 elif prompts_not_finished:
                     try:
                         # get new request
                         id, prompt = next(prompts_iter)
                         # select model
-                        assert isinstance(self.model_weights, list)
-                        model_idx = np.random.choice(
-                            range(len(self.models)), p=self.model_weights
-                        )
+                        model, sampling_params = self._select_model()
+
                         next_request = create_api_request(
                             task_id=id,
-                            model_name=self.models[model_idx],
+                            model_name=model,
                             prompt=prompt,  # type: ignore
                             request_timeout=self.request_timeout,
                             attempts_left=self.max_attempts,
                             status_tracker=tracker,
                             results_arr=requests,
-                            sampling_params=self.sampling_params[model_idx],
+                            sampling_params=sampling_params,
                             all_model_names=self.models,
                             all_sampling_params=self.sampling_params,
                             tools=tools,
@@ -339,10 +316,9 @@ class LLMClient(BaseModel):
             # if enough capacity available, call API
             if next_request:
                 next_request_tokens = next_request.num_tokens
-                if tracker.check_capacity(next_request_tokens):
+                if tracker.check_capacity(next_request_tokens, retry=retry_request):
                     tracker.set_limiting_factor(None)
-                    next_request.attempts_left -= 1
-                    # call API
+                    # call API (attempts_left will be decremented in handle_error if it fails)
                     asyncio.create_task(next_request.call_api())
                     next_request = None  # reset next_request to empty
             # update pbar status
@@ -360,9 +336,10 @@ class LLMClient(BaseModel):
             await asyncio.sleep(tracker.seconds_to_pause)
             print(f"Pausing {tracker.seconds_to_pause}s to cool down.")
 
-        # after finishing, log final status
-        tracker.log_final_status()
-        # deduplicate results by id
+        # after finishing, log final status
+        tracker.log_final_status()
+
+        # deduplicate results by id
         api_results = deduplicate_responses(requests)
         for res in api_results:
             results[res.id] = res
@@ -399,6 +376,17 @@ class LLMClient(BaseModel):
             )
         )
 
+    async def stream(self, prompt: str | Conversation, tools: list[Tool] | None = None):
+        model, sampling_params = self._select_model()
+        if isinstance(prompt, str):
+            prompt = Conversation.user(prompt)
+        async for item in stream_chat(model, prompt, sampling_params, tools, None):
+            if isinstance(item, str):
+                print(item, end="", flush=True)
+            else:
+                # final item
+                return item
+
     async def submit_batch_job(
         self,
         prompts: Sequence[str | list[dict] | Conversation],
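
Taken together, the new positional `model_name` argument and the `stream` helper make one-off interactive calls straightforward. A minimal sketch (the top-level import path and environment setup are assumptions not shown in this diff):

```python
import asyncio

from lm_deluge.client import LLMClient  # import path assumed from this diff

async def main():
    # __init__ now accepts a single model name (or a list) positionally
    client = LLMClient("gpt-4.1-mini")
    # stream() prints tokens as they arrive, then returns the final response
    response = await client.stream("Write a haiku about rate limits.")
    print(response)

asyncio.run(main())
```

Note that `stream()` samples a model via `_select_model()`, so a client configured with multiple weighted models may stream from a different model on each call.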
lm_deluge/config.py CHANGED
@@ -1,13 +1,14 @@
-from pydantic import BaseModel
 from typing import Literal
 
+from pydantic import BaseModel
+
 
 class SamplingParams(BaseModel):
     temperature: float = 0.0
     top_p: float = 1.0
     json_mode: bool = False
     max_new_tokens: int = 512
-    reasoning_effort: Literal["low", "medium", "high", None] = None
+    reasoning_effort: Literal["low", "medium", "high", "none", None] = None
     logprobs: bool = False
     top_logprobs: int | None = None
 
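For illustration, the widened literal means an explicit `"none"` string can now be passed through (distinct from leaving the field as `None`); a minimal sketch:

```python
from lm_deluge.config import SamplingParams

# "none" is now accepted alongside "low"/"medium"/"high" and None
params = SamplingParams(max_new_tokens=1024, reasoning_effort="none")
print(params.reasoning_effort)  # none
```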
lm_deluge/file.py ADDED
@@ -0,0 +1,154 @@
+import os
+import io
+import requests
+import base64
+import mimetypes
+import xxhash
+from dataclasses import dataclass, field
+from pathlib import Path
+
+
+@dataclass(slots=True)
+class File:
+    # raw bytes, pathlike, http url, base64 data url, or file_id
+    data: bytes | io.BytesIO | Path | str
+    media_type: str | None = None  # inferred if None
+    filename: str | None = None  # optional filename for uploads
+    file_id: str | None = None  # for OpenAI file uploads or Anthropic file API
+    type: str = field(init=False, default="file")
+
+    # helpers -----------------------------------------------------------------
+    def _bytes(self) -> bytes:
+        if isinstance(self.data, bytes):
+            return self.data
+        elif isinstance(self.data, io.BytesIO):
+            return self.data.getvalue()
+        elif isinstance(self.data, str) and self.data.startswith("http"):
+            res = requests.get(self.data)
+            res.raise_for_status()
+            return res.content
+        elif isinstance(self.data, str) and os.path.exists(self.data):
+            with open(self.data, "rb") as f:
+                return f.read()
+        elif isinstance(self.data, Path) and self.data.exists():
+            return Path(self.data).read_bytes()
+        elif isinstance(self.data, str) and self.data.startswith("data:"):
+            header, encoded = self.data.split(",", 1)
+            return base64.b64decode(encoded)
+        else:
+            raise ValueError("unreadable file format")
+
+    def _mime(self) -> str:
+        if self.media_type:
+            return self.media_type
+        if isinstance(self.data, (Path, str)):
+            # For URL or path, try to guess from the string
+            path_str = str(self.data)
+            guess = mimetypes.guess_type(path_str)[0]
+            if guess:
+                return guess
+        return "application/pdf"  # default to PDF
+
+    def _filename(self) -> str:
+        if self.filename:
+            return self.filename
+        if isinstance(self.data, (Path, str)):
+            path_str = str(self.data)
+            if path_str.startswith("http"):
+                # Extract filename from URL
+                return path_str.split("/")[-1].split("?")[0] or "document.pdf"
+            else:
+                # Extract from local path
+                return os.path.basename(path_str) or "document.pdf"
+        return "document.pdf"
+
+    def _base64(self, include_header: bool = True) -> str:
+        encoded = base64.b64encode(self._bytes()).decode("utf-8")
+        if not include_header:
+            return encoded
+        return f"data:{self._mime()};base64,{encoded}"
+
+    @property
+    def fingerprint(self) -> str:
+        # Hash the file contents for fingerprinting
+        file_bytes = self._bytes()
+        return xxhash.xxh64(file_bytes).hexdigest()
+
+    @property
+    def size(self) -> int:
+        """Return file size in bytes."""
+        return len(self._bytes())
+
+    # ── provider-specific emission ────────────────────────────────────────────
+    def oa_chat(self) -> dict:
+        """For OpenAI Chat Completions - file content as base64 or file_id."""
+        if self.file_id:
+            return {
+                "type": "file",
+                "file": {
+                    "file_id": self.file_id,
+                },
+            }
+        else:
+            return {
+                "type": "file",
+                "file": {
+                    "filename": self._filename(),
+                    "file_data": self._base64(),
+                },
+            }
+
+    def oa_resp(self) -> dict:
+        """For OpenAI Responses API - file content as base64 or file_id."""
+        if self.file_id:
+            return {
+                "type": "input_file",
+                "file_id": self.file_id,
+            }
+        else:
+            return {
+                "type": "input_file",
+                "filename": self._filename(),
+                "file_data": self._base64(),
+            }
+
+    def anthropic(self) -> dict:
+        """For Anthropic Messages API - file content as base64 or file_id."""
+        if self.file_id:
+            return {
+                "type": "document",
+                "source": {
+                    "type": "file",
+                    "file_id": self.file_id,
+                },
+            }
+        else:
+            b64 = base64.b64encode(self._bytes()).decode()
+            return {
+                "type": "document",
+                "source": {
+                    "type": "base64",
+                    "media_type": self._mime(),
+                    "data": b64,
+                },
+            }
+
+    def anthropic_file_upload(self) -> tuple[str, bytes, str]:
+        """For Anthropic Files API - return tuple for file upload."""
+        filename = self._filename()
+        content = self._bytes()
+        media_type = self._mime()
+        return filename, content, media_type
+
+    def gemini(self) -> dict:
+        """For Gemini API - files are provided as inline data."""
+        return {
+            "inlineData": {
+                "mimeType": self._mime(),
+                "data": self._base64(include_header=False),
+            }
+        }
+
+    def mistral(self) -> dict:
+        """For Mistral API - not yet supported."""
+        raise NotImplementedError("File support for Mistral is not yet implemented")
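
A minimal sketch of the new `File` class in use (the paths and file_id are placeholders; per `_bytes()` above, raw bytes, `Path` objects, http(s) URLs, and base64 data URLs are also accepted):

```python
from lm_deluge.file import File

f = File("contract.pdf")       # media type and filename inferred
print(f.size, f.fingerprint)   # byte length and xxh64 content hash

f.oa_chat()    # OpenAI Chat Completions "file" content part (base64)
f.oa_resp()    # OpenAI Responses API "input_file" part
f.anthropic()  # Anthropic "document" block
f.gemini()     # Gemini inlineData part

# reference an already-uploaded file instead of inlining bytes
uploaded = File("contract.pdf", file_id="file-abc123")
uploaded.oa_chat()  # emits {"type": "file", "file": {"file_id": "file-abc123"}}
```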
lm_deluge/models.py CHANGED
@@ -167,6 +167,63 @@ registry = {
         "tokens_per_minute": 100_000,
         "reasoning_model": True,
     },
+    # Native Gemini API versions with file support
+    "gemini-2.0-flash-gemini": {
+        "id": "gemini-2.0-flash-gemini",
+        "name": "gemini-2.0-flash",
+        "api_base": "https://generativelanguage.googleapis.com/v1beta",
+        "api_key_env_var": "GEMINI_API_KEY",
+        "supports_json": True,
+        "supports_logprobs": False,
+        "api_spec": "gemini",
+        "input_cost": 0.1,
+        "output_cost": 0.4,
+        "requests_per_minute": 20,
+        "tokens_per_minute": 100_000,
+        "reasoning_model": False,
+    },
+    "gemini-2.0-flash-lite-gemini": {
+        "id": "gemini-2.0-flash-lite-gemini",
+        "name": "gemini-2.0-flash-lite",
+        "api_base": "https://generativelanguage.googleapis.com/v1beta",
+        "api_key_env_var": "GEMINI_API_KEY",
+        "supports_json": True,
+        "supports_logprobs": False,
+        "api_spec": "gemini",
+        "input_cost": 0.1,
+        "output_cost": 0.4,
+        "requests_per_minute": 20,
+        "tokens_per_minute": 100_000,
+        "reasoning_model": False,
+    },
+    "gemini-2.5-pro-gemini": {
+        "id": "gemini-2.5-pro-gemini",
+        "name": "gemini-2.5-pro-preview-05-06",
+        "api_base": "https://generativelanguage.googleapis.com/v1beta",
+        "api_key_env_var": "GEMINI_API_KEY",
+        "supports_json": True,
+        "supports_logprobs": False,
+        "api_spec": "gemini",
+        "input_cost": 0.1,
+        "output_cost": 0.4,
+        "requests_per_minute": 20,
+        "tokens_per_minute": 100_000,
+        "reasoning_model": True,
+    },
+    "gemini-2.5-flash-gemini": {
+        "id": "gemini-2.5-flash-gemini",
+        "name": "gemini-2.5-flash-preview-05-20",
+        "api_base": "https://generativelanguage.googleapis.com/v1beta",
+        "api_key_env_var": "GEMINI_API_KEY",
+        "supports_json": True,
+        "supports_logprobs": False,
+        "api_spec": "gemini",
+        "input_cost": 0.1,
+        "output_cost": 0.4,
+        "requests_per_minute": 20,
+        "tokens_per_minute": 100_000,
+        "reasoning_model": True,
+    },
     # ███████ █████████ █████
     # ███░░░░░███ ███░░░░░███ ░░███
     # ███ ░░███ ████████ ██████ ████████ ░███ ░███ ░███
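
These registry entries are addressed by their `-gemini`-suffixed ids. A hedged sketch of selecting one (import paths and the file path are assumptions; `process_prompts_async` and `Conversation.user(..., file=...)` appear elsewhere in this diff):

```python
import asyncio

from lm_deluge.client import LLMClient  # import path assumed
from lm_deluge.prompt import Conversation

async def main():
    # the "-gemini" suffix routes to the native Gemini API entries above,
    # authenticated via the GEMINI_API_KEY environment variable
    client = LLMClient("gemini-2.0-flash-gemini")
    conv = Conversation.user("Summarize this file.", file="report.pdf")
    results = await client.process_prompts_async([conv])
    print(results[0])

asyncio.run(main())
```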
lm_deluge/prompt.py CHANGED
@@ -1,12 +1,15 @@
 import io
 import json
-import tiktoken
-import xxhash
 from dataclasses import dataclass, field
 from pathlib import Path
 from typing import Literal, Sequence
-from lm_deluge.models import APIModel
+
+import tiktoken
+import xxhash
+
+from lm_deluge.file import File
 from lm_deluge.image import Image
+from lm_deluge.models import APIModel
 
 CachePattern = Literal[
     "tools_only",
@@ -203,7 +206,7 @@ class Thinking:
         return {"type": "text", "text": f"[Thinking: {self.content}]"}
 
 
-Part = Text | Image | ToolCall | ToolResult | Thinking
+Part = Text | Image | File | ToolCall | ToolResult | Thinking
 
 
 ###############################################################################
@@ -246,6 +249,11 @@ class Message:
         """Get all image parts with proper typing."""
         return [part for part in self.parts if part.type == "image"]  # type: ignore
 
+    @property
+    def files(self) -> list[File]:
+        """Get all file parts with proper typing."""
+        return [part for part in self.parts if part.type == "file"]  # type: ignore
+
     @property
     def thinking_parts(self) -> list["Thinking"]:
         """Get all thinking parts with proper typing."""
@@ -262,6 +270,9 @@ class Message:
             elif isinstance(p, Image):  # Image – redact the bytes, keep a hint
                 w, h = p.size
                 content_blocks.append({"type": "image", "tag": f"<Image ({w}×{h})>"})
+            elif isinstance(p, File):  # File – redact the bytes, keep a hint
+                size = p.size
+                content_blocks.append({"type": "file", "tag": f"<File ({size} bytes)>"})
             elif isinstance(p, ToolCall):
                 content_blocks.append(
                     {
@@ -296,6 +307,9 @@ class Message:
             elif p["type"] == "image":
                 # We only stored a placeholder tag, so keep that placeholder.
                 parts.append(Image(p["tag"], detail="low"))
+            elif p["type"] == "file":
+                # We only stored a placeholder tag, so keep that placeholder.
+                parts.append(File(p["tag"]))
             elif p["type"] == "tool_call":
                 parts.append(
                     ToolCall(id=p["id"], name=p["name"], arguments=p["arguments"])
@@ -340,6 +354,20 @@ class Message:
         self.parts.append(img)
         return self
 
+    def add_file(
+        self,
+        data: bytes | str | Path | io.BytesIO,
+        *,
+        media_type: str | None = None,
+        filename: str | None = None,
+    ) -> "Message":
+        """
+        Append a file block and return self for chaining.
+        """
+        file = File(data, media_type=media_type, filename=filename)
+        self.parts.append(file)
+        return self
+
     def add_tool_call(self, id: str, name: str, arguments: dict) -> "Message":
         """Append a tool call block and return self for chaining."""
         self.parts.append(ToolCall(id=id, name=name, arguments=arguments))
@@ -362,12 +390,15 @@ class Message:
         text: str | None = None,
         *,
         image: str | bytes | Path | io.BytesIO | None = None,
+        file: str | bytes | Path | io.BytesIO | None = None,
     ) -> "Message":
         res = cls("user", [])
         if text is not None:
             res.add_text(text)
         if image is not None:
             res.add_image(image)
+        if file is not None:
+            res.add_file(file)
         return res
 
     @classmethod
@@ -403,6 +434,19 @@ class Message:
                 part_list.append(Text(item["text"]))
             elif item["type"] == "image_url":
                 part_list.append(Image(data=item["image_url"]["url"]))
+            elif item["type"] == "file":
+                file_data = item["file"]
+                if "file_id" in file_data:
+                    # Handle file ID reference (not implemented yet)
+                    part_list.append(File(data=file_data["file_id"]))
+                elif "file_data" in file_data:
+                    # Handle base64 file data
+                    part_list.append(
+                        File(
+                            data=file_data["file_data"],
+                            filename=file_data.get("filename"),
+                        )
+                    )
             parts = part_list
 
         # Handle tool calls (assistant messages)
@@ -511,11 +555,17 @@ class Conversation:
 
     @classmethod
     def user(
-        cls, text: str, *, image: bytes | str | Path | None = None
+        cls,
+        text: str,
+        *,
+        image: bytes | str | Path | None = None,
+        file: bytes | str | Path | None = None,
     ) -> "Conversation":
-        msg = (
-            Message.user(text) if image is None else Message.user(text).add_image(image)
-        )
+        msg = Message.user(text)
+        if image is not None:
+            msg.add_image(image)
+        if file is not None:
+            msg.add_file(file)
         return cls([msg])
 
     @classmethod
@@ -677,6 +727,9 @@ class Conversation:
                 if isinstance(part, Image):
                     # Force conversion to bytes if not already
                     part.data = part._bytes()
+                elif isinstance(part, File):
+                    # Force conversion to bytes if not already
+                    part.data = part._bytes()
         return self
 
     def _add_cache_control_to_message(self, message: dict) -> None:
@@ -765,6 +818,11 @@ class Conversation:
                     content_blocks.append(
                         {"type": "image", "tag": f"<Image ({w}×{h})>"}
                     )
+                elif isinstance(p, File):  # File – redact the bytes, keep a hint
+                    size = p.size
+                    content_blocks.append(
+                        {"type": "file", "tag": f"<File ({size} bytes)>"}
+                    )
                 elif isinstance(p, ToolCall):
                     content_blocks.append(
                         {
@@ -795,7 +853,7 @@ class Conversation:
 
         for m in payload.get("messages", []):
             role: Role = m["role"]  # 'system' | 'user' | 'assistant'
-            parts: list[Text | Image | ToolCall | ToolResult | Thinking] = []
+            parts: list[Part] = []
 
             for p in m["content"]:
                 if p["type"] == "text":
@@ -804,6 +862,9 @@ class Conversation:
                 elif p["type"] == "image":
                     # We only stored a placeholder tag, so keep that placeholder.
                     # You could raise instead if real image bytes are required.
                     parts.append(Image(p["tag"], detail="low"))
+                elif p["type"] == "file":
+                    # We only stored a placeholder tag, so keep that placeholder.
+                    parts.append(File(p["tag"]))
                 elif p["type"] == "tool_call":
                     parts.append(
                         ToolCall(id=p["id"], name=p["name"], arguments=p["arguments"])
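
A minimal sketch of the new file plumbing in `Message` and `Conversation` (the paths are placeholders):

```python
from lm_deluge.prompt import Conversation, Message

# attach a PDF alongside text in one call
conv = Conversation.user("What does this contract say?", file="contract.pdf")

# or build a message fluently and chain multiple files
msg = Message.user("Compare these two.").add_file("a.pdf").add_file("b.pdf")
print(msg.files)  # typed accessor for the File parts
```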
lm_deluge/tracker.py CHANGED
@@ -67,7 +67,7 @@ class StatusTracker:
     def set_limiting_factor(self, factor):
         self.limiting_factor = factor
 
-    def check_capacity(self, num_tokens: int):
+    def check_capacity(self, num_tokens: int, retry: bool = False):
         request_available = self.available_request_capacity >= 1
         tokens_available = self.available_token_capacity >= num_tokens
         concurrent_request_available = (
@@ -76,8 +76,10 @@ class StatusTracker:
         if request_available and tokens_available and concurrent_request_available:
             self.available_request_capacity -= 1
             self.available_token_capacity -= num_tokens
-            self.num_tasks_started += 1
-            self.num_tasks_in_progress += 1
+            if not retry:
+                # Only count new tasks, not retries
+                self.num_tasks_started += 1
+                self.num_tasks_in_progress += 1
             self.set_limiting_factor(None)
             return True
         else:
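
The effect of the new `retry` flag, restated as a self-contained sketch (field names are borrowed from the diff; the real `StatusTracker` carries more state):

```python
from dataclasses import dataclass

@dataclass
class MiniTracker:
    available_request_capacity: float = 10
    available_token_capacity: float = 10_000
    num_tasks_started: int = 0
    num_tasks_in_progress: int = 0

    def check_capacity(self, num_tokens: int, retry: bool = False) -> bool:
        if (
            self.available_request_capacity >= 1
            and self.available_token_capacity >= num_tokens
        ):
            self.available_request_capacity -= 1
            self.available_token_capacity -= num_tokens
            if not retry:
                # retries still consume rate-limit budget,
                # but are not double-counted as new tasks
                self.num_tasks_started += 1
                self.num_tasks_in_progress += 1
            return True
        return False

t = MiniTracker()
t.check_capacity(1_000)              # new task: counters advance
t.check_capacity(1_000, retry=True)  # retry: budget spent, counters unchanged
print(t.num_tasks_started, t.num_tasks_in_progress)  # 1 1
```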
lm_deluge/usage.py CHANGED
@@ -71,6 +71,16 @@ class Usage:
             cache_write_tokens=None,
         )
 
+    @classmethod
+    def from_gemini_usage(cls, usage_data: dict) -> "Usage":
+        """Create Usage from Gemini API response usage data."""
+        return cls(
+            input_tokens=usage_data.get("promptTokenCount", 0),
+            output_tokens=usage_data.get("candidatesTokenCount", 0),
+            cache_read_tokens=None,  # Gemini doesn't support caching yet
+            cache_write_tokens=None,
+        )
+
     def to_dict(self) -> dict:
         """Convert to dictionary for serialization."""
         return {
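
For illustration, feeding a Gemini usage dict (field names per the accessor above) into the new constructor:

```python
from lm_deluge.usage import Usage

usage = Usage.from_gemini_usage(
    {"promptTokenCount": 1200, "candidatesTokenCount": 85}
)
print(usage.input_tokens, usage.output_tokens)  # 1200 85
```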
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.13
+Version: 0.0.15
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
@@ -30,6 +30,7 @@ Dynamic: license-file
 `lm-deluge` is a lightweight helper library for maxing out your rate limits with LLM providers. It provides the following:
 
 - **Unified client** – Send prompts to all relevant models with a single client.
+- **Files and Images** – Include images easily for multimodal models, and PDF files for models that support them (OpenAI and Anthropic).
 - **Massive concurrency with throttling** – Set `max_tokens_per_minute` and `max_requests_per_minute` and let it fly. The client will process as many requests as possible while respecting rate limits and retrying failures.
 - **Spray across models/providers** – Configure a client with multiple models from any provider(s), and sampling weights. The client samples a model for each request.
 - **Tool Use** – Unified API for defining tools for all providers, and creating tools automatically from python functions.
@@ -41,6 +42,8 @@ Dynamic: license-file
 
 **STREAMING IS NOT IN SCOPE.** There are plenty of packages that let you stream chat completions across providers. The sole purpose of this package is to do very fast batch inference using APIs. Sorry!
 
+**Update 06/02/2025:** I lied, it supports (very basic) streaming now via `client.stream(...)`. It will print tokens as they arrive, then return an APIResponse at the end. More sophisticated streaming may or may not be implemented later, don't count on it.
+
 ## Installation
 
 ```bash