lm-deluge 0.0.59__py3-none-any.whl → 0.0.61__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lm-deluge might be problematic.

@@ -42,6 +42,14 @@ def _build_anthropic_request(
  "content-type": "application/json",
  }

+ # Check if any messages contain uploaded files (file_id)
+ # If so, add the files-api beta header
+ for msg in prompt.messages:
+ for file in msg.files:
+ if file.is_remote and file.remote_provider == "anthropic":
+ _add_beta(base_headers, "files-api-2025-04-14")
+ break
+
  request_json = {
  "model": model.name,
  "messages": messages,
@@ -1,10 +1,11 @@
  import asyncio
  import json
  import os
- import warnings

  from aiohttp import ClientResponse

+ from lm_deluge.warnings import maybe_warn
+
  try:
  from requests_aws4auth import AWS4Auth
  except ImportError:
@@ -187,9 +188,7 @@ async def _build_openai_bedrock_request(
  # Note: GPT-OSS on Bedrock doesn't support response_format parameter
  # Even though the model supports JSON, we can't use the response_format parameter
  if sampling_params.json_mode and model.supports_json:
- warnings.warn(
- f"JSON mode requested for {model.name} but response_format parameter not supported on Bedrock"
- )
+ maybe_warn("WARN_JSON_MODE_UNSUPPORTED", model_name=model.name)

  if tools:
  request_tools = []
@@ -1,11 +1,12 @@
  import json
  import os
- import warnings
  from typing import Any
+
  from aiohttp import ClientResponse

  from lm_deluge.request_context import RequestContext
  from lm_deluge.tool import Tool
+ from lm_deluge.warnings import maybe_warn

  from ..config import SamplingParams
  from ..models import APIModel
@@ -54,9 +55,7 @@ async def _build_gemini_request(

  else:
  if sampling_params.reasoning_effort:
- warnings.warn(
- f"Ignoring reasoning_effort param for non-reasoning model: {model.name}"
- )
+ maybe_warn("WARN_REASONING_UNSUPPORTED", model_name=model.name)

  # Add tools if provided
  if tools:
@@ -76,8 +75,10 @@ class GeminiRequest(APIRequestBase):

  # Warn if cache is specified for Gemini model
  if self.context.cache is not None:
- warnings.warn(
- f"Cache parameter '{self.context.cache}' is not supported for Gemini models, ignoring for {self.context.model_name}"
+ maybe_warn(
+ "WARN_CACHING_UNSUPPORTED",
+ model_name=self.context.model_name,
+ cache_param=self.context.cache,
  )

  self.model = APIModel.from_registry(self.context.model_name)
@@ -1,9 +1,10 @@
  import json
  import os
- import warnings

  from aiohttp import ClientResponse

+ from lm_deluge.warnings import maybe_warn
+
  from ..models import APIModel
  from ..prompt import Message
  from ..request_context import RequestContext
@@ -17,8 +18,10 @@ class MistralRequest(APIRequestBase):

  # Warn if cache is specified for non-Anthropic model
  if self.context.cache is not None:
- warnings.warn(
- f"Cache parameter '{self.context.cache}' is only supported for Anthropic models, ignoring for {self.context.model_name}"
+ maybe_warn(
+ "WARN_CACHING_UNSUPPORTED",
+ model_name=self.context.model_name,
+ cache_param=self.context.cache,
  )
  self.model = APIModel.from_registry(self.context.model_name)

@@ -38,13 +41,9 @@ class MistralRequest(APIRequestBase):
  "max_tokens": self.context.sampling_params.max_new_tokens,
  }
  if self.context.sampling_params.reasoning_effort:
- warnings.warn(
- f"Ignoring reasoning_effort param for non-reasoning model: {self.context.model_name}"
- )
+ maybe_warn("WARN_REASONING_UNSUPPORTED", model_name=self.context.model_name)
  if self.context.sampling_params.logprobs:
- warnings.warn(
- f"Ignoring logprobs param for non-logprobs model: {self.context.model_name}"
- )
+ maybe_warn("WARN_LOGPROBS_UNSUPPORTED", model_name=self.context.model_name)
  if self.context.sampling_params.json_mode and self.model.supports_json:
  self.request_json["response_format"] = {"type": "json_object"}

@@ -1,7 +1,6 @@
  import json
  import os
  import traceback as tb
- import warnings
  from types import SimpleNamespace

  import aiohttp
@@ -9,6 +8,7 @@ from aiohttp import ClientResponse

  from lm_deluge.request_context import RequestContext
  from lm_deluge.tool import MCPServer, Tool
+ from lm_deluge.warnings import maybe_warn

  from ..config import SamplingParams
  from ..models import APIModel
@@ -75,9 +75,8 @@ async def _build_oa_chat_request(
  request_json["reasoning_effort"] = effort
  else:
  if sampling_params.reasoning_effort:
- warnings.warn(
- f"Ignoring reasoning_effort param for non-reasoning model: {model.name}"
- )
+ maybe_warn("WARN_REASONING_UNSUPPORTED", model_name=context.model_name)
+
  if sampling_params.logprobs:
  request_json["logprobs"] = True
  if sampling_params.top_logprobs is not None:
@@ -105,8 +104,10 @@ class OpenAIRequest(APIRequestBase):

  # Warn if cache is specified for non-Anthropic model
  if self.context.cache is not None:
- warnings.warn(
- f"Cache parameter '{self.context.cache}' is only supported for Anthropic models, ignoring for {self.context.model_name}"
+ maybe_warn(
+ "WARN_CACHING_UNSUPPORTED",
+ model_name=self.context.model_name,
+ cache_param=self.context.cache,
  )
  self.model = APIModel.from_registry(self.context.model_name)

@@ -283,9 +284,7 @@ async def _build_oa_responses_request(
  }
  else:
  if sampling_params.reasoning_effort:
- warnings.warn(
- f"Ignoring reasoning_effort for non-reasoning model: {model.id}"
- )
+ maybe_warn("WARN_REASONING_UNSUPPORTED", model_name=context.model_name)

  if sampling_params.json_mode and model.supports_json:
  request_json["text"] = {"format": {"type": "json_object"}}
@@ -322,8 +321,10 @@ class OpenAIResponsesRequest(APIRequestBase):
  super().__init__(context)
  # Warn if cache is specified for non-Anthropic model
  if self.context.cache is not None:
- warnings.warn(
- f"Cache parameter '{self.context.cache}' is only supported for Anthropic models, ignoring for {self.context.model_name}"
+ maybe_warn(
+ "WARN_CACHING_UNSUPPORTED",
+ model_name=self.context.model_name,
+ cache_param=self.context.cache,
  )
  self.model = APIModel.from_registry(self.context.model_name)

@@ -526,8 +527,10 @@ async def stream_chat(
  extra_headers: dict[str, str] | None = None,
  ):
  if cache is not None:
- warnings.warn(
- f"Cache parameter '{cache}' is only supported for Anthropic models, ignoring for {model_name}"
+ maybe_warn(
+ "WARN_CACHING_UNSUPPORTED",
+ model_name=model_name,
+ cache_param=cache,
  )

  model = APIModel.from_registry(model_name)
lm_deluge/client.py CHANGED
@@ -3,6 +3,7 @@ from typing import (
  Any,
  AsyncGenerator,
  Callable,
+ ClassVar,
  Literal,
  Self,
  Sequence,
@@ -31,7 +32,7 @@ from lm_deluge.tool import MCPServer, Tool

  from .api_requests.base import APIResponse
  from .config import SamplingParams
- from .models import APIModel, registry
+ from .models import APIModel, register_model, registry
  from .request_context import RequestContext
  from .tracker import StatusTracker

@@ -43,6 +44,12 @@ class _LLMClient(BaseModel):
  Keeps all validation, serialization, and existing functionality.
  """

+ _REASONING_SUFFIXES: ClassVar[dict[str, Literal["low", "medium", "high"]]] = {
+ "-low": "low",
+ "-medium": "medium",
+ "-high": "high",
+ }
+
  model_names: str | list[str] = ["gpt-4.1-mini"]
  name: str | None = None
  max_requests_per_minute: int = 1_000
@@ -117,13 +124,112 @@ class _LLMClient(BaseModel):

  # NEW! Builder methods
  def with_model(self, model: str):
- self.model_names = [model]
+ self._update_models([model])
  return self

  def with_models(self, models: list[str]):
- self.model_names = models
+ self._update_models(models)
  return self

+ def _update_models(self, models: list[str]) -> None:
+ normalized, per_model_efforts = self._normalize_model_names(models)
+ if self.reasoning_effort is None:
+ unique_efforts = {eff for eff in per_model_efforts if eff is not None}
+ if len(normalized) == 1 and per_model_efforts[0] is not None:
+ self.reasoning_effort = per_model_efforts[0]
+ elif (
+ len(unique_efforts) == 1
+ and len(unique_efforts) != 0
+ and None not in per_model_efforts
+ ):
+ self.reasoning_effort = next(iter(unique_efforts)) # type: ignore
+ self.model_names = normalized
+ self._align_sampling_params(per_model_efforts)
+ self._reset_model_weights()
+
+ def _normalize_model_names(
+ self, models: list[str]
+ ) -> tuple[list[str], list[Literal["low", "medium", "high"] | None]]:
+ normalized: list[str] = []
+ efforts: list[Literal["low", "medium", "high"] | None] = []
+
+ for name in models:
+ base_name = self._preprocess_openrouter_model(name)
+ trimmed_name, effort = self.__class__._strip_reasoning_suffix_if_registered(
+ base_name
+ )
+ normalized.append(trimmed_name)
+ efforts.append(effort)
+
+ return normalized, efforts
+
+ def _align_sampling_params(
+ self, per_model_efforts: list[Literal["low", "medium", "high"] | None]
+ ) -> None:
+ if len(per_model_efforts) < len(self.model_names):
+ per_model_efforts = per_model_efforts + [None] * (
+ len(self.model_names) - len(per_model_efforts)
+ )
+
+ if not self.model_names:
+ self.sampling_params = []
+ return
+
+ if not self.sampling_params:
+ self.sampling_params = []
+
+ if len(self.sampling_params) == 0:
+ for _ in self.model_names:
+ self.sampling_params.append(
+ SamplingParams(
+ temperature=self.temperature,
+ top_p=self.top_p,
+ json_mode=self.json_mode,
+ max_new_tokens=self.max_new_tokens,
+ reasoning_effort=self.reasoning_effort,
+ logprobs=self.logprobs,
+ top_logprobs=self.top_logprobs,
+ )
+ )
+ elif len(self.sampling_params) == 1 and len(self.model_names) > 1:
+ base_param = self.sampling_params[0]
+ self.sampling_params = [
+ base_param.model_copy(deep=True) for _ in self.model_names
+ ]
+ elif len(self.sampling_params) != len(self.model_names):
+ base_param = self.sampling_params[0]
+ self.sampling_params = [
+ base_param.model_copy(deep=True) for _ in self.model_names
+ ]
+
+ if self.reasoning_effort is not None:
+ for sp in self.sampling_params:
+ sp.reasoning_effort = self.reasoning_effort
+ else:
+ for sp, effort in zip(self.sampling_params, per_model_efforts):
+ if effort is not None:
+ sp.reasoning_effort = effort
+
+ def _reset_model_weights(self) -> None:
+ if not self.model_names:
+ self.model_weights = []
+ return
+
+ if isinstance(self.model_weights, list):
+ if len(self.model_weights) == len(self.model_names) and any(
+ self.model_weights
+ ):
+ total = sum(self.model_weights)
+ if total == 0:
+ self.model_weights = [
+ 1 / len(self.model_names) for _ in self.model_names
+ ]
+ else:
+ self.model_weights = [w / total for w in self.model_weights]
+ return
+ # Fallback to uniform distribution
+ self.model_weights = [1 / len(self.model_names) for _ in self.model_names]
+
  def with_limits(
  self,
  max_requests_per_minute: int | None = None,
@@ -147,11 +253,64 @@ class _LLMClient(BaseModel):
  def models(self):
  return self.model_names # why? idk

+ @staticmethod
+ def _preprocess_openrouter_model(model_name: str) -> str:
+ """Process openrouter: prefix and register model if needed."""
+ if model_name.startswith("openrouter:"):
+ slug = model_name.split(":", 1)[1] # Everything after "openrouter:"
+ # Create a unique id by replacing slashes with hyphens
+ model_id = f"openrouter-{slug.replace('/', '-')}"
+
+ # Register the model if not already in registry
+ if model_id not in registry:
+ register_model(
+ id=model_id,
+ name=slug, # The full slug sent to OpenRouter API (e.g., "openrouter/andromeda-alpha")
+ api_base="https://openrouter.ai/api/v1",
+ api_key_env_var="OPENROUTER_API_KEY",
+ api_spec="openai",
+ supports_json=True,
+ supports_logprobs=False,
+ supports_responses=False,
+ input_cost=0, # Unknown costs for generic models
+ cached_input_cost=0,
+ cache_write_cost=0,
+ output_cost=0,
+ )
+
+ return model_id
+ return model_name
+
  @model_validator(mode="before")
  @classmethod
  def fix_lists(cls, data) -> "_LLMClient":
- if isinstance(data.get("model_names"), str):
- data["model_names"] = [data["model_names"]]
+ # Process model_names - handle both strings and lists
+ model_names = data.get("model_names")
+
+ if isinstance(model_names, str):
+ # Single model as string
+ # First, handle OpenRouter prefix
+ model_name = cls._preprocess_openrouter_model(model_names)
+
+ # Then handle reasoning effort suffix (e.g., "gpt-5-high")
+ model_name, effort = cls._strip_reasoning_suffix_if_registered(model_name)
+ if effort and data.get("reasoning_effort") is None:
+ data["reasoning_effort"] = effort
+
+ data["model_names"] = [model_name]
+
+ elif isinstance(model_names, list):
+ # List of models - process each one
+ processed_models = []
+ for model_name in model_names:
+ # Handle OpenRouter prefix for each model
+ processed_model = cls._preprocess_openrouter_model(model_name)
+ processed_model, _ = cls._strip_reasoning_suffix_if_registered(
+ processed_model
+ )
+ processed_models.append(processed_model)
+ data["model_names"] = processed_models
+
  if not isinstance(data.get("sampling_params", []), list):
  data["sampling_params"] = [data["sampling_params"]]
  if "sampling_params" not in data or len(data.get("sampling_params", [])) == 0:
@@ -170,6 +329,18 @@ class _LLMClient(BaseModel):
  data["sampling_params"] = data["sampling_params"] * len(data["model_names"])
  return data

+ @classmethod
+ def _strip_reasoning_suffix_if_registered(
+ cls, model_name: str
+ ) -> tuple[str, Literal["low", "medium", "high"] | None]:
+ """Remove reasoning suffix only when the trimmed model already exists."""
+ for suffix, effort in cls._REASONING_SUFFIXES.items():
+ if model_name.endswith(suffix) and len(model_name) > len(suffix):
+ candidate = model_name[: -len(suffix)]
+ if candidate in registry:
+ return candidate, effort
+ return model_name, None
+
  @model_validator(mode="after")
  def validate_client(self) -> Self:
  if isinstance(self.model_names, str):
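
Taken together, the client changes let a model name carry a reasoning-effort suffix or an openrouter: prefix. A minimal behavior sketch follows; the LLMClient import path and the example model slugs are assumptions, not part of this diff:

from lm_deluge import LLMClient  # assumed public wrapper around _LLMClient

# "gpt-5-high" is trimmed to "gpt-5" (already in the registry) and the
# suffix becomes reasoning_effort="high".
client = LLMClient(model_names="gpt-5-high")
assert client.model_names == ["gpt-5"]
assert client.reasoning_effort == "high"

# "openrouter:vendor/model" is registered on the fly under an
# "openrouter-<vendor>-<model>" id pointing at https://openrouter.ai/api/v1.
client = client.with_model("openrouter:qwen/qwen3-coder")  # hypothetical slug
assert client.model_names == ["openrouter-qwen-qwen3-coder"]
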
lm_deluge/file.py CHANGED
@@ -1,22 +1,35 @@
- from functools import cached_property
- import os
- import io
- import requests
  import base64
+ import io
  import mimetypes
- import xxhash
+ import os
  from dataclasses import dataclass, field
+ from functools import cached_property
  from pathlib import Path
+ from typing import Literal
+
+ import requests
+ import xxhash


  @dataclass
  class File:
  # raw bytes, pathlike, http url, base64 data url, or file_id
- data: bytes | io.BytesIO | Path | str
+ data: bytes | io.BytesIO | Path | str | None
  media_type: str | None = None # inferred if None
+ type: str = field(init=False, default="file")
+ is_remote: bool = False
+ remote_provider: Literal["openai", "anthropic", "google"] | None = None
  filename: str | None = None # optional filename for uploads
  file_id: str | None = None # for OpenAI file uploads or Anthropic file API
- type: str = field(init=False, default="file")
+
+ def __post_init__(self):
+ if self.is_remote:
+ if self.remote_provider is None:
+ raise ValueError("remote_provider must be specified")
+ if self.file_id is None:
+ raise ValueError("file_id must be specified for remote files")
+ if self.file_id and not self.is_remote:
+ print("Warning: File ID specified by file not labeled as remote.")

  # helpers -----------------------------------------------------------------
  def _bytes(self) -> bytes:
@@ -75,17 +88,342 @@ class File:
  @cached_property
  def fingerprint(self) -> str:
  # Hash the file contents for fingerprinting
+ if self.is_remote:
+ # For remote files, use provider:file_id for interpretability
+ return f"{self.remote_provider}:{self.file_id}"
  file_bytes = self._bytes()
  return xxhash.xxh64(file_bytes).hexdigest()

  @cached_property
  def size(self) -> int:
  """Return file size in bytes."""
+ if self.is_remote:
+ # For remote files, we don't have the bytes available
+ return 0
  return len(self._bytes())

+ async def as_remote(
+ self, provider: Literal["openai", "anthropic", "google"]
+ ) -> "File":
+ """Upload file to provider's file API and return new File with file_id.
+
+ Args:
+ provider: The provider to upload to ("openai", "anthropic", or "google")
+
+ Returns:
+ A new File object with file_id set and is_remote=True
+
+ Raises:
+ ValueError: If provider is unsupported or API key is missing
+ RuntimeError: If upload fails
+ """
+ if self.is_remote:
+ # If already remote with same provider, return self
+ if self.remote_provider == provider:
+ return self
+ # Otherwise raise error about cross-provider incompatibility
+ raise ValueError(
+ f"File is already uploaded to {self.remote_provider}. "
+ f"Cannot re-upload to {provider}."
+ )
+
+ if provider == "openai":
+ return await self._upload_to_openai()
+ elif provider == "anthropic":
+ return await self._upload_to_anthropic()
+ elif provider == "google":
+ return await self._upload_to_google()
+ else:
+ raise ValueError(f"Unsupported provider: {provider}")
+
+ async def _upload_to_openai(self) -> "File":
+ """Upload file to OpenAI's Files API."""
+ import aiohttp
+
+ api_key = os.environ.get("OPENAI_API_KEY")
+ if not api_key:
+ raise ValueError("OPENAI_API_KEY environment variable must be set")
+
+ url = "https://api.openai.com/v1/files"
+ headers = {"Authorization": f"Bearer {api_key}"}
+
+ # Get file bytes and metadata
+ file_bytes = self._bytes()
+ filename = self._filename()
+
+ # Create multipart form data
+ data = aiohttp.FormData()
+ data.add_field("purpose", "assistants")
+ data.add_field(
+ "file",
+ file_bytes,
+ filename=filename,
+ content_type=self._mime(),
+ )
+
+ try:
+ async with aiohttp.ClientSession() as session:
+ async with session.post(url, headers=headers, data=data) as response:
+ if response.status != 200:
+ text = await response.text()
+ raise RuntimeError(f"Failed to upload file to OpenAI: {text}")
+
+ response_data = await response.json()
+ file_id = response_data["id"]
+
+ # Return new File object with file_id
+ return File(
+ data=None,
+ media_type=self.media_type,
+ is_remote=True,
+ remote_provider="openai",
+ filename=filename,
+ file_id=file_id,
+ )
+ except aiohttp.ClientError as e:
+ raise RuntimeError(f"Failed to upload file to OpenAI: {e}")
+
+ async def _upload_to_anthropic(self) -> "File":
+ """Upload file to Anthropic's Files API."""
+ import aiohttp
+
+ api_key = os.environ.get("ANTHROPIC_API_KEY")
+ if not api_key:
+ raise ValueError("ANTHROPIC_API_KEY environment variable must be set")
+
+ url = "https://api.anthropic.com/v1/files"
+ headers = {
+ "x-api-key": api_key,
+ "anthropic-version": "2023-06-01",
+ "anthropic-beta": "files-api-2025-04-14",
+ }
+
+ # Get file bytes and metadata
+ file_bytes = self._bytes()
+ filename = self._filename()
+
+ # Create multipart form data
+ data = aiohttp.FormData()
+ data.add_field(
+ "file",
+ file_bytes,
+ filename=filename,
+ content_type=self._mime(),
+ )
+
+ try:
+ async with aiohttp.ClientSession() as session:
+ async with session.post(url, headers=headers, data=data) as response:
+ if response.status != 200:
+ text = await response.text()
+ raise RuntimeError(
+ f"Failed to upload file to Anthropic: {text}"
+ )
+
+ response_data = await response.json()
+ file_id = response_data["id"]
+
+ # Return new File object with file_id
+ return File(
+ data=None,
+ media_type=self.media_type,
+ is_remote=True,
+ remote_provider="anthropic",
+ filename=filename,
+ file_id=file_id,
+ )
+ except aiohttp.ClientError as e:
+ raise RuntimeError(f"Failed to upload file to Anthropic: {e}")
+
+ async def _upload_to_google(self) -> "File":
+ """Upload file to Google Gemini Files API."""
+ import json
+
+ import aiohttp
+
+ api_key = os.environ.get("GEMINI_API_KEY")
+ if not api_key:
+ raise ValueError("GEMINI_API_KEY environment variable must be set")
+
+ # Google uses a different URL structure with the API key as a parameter
+ url = f"https://generativelanguage.googleapis.com/upload/v1beta/files?key={api_key}"
+
+ # Get file bytes and metadata
+ file_bytes = self._bytes()
+ filename = self._filename()
+ mime_type = self._mime()
+
+ # Google expects a multipart request with metadata and file data
+ # Using the resumable upload protocol
+ headers = {
+ "X-Goog-Upload-Protocol": "multipart",
+ }
+
+ # Create multipart form data with metadata and file
+ data = aiohttp.FormData()
+
+ # Add metadata part as JSON
+ metadata = {"file": {"display_name": filename}}
+ data.add_field(
+ "metadata",
+ json.dumps(metadata),
+ content_type="application/json",
+ )
+
+ # Add file data part
+ data.add_field(
+ "file",
+ file_bytes,
+ filename=filename,
+ content_type=mime_type,
+ )
+
+ try:
+ async with aiohttp.ClientSession() as session:
+ async with session.post(url, headers=headers, data=data) as response:
+ if response.status not in [200, 201]:
+ text = await response.text()
+ raise RuntimeError(f"Failed to upload file to Google: {text}")
+
+ response_data = await response.json()
+ # Google returns a file object with a 'name' field like 'files/abc123'
+ file_uri = response_data.get("file", {}).get(
+ "uri"
+ ) or response_data.get("name")
+ if not file_uri:
+ raise RuntimeError(
+ f"No file URI in Google response: {response_data}"
+ )
+
+ # Return new File object with file_id (using the file URI)
+ return File(
+ data=None,
+ media_type=self.media_type,
+ is_remote=True,
+ remote_provider="google",
+ filename=filename,
+ file_id=file_uri,
+ )
+ except aiohttp.ClientError as e:
+ raise RuntimeError(f"Failed to upload file to Google: {e}")
+
+ async def delete(self) -> bool:
+ """Delete the uploaded file from the remote provider.
+
+ Returns:
+ True if deletion was successful, False otherwise
+
+ Raises:
+ ValueError: If file is not a remote file or provider is unsupported
+ RuntimeError: If deletion fails
+ """
+ if not self.is_remote:
+ raise ValueError(
+ "Cannot delete a non-remote file. Only remote files can be deleted."
+ )
+
+ if not self.file_id:
+ raise ValueError("Cannot delete file without file_id")
+
+ if self.remote_provider == "openai":
+ return await self._delete_from_openai()
+ elif self.remote_provider == "anthropic":
+ return await self._delete_from_anthropic()
+ elif self.remote_provider == "google":
+ return await self._delete_from_google()
+ else:
+ raise ValueError(f"Unsupported provider: {self.remote_provider}")
+
+ async def _delete_from_openai(self) -> bool:
+ """Delete file from OpenAI's Files API."""
+ import aiohttp
+
+ api_key = os.environ.get("OPENAI_API_KEY")
+ if not api_key:
+ raise ValueError("OPENAI_API_KEY environment variable must be set")
+
+ url = f"https://api.openai.com/v1/files/{self.file_id}"
+ headers = {"Authorization": f"Bearer {api_key}"}
+
+ try:
+ async with aiohttp.ClientSession() as session:
+ async with session.delete(url, headers=headers) as response:
+ if response.status == 200:
+ return True
+ else:
+ text = await response.text()
+ raise RuntimeError(f"Failed to delete file from OpenAI: {text}")
+ except aiohttp.ClientError as e:
+ raise RuntimeError(f"Failed to delete file from OpenAI: {e}")
+
+ async def _delete_from_anthropic(self) -> bool:
+ """Delete file from Anthropic's Files API."""
+ import aiohttp
+
+ api_key = os.environ.get("ANTHROPIC_API_KEY")
+ if not api_key:
+ raise ValueError("ANTHROPIC_API_KEY environment variable must be set")
+
+ url = f"https://api.anthropic.com/v1/files/{self.file_id}"
+ headers = {
+ "x-api-key": api_key,
+ "anthropic-version": "2023-06-01",
+ "anthropic-beta": "files-api-2025-04-14",
+ }
+
+ try:
+ async with aiohttp.ClientSession() as session:
+ async with session.delete(url, headers=headers) as response:
+ if response.status == 200:
+ return True
+ else:
+ text = await response.text()
+ raise RuntimeError(
+ f"Failed to delete file from Anthropic: {text}"
+ )
+ except aiohttp.ClientError as e:
+ raise RuntimeError(f"Failed to delete file from Anthropic: {e}")
+
+ async def _delete_from_google(self) -> bool:
+ """Delete file from Google Gemini Files API."""
+ import aiohttp
+
+ api_key = os.environ.get("GEMINI_API_KEY")
+ if not api_key:
+ raise ValueError("GEMINI_API_KEY environment variable must be set")
+
+ # Google file_id is the full URI like "https://generativelanguage.googleapis.com/v1beta/files/abc123"
+ # We need to extract just the file name part for the delete endpoint
+ assert self.file_id, "can't delete file with no file id"
+ if self.file_id.startswith("https://"):
+ # Extract the path after the domain
+ file_name = self.file_id.split("/v1beta/")[-1]
+ else:
+ file_name = self.file_id
+
+ url = f"https://generativelanguage.googleapis.com/v1beta/{file_name}?key={api_key}"
+
+ try:
+ async with aiohttp.ClientSession() as session:
+ async with session.delete(url) as response:
+ if response.status in [200, 204]:
+ return True
+ else:
+ text = await response.text()
+ raise RuntimeError(f"Failed to delete file from Google: {text}")
+ except aiohttp.ClientError as e:
+ raise RuntimeError(f"Failed to delete file from Google: {e}")
+
  # ── provider-specific emission ────────────────────────────────────────────
  def oa_chat(self) -> dict:
  """For OpenAI Chat Completions - file content as base64 or file_id."""
+ # Validate provider compatibility
+ if self.is_remote and self.remote_provider != "openai":
+ raise ValueError(
+ f"Cannot emit file uploaded to {self.remote_provider} as OpenAI format. "
+ f"File must be uploaded to OpenAI or provided as raw data."
+ )
+
  if self.file_id:
  return {
  "type": "file",
@@ -104,6 +442,13 @@ class File:

  def oa_resp(self) -> dict:
  """For OpenAI Responses API - file content as base64 or file_id."""
+ # Validate provider compatibility
+ if self.is_remote and self.remote_provider != "openai":
+ raise ValueError(
+ f"Cannot emit file uploaded to {self.remote_provider} as OpenAI format. "
+ f"File must be uploaded to OpenAI or provided as raw data."
+ )
+
  if self.file_id:
  return {
  "type": "input_file",
@@ -118,6 +463,13 @@ class File:

  def anthropic(self) -> dict:
  """For Anthropic Messages API - file content as base64 or file_id."""
+ # Validate provider compatibility
+ if self.is_remote and self.remote_provider != "anthropic":
+ raise ValueError(
+ f"Cannot emit file uploaded to {self.remote_provider} as Anthropic format. "
+ f"File must be uploaded to Anthropic or provided as raw data."
+ )
+
  if self.file_id:
  return {
  "type": "document",
@@ -145,13 +497,30 @@ class File:
  return filename, content, media_type

  def gemini(self) -> dict:
- """For Gemini API - files are provided as inline data."""
- return {
- "inlineData": {
- "mimeType": self._mime(),
- "data": self._base64(include_header=False),
+ """For Gemini API - files are provided as inline data or file URI."""
+ # Validate provider compatibility
+ if self.is_remote and self.remote_provider != "google":
+ raise ValueError(
+ f"Cannot emit file uploaded to {self.remote_provider} as Google format. "
+ f"File must be uploaded to Google or provided as raw data."
+ )
+
+ if self.file_id:
+ # Use file URI for uploaded files
+ return {
+ "fileData": {
+ "mimeType": self._mime(),
+ "fileUri": self.file_id,
+ }
+ }
+ else:
+ # Use inline data for non-uploaded files
+ return {
+ "inlineData": {
+ "mimeType": self._mime(),
+ "data": self._base64(include_header=False),
+ }
  }
- }

  def mistral(self) -> dict:
  """For Mistral API - not yet supported."""
@@ -10,6 +10,20 @@ OPENAI_MODELS = {
  # ░███
  # █████
  # ░░░░░
+ "gpt-5-codex": {
+ "id": "gpt-5-codex",
+ "name": "gpt-5-codex",
+ "api_base": "https://api.openai.com/v1",
+ "api_key_env_var": "OPENAI_API_KEY",
+ "supports_json": False,
+ "supports_logprobs": True,
+ "supports_responses": True,
+ "api_spec": "openai",
+ "input_cost": 1.25,
+ "cached_input_cost": 0.125,
+ "output_cost": 10.0,
+ "reasoning_model": True,
+ },
  "gpt-5": {
  "id": "gpt-5",
  "name": "gpt-5",
@@ -79,6 +93,20 @@ OPENAI_MODELS = {
  "output_cost": 12.0,
  "reasoning_model": False,
  },
+ "codex-mini-latest": {
+ "id": "codex-mini-latest",
+ "name": "codex-mini-latest",
+ "api_base": "https://api.openai.com/v1",
+ "api_key_env_var": "OPENAI_API_KEY",
+ "supports_json": True,
+ "supports_logprobs": False,
+ "supports_responses": True,
+ "api_spec": "openai",
+ "input_cost": 1.5,
+ "cached_input_cost": 0.375,
+ "output_cost": 6.0,
+ "reasoning_model": True,
+ },
  "o3": {
  "id": "o3",
  "name": "o3-2025-04-16",
lm_deluge/prompt.py CHANGED
@@ -9,6 +9,7 @@ import xxhash

  from lm_deluge.file import File
  from lm_deluge.image import Image, MediaType
+ from lm_deluge.warnings import deprecated

  CachePattern = Literal[
  "tools_only",
@@ -415,12 +416,17 @@ class Message:

  return cls(role, parts)

- def add_text(self, content: str) -> "Message":
+ def with_text(self, content: str) -> "Message":
  """Append a text block and return self for chaining."""
  self.parts.append(Text(content))
  return self

- def add_image(
+ @deprecated("with_text")
+ def add_text(self, content: str) -> "Message":
+ """Append a text block and return self for chaining."""
+ return self.with_text(content)
+
+ def with_image(
  self,
  data: bytes | str | Path | io.BytesIO | Image,
  *,
@@ -446,7 +452,27 @@ class Message:
  self.parts.append(img)
  return self

- def add_file(
+ @deprecated("with_image")
+ def add_image(
+ self,
+ data: bytes | str | Path | io.BytesIO | Image,
+ *,
+ media_type: MediaType | None = None,
+ detail: Literal["low", "high", "auto"] = "auto",
+ max_size: int | None = None,
+ ) -> "Message":
+ """
+ Append an image block and return self for chaining.
+
+ If max_size is provided, the image will be resized so that its longer
+ dimension equals max_size, but only if the longer dimension is currently
+ larger than max_size.
+ """
+ return self.with_image(
+ data=data, media_type=media_type, detail=detail, max_size=max_size
+ )
+
+ def with_file(
  self,
  data: bytes | str | Path | io.BytesIO,
  *,
@@ -460,11 +486,29 @@ class Message:
  self.parts.append(file)
  return self

- def add_tool_call(self, id: str, name: str, arguments: dict) -> "Message":
+ @deprecated("with_file")
+ def add_file(
+ self,
+ data: bytes | str | Path | io.BytesIO,
+ *,
+ media_type: str | None = None,
+ filename: str | None = None,
+ ) -> "Message":
+ """
+ Append a file block and return self for chaining.
+ """
+ return self.with_file(data, media_type=media_type, filename=filename)
+
+ def with_tool_call(self, id: str, name: str, arguments: dict) -> "Message":
  """Append a tool call block and return self for chaining."""
  self.parts.append(ToolCall(id=id, name=name, arguments=arguments))
  return self

+ @deprecated("with_tool_call")
+ def add_tool_call(self, id: str, name: str, arguments: dict) -> "Message":
+ """Append a tool call block and return self for chaining."""
+ return self.with_tool_call(id, name, arguments)
+
  def with_tool_result(
  self, tool_call_id: str, result: str | list[ToolResultPart]
  ) -> "Message":
@@ -472,11 +516,23 @@ class Message:
  self.parts.append(ToolResult(tool_call_id=tool_call_id, result=result))
  return self

- def add_thinking(self, content: str) -> "Message":
+ @deprecated("with_tool_result")
+ def add_tool_result(
+ self, tool_call_id: str, result: str | list[ToolResultPart]
+ ) -> "Message":
+ """Append a tool result block and return self for chaining."""
+ return self.with_tool_result(tool_call_id, result)
+
+ def with_thinking(self, content: str) -> "Message":
  """Append a thinking block and return self for chaining."""
  self.parts.append(Thinking(content=content))
  return self

+ @deprecated("with_thinking")
+ def add_thinking(self, content: str) -> "Message":
+ """Append a thinking block and return self for chaining."""
+ return self.with_thinking(content)
+
  # -------- convenient constructors --------
  @classmethod
  def user(
@@ -488,25 +544,25 @@ class Message:
  ) -> "Message":
  res = cls("user", [])
  if text is not None:
- res.add_text(text)
+ res.with_text(text)
  if image is not None:
- res.add_image(image)
+ res.with_image(image)
  if file is not None:
- res.add_file(file)
+ res.with_file(file)
  return res

  @classmethod
  def system(cls, text: str | None = None) -> "Message":
  res = cls("system", [])
  if text is not None:
- res.add_text(text)
+ res.with_text(text)
  return res

  @classmethod
  def ai(cls, text: str | None = None) -> "Message":
  res = cls("assistant", [])
  if text is not None:
- res.add_text(text)
+ res.with_text(text)
  return res

  # ──── provider-specific constructors ───
@@ -698,9 +754,9 @@ class Conversation:
  ) -> "Conversation":
  msg = Message.user(text)
  if image is not None:
- msg.add_image(image)
+ msg.with_image(image)
  if file is not None:
- msg.add_file(file)
+ msg.with_file(file)
  return cls([msg])

  @classmethod
@@ -1211,11 +1267,11 @@ class Conversation:
  for i, tool_result in enumerate(m.tool_results):
  images = tool_result.get_images()
  if len(images) > 0:
- user_msg.add_text(
+ user_msg.with_text(
  f"[Images for Tool Call {tool_result.tool_call_id}]"
  )
  for img in images:
- user_msg.add_image(img)
+ user_msg.with_image(img)

  else:
  result.append(m.oa_chat())
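
With the Message builder methods renamed from add_* to with_* (the old names kept as deprecated shims), chained construction now reads as sketched below; the import paths are assumed from the module names in this diff:

from lm_deluge.prompt import Conversation, Message

msg = (
    Message.user("What's in this file?")
    .with_image("chart.png")     # hypothetical local image
    .with_file("notes.pdf")      # hypothetical local file
)
legacy = Message.ai().add_text("Hello!")  # still works; warns once via @deprecated

convo = Conversation([msg])
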
lm_deluge/warnings.py ADDED
@@ -0,0 +1,46 @@
+ import functools
+ import os
+ import warnings
+
+ WARNINGS: dict[str, str] = {
+ "WARN_JSON_MODE_UNSUPPORTED": "JSON mode requested for {model_name} but response_format parameter not supported.",
+ "WARN_REASONING_UNSUPPORTED": "Ignoring reasoning_effort param for non-reasoning model: {model_name}.",
+ "WARN_CACHING_UNSUPPORTED": "Cache parameter '{cache_param}' is not supported, ignoring for {model_name}.",
+ "WARN_LOGPROBS_UNSUPPORTED": "Ignoring logprobs param for non-logprobs model: {model_name}",
+ }
+
+
+ def maybe_warn(warning: str, **kwargs):
+ if os.getenv(warning):
+ pass
+ else:
+ warnings.warn(WARNINGS[warning].format(**kwargs))
+ os.environ[warning] = "1"
+
+
+ def deprecated(replacement: str):
+ """Decorator to mark methods as deprecated and suggest replacement.
+
+ Only shows the warning once per method to avoid spam.
+
+ Args:
+ replacement: The name of the replacement method to suggest
+ """
+
+ def decorator(func):
+ warning_key = f"DEPRECATED_{func.__module__}_{func.__qualname__}"
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ if not os.getenv(warning_key):
+ warnings.warn(
+ f"{func.__name__} is deprecated, use {replacement} instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ os.environ[warning_key] = "1"
+ return func(*args, **kwargs)
+
+ return wrapper
+
+ return decorator
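
The helper de-duplicates warnings by recording an environment flag after the first emission, so each warning key fires at most once per process and can be pre-silenced by setting the variable. A small behavior sketch:

import warnings

from lm_deluge.warnings import maybe_warn

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    maybe_warn("WARN_REASONING_UNSUPPORTED", model_name="gpt-4.1-mini")
    maybe_warn("WARN_REASONING_UNSUPPORTED", model_name="gpt-4.1-mini")

print(len(caught))  # 1: the second call is suppressed by the env-var flag
# Setting WARN_REASONING_UNSUPPORTED=1 in the environment silences it entirely.
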
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lm_deluge
- Version: 0.0.59
+ Version: 0.0.61
  Summary: Python utility for using LLM API models.
  Author-email: Benjamin Anderson <ben@trytaylor.ai>
  Requires-Python: >=3.10
@@ -2,26 +2,27 @@ lm_deluge/__init__.py,sha256=LKKIcqQoQyDpTck6fnB7iAs75BnfNNa3Bj5Nz7KU4Hk,376
  lm_deluge/batches.py,sha256=Km6QM5_7BlF2qEyo4WPlhkaZkpzrLqf50AaveHXQOoY,25127
  lm_deluge/cache.py,sha256=xO2AIYvP3tUpTMKQjwQQYfGRJSRi6e7sMlRhLjsS-u4,4873
  lm_deluge/cli.py,sha256=Ilww5gOw3J5v0NReq_Ra4hhxU4BCIJBl1oTGxJZKedc,12065
- lm_deluge/client.py,sha256=jDXGC032MmBfAFDHdWNm23gdDP9pCiNeU-wIi9RCG5g,33616
+ lm_deluge/client.py,sha256=TKRN1KAMOgtQFLazh_iyj185GBHtP7r8KAU4lod-qfs,40693
  lm_deluge/config.py,sha256=H1tQyJDNHGFuwxqQNL5Z-CjWAC0luHSBA3iY_pxmACM,932
  lm_deluge/embed.py,sha256=CO-TOlC5kOTAM8lcnicoG4u4K664vCBwHF1vHa-nAGg,13382
  lm_deluge/errors.py,sha256=oHjt7YnxWbh-eXMScIzov4NvpJMo0-2r5J6Wh5DQ1tk,209
- lm_deluge/file.py,sha256=FGomcG8s2go_55Z2CChflHgmU-UqgFftgFY8c7f_G70,5631
+ lm_deluge/file.py,sha256=PTmlJQ-IaYcYUFun9V0bJ1NPVP84edJrR0hvCMWFylY,19697
  lm_deluge/image.py,sha256=5AMXmn2x47yXeYNfMSMAOWcnlrOxxOel-4L8QCJwU70,8928
- lm_deluge/prompt.py,sha256=fm-wUkf5YMz1NXwFTlzjckwxoWW7cXhN2Z01zrQPO5E,60001
+ lm_deluge/prompt.py,sha256=1hGLOIwdyGFokKv0dPiVpke3OPHD6vK5qO6q9E8H89Y,62020
  lm_deluge/request_context.py,sha256=cBayMFWupWhde2OjRugW3JH-Gin-WFGc6DK2Mb4Prdc,2576
  lm_deluge/rerank.py,sha256=-NBAJdHz9OB-SWWJnHzkFmeVO4wR6lFV7Vw-SxG7aVo,11457
  lm_deluge/tool.py,sha256=eZpzgkSIlGD7KdZQwzLF-UdyRJpRnNNXpceGJrNhRrE,26421
  lm_deluge/tracker.py,sha256=aeS9GUJpgOSQRVXAnGDvlMO8qYpSxpTNLYj2hrMg0m8,14757
  lm_deluge/usage.py,sha256=xz9tAw2hqaJvv9aAVhnQ6N1Arn7fS8Shb28VwCW26wI,5136
+ lm_deluge/warnings.py,sha256=nlDJMCw30VhDEFxqLO2-bfXH_Tv5qmlglzUSbokCSw8,1498
  lm_deluge/api_requests/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
- lm_deluge/api_requests/anthropic.py,sha256=7tTb_NMPodDHrCzakrLd9LyXuLqeTQyAGU-FvMoV3gI,8437
+ lm_deluge/api_requests/anthropic.py,sha256=i4coscYQcg3TKkTJPoIvVAP5rY4HQA7Dt4P-OYTgBNw,8762
  lm_deluge/api_requests/base.py,sha256=GCcydwBRx4_xAuYLvasXlyj-TgqvKAVhVvxRfJkvPbY,9471
- lm_deluge/api_requests/bedrock.py,sha256=GmVxXz3ERAeQ7e52Nlztt81O4H9eJOQeOnS6b65vjm4,15453
+ lm_deluge/api_requests/bedrock.py,sha256=Uppne03GcIEk1tVYzoGu7GXK2Sg94a_xvFTLDRN_phY,15412
  lm_deluge/api_requests/common.py,sha256=BZ3vRO5TB669_UsNKugkkuFSzoLHOYJIKt4nV4sf4vc,422
- lm_deluge/api_requests/gemini.py,sha256=COHqPWmeaq9fpg0YwOZqQTUbijKnXNF4cvMLnW9kLl8,7857
- lm_deluge/api_requests/mistral.py,sha256=S_LpOfCGbCVEROH_od3P-tYeNYTKFMamMTL-c_wFCBI,4597
- lm_deluge/api_requests/openai.py,sha256=_da5n2FECjzKFj0fD9BzSUm2E_E0tSgGAMBk9mHOBjc,24908
+ lm_deluge/api_requests/gemini.py,sha256=4uD7fQl0yWyAvYkPNi3oO1InBnvYfo5_QR6k-va-2GI,7838
+ lm_deluge/api_requests/mistral.py,sha256=8JZP2CDf1XZfaPcTk0WS4q-VfYYj58ptpoH8LD3MQG4,4528
+ lm_deluge/api_requests/openai.py,sha256=qRBakHOOMYJWvKO0HeeE5C1Dv_dbokuizZin9Ca4k_k,24855
  lm_deluge/api_requests/response.py,sha256=vG194gAH5p7ulpNy4qy5Pryfb1p3ZV21-YGoj__ru3E,7436
  lm_deluge/api_requests/deprecated/bedrock.py,sha256=WrcIShCoO8JCUSlFOCHxg6KQCNTZfw3TpYTvSpYk4mA,11320
  lm_deluge/api_requests/deprecated/cohere.py,sha256=KgDScD6_bWhAzOY5BHZQKSA3kurt4KGENqC4wLsGmcU,5142
@@ -53,7 +54,7 @@ lm_deluge/models/grok.py,sha256=TDzr8yfTaHbdJhwMA-Du6L-efaKFJhjTQViuVElCCHI,2566
  lm_deluge/models/groq.py,sha256=Mi5WE1xOBGoZlymD0UN6kzhH_NOmfJYU4N2l-TO0Z8Q,2552
  lm_deluge/models/meta.py,sha256=BBgnscL1gMcIdPbRqrlDl_q9YAYGSrkw9JkAIabXtLs,1883
  lm_deluge/models/mistral.py,sha256=x67o5gckBGmPcIGdVbS26XZAYFKBYM4tsxEAahGp8bk,4323
- lm_deluge/models/openai.py,sha256=HC_oNLmKkmShkcfeUgyhesACtXGg__I2WiIIDrN-X84,10176
+ lm_deluge/models/openai.py,sha256=6J4eAt6Iu5RopokyldUQzRlviFBXBqhLqpVP5tztzqI,11074
  lm_deluge/models/openrouter.py,sha256=O-Po4tmHjAqFIVU96TUL0QnK01R4e2yDN7Z4sYJ-CuE,2120
  lm_deluge/models/together.py,sha256=AjKhPsazqBgqyLwHkNQW07COM1n_oSrYQRp2BFVvn9o,4381
  lm_deluge/presets/cerebras.py,sha256=MDkqj15qQRrj8wxSCDNNe_Cs7h1WN1UjV6lTmSY1olQ,479
@@ -64,8 +65,8 @@ lm_deluge/util/logprobs.py,sha256=UkBZakOxWluaLqHrjARu7xnJ0uCHVfLGHJdnYlEcutk,11
  lm_deluge/util/spatial.py,sha256=BsF_UKhE-x0xBirc-bV1xSKZRTUhsOBdGqsMKme20C8,4099
  lm_deluge/util/validation.py,sha256=hz5dDb3ebvZrZhnaWxOxbNSVMI6nmaOODBkk0htAUhs,1575
  lm_deluge/util/xml.py,sha256=Ft4zajoYBJR3HHCt2oHwGfymGLdvp_gegVmJ-Wqk4Ck,10547
- lm_deluge-0.0.59.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
- lm_deluge-0.0.59.dist-info/METADATA,sha256=WKLfnV3lKGr1gkfEEyDhbp4oGMsu30LRXs0zPLRsdsk,13443
- lm_deluge-0.0.59.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- lm_deluge-0.0.59.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
- lm_deluge-0.0.59.dist-info/RECORD,,
+ lm_deluge-0.0.61.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
+ lm_deluge-0.0.61.dist-info/METADATA,sha256=TKr3MSJYw8Hx4Qx5dmldB5UiS1fqeiwcZRIolb6W4ug,13443
+ lm_deluge-0.0.61.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ lm_deluge-0.0.61.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
+ lm_deluge-0.0.61.dist-info/RECORD,,