lm-deluge 0.0.40__py3-none-any.whl → 0.0.42__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lm-deluge might be problematic.

@@ -1,6 +1,7 @@
 import asyncio
 import json
 import os
+import warnings

 from aiohttp import ClientResponse

@@ -135,27 +136,110 @@ async def _build_anthropic_bedrock_request(
     return request_json, base_headers, auth, url, region


+async def _build_openai_bedrock_request(
+    model: APIModel,
+    context: RequestContext,
+):
+    prompt = context.prompt
+    tools = context.tools
+    sampling_params = context.sampling_params
+
+    # Handle AWS auth
+    access_key = os.getenv("AWS_ACCESS_KEY_ID")
+    secret_key = os.getenv("AWS_SECRET_ACCESS_KEY")
+    session_token = os.getenv("AWS_SESSION_TOKEN")
+
+    if not access_key or not secret_key:
+        raise ValueError(
+            "AWS credentials not found. Please set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables."
+        )
+
+    # Determine region - GPT-OSS is available in us-west-2
+    region = "us-west-2"
+
+    # Construct the endpoint URL for OpenAI-compatible endpoint
+    service = "bedrock"
+    url = f"https://bedrock-runtime.{region}.amazonaws.com/openai/v1/chat/completions"
+
+    # Prepare headers
+    auth = AWS4Auth(
+        access_key,
+        secret_key,
+        region,
+        service,
+        session_token=session_token,
+    )
+
+    # Setup basic headers (AWS4Auth will add the Authorization header)
+    base_headers = {
+        "Content-Type": "application/json",
+    }
+
+    # Prepare request body in OpenAI format
+    request_json = {
+        "model": model.name,
+        "messages": prompt.to_openai(),
+        "temperature": sampling_params.temperature,
+        "top_p": sampling_params.top_p,
+        "max_completion_tokens": sampling_params.max_new_tokens,
+    }
+
+    # Note: GPT-OSS on Bedrock doesn't support response_format parameter
+    # Even though the model supports JSON, we can't use the response_format parameter
+    if sampling_params.json_mode and model.supports_json:
+        warnings.warn(
+            f"JSON mode requested for {model.name} but response_format parameter not supported on Bedrock"
+        )
+
+    if tools:
+        request_tools = []
+        for tool in tools:
+            if isinstance(tool, Tool):
+                request_tools.append(tool.dump_for("openai-completions"))
+            elif isinstance(tool, MCPServer):
+                as_tools = await tool.to_tools()
+                request_tools.extend(
+                    [t.dump_for("openai-completions") for t in as_tools]
+                )
+        request_json["tools"] = request_tools
+
+    return request_json, base_headers, auth, url, region
+
+
 class BedrockRequest(APIRequestBase):
     def __init__(self, context: RequestContext):
         super().__init__(context=context)

         self.model = APIModel.from_registry(self.context.model_name)
         self.region = None  # Will be set during build_request
+        self.is_openai_model = self.model.name.startswith("openai.")

     async def build_request(self):
-        self.url = f"{self.model.api_base}/messages"
-
-        # Lock images as bytes if caching is enabled
-        if self.context.cache is not None:
-            self.context.prompt.lock_images_as_bytes()
-
-        (
-            self.request_json,
-            base_headers,
-            self.auth,
-            self.url,
-            self.region,
-        ) = await _build_anthropic_bedrock_request(self.model, self.context)
+        if self.is_openai_model:
+            # Use OpenAI-compatible endpoint
+            (
+                self.request_json,
+                base_headers,
+                self.auth,
+                self.url,
+                self.region,
+            ) = await _build_openai_bedrock_request(self.model, self.context)
+        else:
+            # Use Anthropic-style endpoint
+            self.url = f"{self.model.api_base}/messages"
+
+            # Lock images as bytes if caching is enabled
+            if self.context.cache is not None:
+                self.context.prompt.lock_images_as_bytes()
+
+            (
+                self.request_json,
+                base_headers,
+                self.auth,
+                self.url,
+                self.region,
+            ) = await _build_anthropic_bedrock_request(self.model, self.context)
+
         self.request_header = self.merge_headers(
             base_headers, exclude_patterns=["anthropic", "openai", "gemini", "mistral"]
         )
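For reference, here is a minimal standalone sketch (not part of the package) of calling the same Bedrock OpenAI-compatible chat completions endpoint directly with requests and requests-aws4auth. The URL, region, and body shape mirror what _build_openai_bedrock_request assembles above, and the model ID matches the registry entries added later in this diff; treat it as illustrative only.

# Illustrative only: a direct call to the Bedrock OpenAI-compatible endpoint,
# mirroring the URL and body built by _build_openai_bedrock_request above.
import os

import requests
from requests_aws4auth import AWS4Auth  # same SigV4 helper used in the diff

region = "us-west-2"
url = f"https://bedrock-runtime.{region}.amazonaws.com/openai/v1/chat/completions"

auth = AWS4Auth(
    os.environ["AWS_ACCESS_KEY_ID"],
    os.environ["AWS_SECRET_ACCESS_KEY"],
    region,
    "bedrock",
    session_token=os.getenv("AWS_SESSION_TOKEN"),
)

body = {
    "model": "openai.gpt-oss-120b-1:0",  # ID used by the new registry entries
    "messages": [{"role": "user", "content": "Say hello."}],
    "temperature": 0.7,
    "top_p": 1.0,
    "max_completion_tokens": 128,
}

resp = requests.post(url, json=body, auth=auth, timeout=60)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])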
@@ -232,34 +316,64 @@ class BedrockRequest(APIRequestBase):
         thinking = None
         content = None
         usage = None
+        finish_reason = None
         status_code = http_response.status
         mimetype = http_response.headers.get("Content-Type", None)
+        data = None
         assert self.context.status_tracker

         if status_code >= 200 and status_code < 300:
             try:
                 data = await http_response.json()
-                response_content = data["content"]
-
-                # Parse response into Message with parts
-                parts = []
-                for item in response_content:
-                    if item["type"] == "text":
-                        parts.append(Text(item["text"]))
-                    elif item["type"] == "thinking":
-                        thinking = item["thinking"]
-                        parts.append(Thinking(item["thinking"]))
-                    elif item["type"] == "tool_use":
-                        parts.append(
-                            ToolCall(
-                                id=item["id"],
-                                name=item["name"],
-                                arguments=item["input"],
+
+                if self.is_openai_model:
+                    # Handle OpenAI-style response
+                    parts = []
+                    message = data["choices"][0]["message"]
+                    finish_reason = data["choices"][0]["finish_reason"]
+
+                    # Add text content if present
+                    if message.get("content"):
+                        parts.append(Text(message["content"]))
+
+                    # Add tool calls if present
+                    if "tool_calls" in message:
+                        for tool_call in message["tool_calls"]:
+                            parts.append(
+                                ToolCall(
+                                    id=tool_call["id"],
+                                    name=tool_call["function"]["name"],
+                                    arguments=json.loads(
+                                        tool_call["function"]["arguments"]
+                                    ),
+                                )
+                            )
+
+                    content = Message("assistant", parts)
+                    usage = Usage.from_openai_usage(data["usage"])
+                else:
+                    # Handle Anthropic-style response
+                    response_content = data["content"]
+
+                    # Parse response into Message with parts
+                    parts = []
+                    for item in response_content:
+                        if item["type"] == "text":
+                            parts.append(Text(item["text"]))
+                        elif item["type"] == "thinking":
+                            thinking = item["thinking"]
+                            parts.append(Thinking(item["thinking"]))
+                        elif item["type"] == "tool_use":
+                            parts.append(
+                                ToolCall(
+                                    id=item["id"],
+                                    name=item["name"],
+                                    arguments=item["input"],
+                                )
                             )
-                        )

-                content = Message("assistant", parts)
-                usage = Usage.from_anthropic_usage(data["usage"])
+                    content = Message("assistant", parts)
+                    usage = Usage.from_anthropic_usage(data["usage"])
             except Exception as e:
                 is_error = True
                 error_message = (
275
389
  error_message = text
276
390
 
277
391
  # Handle special kinds of errors
392
+ retry_with_different_model = status_code in [529, 429, 400, 401, 403, 413]
278
393
  if is_error and error_message is not None:
279
394
  if (
280
395
  "rate limit" in error_message.lower()
@@ -286,6 +401,7 @@ class BedrockRequest(APIRequestBase):
             if "context length" in error_message or "too long" in error_message:
                 error_message += " (Context length exceeded, set retries to 0.)"
                 self.context.attempts_left = 0
+                retry_with_different_model = True

         return APIResponse(
             id=self.context.task_id,
@@ -299,4 +415,7 @@ class BedrockRequest(APIRequestBase):
             region=self.region,
             sampling_params=self.context.sampling_params,
             usage=usage,
+            raw_response=data,
+            finish_reason=finish_reason,
+            retry_with_different_model=retry_with_different_model,
         )
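The new retry_with_different_model flag marks responses worth retrying on another model: it is set for a fixed list of status codes and also forced on when the context-length check above trips. A hedged sketch of how a caller might act on the flag; the send callable and model list are hypothetical stand-ins, not lm-deluge APIs, and the fallback loop itself is not part of this diff:

# Hedged sketch: acting on retry_with_different_model in a caller.
RETRYABLE_STATUS_CODES = {529, 429, 400, 401, 403, 413}  # list used in the diff


async def call_with_fallback(prompt, models, send):
    """Try each model in order until one does not request a fallback."""
    last = None
    for model in models:
        last = await send(model, prompt)  # returns an APIResponse-like object
        if not getattr(last, "retry_with_different_model", False):
            return last
    return last  # every model asked for a fallback; surface the last response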
@@ -113,6 +113,9 @@ class OpenAIRequest(APIRequestBase):
         finish_reason = None
         assert self.context.status_tracker

+        if status_code == 500:
+            print("Internal Server Error: ", (await http_response.text()))
+
         if status_code >= 200 and status_code < 300:
             try:
                 data = await http_response.json()
@@ -305,6 +308,9 @@ class OpenAIResponsesRequest(APIRequestBase):
         data = None
         assert self.context.status_tracker

+        if status_code == 500:
+            print("Internal Server Error: ", http_response.text())
+
         if status_code >= 200 and status_code < 300:
             try:
                 data = await http_response.json()
@@ -428,10 +434,12 @@ class OpenAIResponsesRequest(APIRequestBase):
                 error_message = f"Error parsing {self.model.name} responses API response: {str(e)}"

         elif mimetype and "json" in mimetype.lower():
+            print("is_error True, json response")
             is_error = True
             data = await http_response.json()
             error_message = json.dumps(data)
         else:
+            print("is_error True, non-json response")
             is_error = True
             text = await http_response.text()
             error_message = text
lm_deluge/batches.py CHANGED
@@ -1,21 +1,22 @@
-import os
+import asyncio
 import json
+import os
+import tempfile
 import time
-import asyncio
+from typing import Literal, Sequence
+
 import aiohttp
-import tempfile
-from lm_deluge.prompt import CachePattern, Conversation, prompts_to_conversations
-from lm_deluge.config import SamplingParams
-from lm_deluge.models import APIModel
-from typing import Sequence, Literal
-from lm_deluge.api_requests.openai import _build_oa_chat_request
-from lm_deluge.api_requests.anthropic import _build_anthropic_request
 from rich.console import Console
 from rich.live import Live
 from rich.spinner import Spinner
 from rich.table import Table
 from rich.text import Text
-from lm_deluge.models import registry
+
+from lm_deluge.api_requests.anthropic import _build_anthropic_request
+from lm_deluge.api_requests.openai import _build_oa_chat_request
+from lm_deluge.config import SamplingParams
+from lm_deluge.models import APIModel, registry
+from lm_deluge.prompt import CachePattern, Conversation, prompts_to_conversations
 from lm_deluge.request_context import RequestContext


@@ -162,6 +163,91 @@ async def _submit_anthropic_batch(file_path: str, headers: dict, model: str):
     return batch_id


+async def create_batch_files_oa(
+    model: str,
+    sampling_params: SamplingParams,
+    prompts: Sequence[str | list[dict] | Conversation],
+    batch_size: int = 50_000,
+    destination: str | None = None,  # if none provided, temp files
+):
+    MAX_BATCH_SIZE_BYTES = 200 * 1024 * 1024  # 200MB
+    MAX_BATCH_SIZE_ITEMS = batch_size
+
+    prompts = prompts_to_conversations(prompts)
+    if any(p is None for p in prompts):
+        raise ValueError("All prompts must be valid.")
+
+    model_obj = APIModel.from_registry(model)
+
+    current_batch = []
+    current_batch_size = 0
+    file_paths = []
+
+    for idx, prompt in enumerate(prompts):
+        assert isinstance(prompt, Conversation)
+        context = RequestContext(
+            task_id=idx,
+            model_name=model,
+            prompt=prompt,
+            sampling_params=sampling_params,
+        )
+        request = {
+            "custom_id": str(idx),
+            "method": "POST",
+            "url": "/v1/chat/completions",
+            "body": await _build_oa_chat_request(model_obj, context),
+        }
+
+        # Calculate size of this request
+        request_json = json.dumps(request) + "\n"
+        request_size = len(request_json.encode("utf-8"))
+
+        # Check if adding this request would exceed limits
+        would_exceed_size = current_batch_size + request_size > MAX_BATCH_SIZE_BYTES
+        would_exceed_items = len(current_batch) >= MAX_BATCH_SIZE_ITEMS
+
+        if current_batch and (would_exceed_size or would_exceed_items):
+            # Submit current batch
+            def write_batch_file():
+                with tempfile.NamedTemporaryFile(
+                    mode="w+", suffix=".jsonl", delete=False
+                ) as f:
+                    for batch_request in current_batch:
+                        json.dump(batch_request, f)
+                        f.write("\n")
+                    print("wrote", len(current_batch), "items")
+                    return f.name
+
+            file_path = await asyncio.to_thread(write_batch_file)
+            file_paths.append(file_path)
+            # Start new batch
+            current_batch = []
+            current_batch_size = 0
+            # current_batch_start_idx = idx
+
+        # Add request to current batch
+        current_batch.append(request)
+        current_batch_size += request_size
+
+    # Submit final batch if it has items
+    if current_batch:
+
+        def write_final_batch_file():
+            with tempfile.NamedTemporaryFile(
+                mode="w+", suffix=".jsonl", delete=False
+            ) as f:
+                for batch_request in current_batch:
+                    json.dump(batch_request, f)
+                    f.write("\n")
+                print("wrote", len(current_batch), "items")
+                return f.name
+
+        file_path = await asyncio.to_thread(write_final_batch_file)
+        file_paths.append(file_path)
+
+    return file_paths
+
+
 async def submit_batches_oa(
     model: str,
     sampling_params: SamplingParams,
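create_batch_files_oa shards requests into JSONL files, one request object per line, cutting a new file whenever the current one would exceed 200 MB or batch_size items. Each line follows the OpenAI Batch API input shape. A hedged sketch of one such line with a hand-written body (in the diff the body comes from _build_oa_chat_request, and the model name here is hypothetical):

# Hedged sketch of one JSONL line in a generated batch file.
import json

line = {
    "custom_id": "0",
    "method": "POST",
    "url": "/v1/chat/completions",
    "body": {
        "model": "gpt-4.1-mini",  # hypothetical model name for illustration
        "messages": [{"role": "user", "content": "Hello!"}],
        "max_completion_tokens": 64,
    },
}

encoded = json.dumps(line) + "\n"
# this byte count is what gets compared against the 200 MB per-file cap
print(len(encoded.encode("utf-8")), "bytes")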
lm_deluge/client.py CHANGED
@@ -294,7 +294,7 @@ class _LLMClient(BaseModel):

             # Print error message for debugging
             error_msg = (
-                f"Error task {context.task_id}. Model: {response.model_internal}"
+                f"😔 Error task {context.task_id}. Model: {response.model_internal}"
             )
             if response.status_code:
                 error_msg += f" Code: {response.status_code},"
@@ -356,16 +356,16 @@ class _LLMClient(BaseModel):
         prompts = prompts_to_conversations(prompts)
         ids = list(range(len(prompts)))
         results: list[APIResponse | None] = [None for _ in range(len(prompts))]
-
-        # Create StatusTracker
-        tracker = StatusTracker(
-            max_requests_per_minute=self.max_requests_per_minute,
-            max_tokens_per_minute=self.max_tokens_per_minute,
-            max_concurrent_requests=self.max_concurrent_requests,
-            progress_style=self.progress,
-            use_progress_bar=show_progress,
-        )
-        tracker.init_progress_bar(total=len(prompts), disable=not show_progress)
+        # Use existing tracker if client has been opened; otherwise open/close automatically
+        tracker: StatusTracker
+        tracker_preopened = self._tracker is not None
+        if tracker_preopened:
+            tracker = self._tracker  # type: ignore[assignment]
+            tracker.add_to_total(len(prompts))
+        else:
+            self.open(total=len(prompts), show_progress=show_progress)
+            tracker = self._tracker  # type: ignore[assignment]
+        assert tracker is not None

         # Create retry queue for failed requests
         retry_queue: asyncio.Queue[RequestContext] = asyncio.Queue()
@@ -458,7 +458,8 @@
             # Sleep - original logic
             await asyncio.sleep(seconds_to_sleep_each_loop + tracker.seconds_to_pause)

-        tracker.log_final_status()
+        if not tracker_preopened:
+            self.close()

         if return_completions_only:
             return [r.completion if r is not None else None for r in results]
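Taken together, these two changes let process_prompts_async reuse a tracker that was opened explicitly, and only open and close one itself when none exists. A hedged usage sketch: the constructor call and model name are assumptions for illustration, while open(), close(), and process_prompts_async() are the methods used in the diff.

# Hedged sketch: sharing one tracker/progress bar across several calls.
import asyncio

from lm_deluge import LLMClient  # assumed import path


async def main():
    client = LLMClient("gpt-4.1-mini")  # assumed constructor usage

    # Pre-open so subsequent calls reuse the same tracker instead of
    # creating and tearing one down per call.
    client.open(total=0, show_progress=True)
    try:
        await client.process_prompts_async(["first batch of prompts"])
        await client.process_prompts_async(["second batch, same progress bar"])
    finally:
        client.close()


asyncio.run(main())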
@@ -473,6 +474,7 @@ class _LLMClient(BaseModel):
         show_progress=True,
         tools: list[Tool | dict | MCPServer] | None = None,
         cache: CachePattern | None = None,
+        use_responses_api: bool = False,
     ):
         return asyncio.run(
             self.process_prompts_async(
@@ -481,6 +483,7 @@ class _LLMClient(BaseModel):
                 show_progress=show_progress,
                 tools=tools,
                 cache=cache,
+                use_responses_api=use_responses_api,
             )
         )

@@ -96,4 +96,33 @@ BEDROCK_MODELS = {
         "tokens_per_minute": 400_000,
         "reasoning_model": True,
     },
+    # GPT-OSS on AWS Bedrock
+    "gpt-oss-120b-bedrock": {
+        "id": "gpt-oss-120b-bedrock",
+        "name": "openai.gpt-oss-120b-1:0",
+        "regions": ["us-west-2"],
+        "api_base": "",
+        "api_key_env_var": "",
+        "api_spec": "bedrock",
+        "input_cost": 0.0,
+        "output_cost": 0.0,
+        "supports_json": False,
+        "supports_logprobs": False,
+        "supports_responses": False,
+        "reasoning_model": False,
+    },
+    "gpt-oss-20b-bedrock": {
+        "id": "gpt-oss-20b-bedrock",
+        "name": "openai.gpt-oss-20b-1:0",
+        "regions": ["us-west-2"],
+        "api_base": "",
+        "api_key_env_var": "",
+        "api_spec": "bedrock",
+        "input_cost": 0.0,
+        "output_cost": 0.0,
+        "supports_json": False,
+        "supports_logprobs": False,
+        "supports_responses": False,
+        "reasoning_model": False,
+    },
 }
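The two new registry entries expose GPT-OSS on Bedrock under the IDs gpt-oss-120b-bedrock and gpt-oss-20b-bedrock, both pinned to us-west-2 and routed through the bedrock api_spec. A hedged sketch of resolving one of them through the same APIModel.from_registry lookup the request code uses; whether the registry fields are exposed as attributes with these exact names is an assumption here.

# Hedged sketch: resolving one of the new Bedrock GPT-OSS registry entries.
from lm_deluge.models import APIModel

model = APIModel.from_registry("gpt-oss-120b-bedrock")
# "openai.gpt-oss-120b-1:0" -> the "openai." prefix triggers the is_openai_model branch
print(model.name)
print(model.api_spec)  # "bedrock" (attribute access assumed to mirror the registry dict)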
lm_deluge/tracker.py CHANGED
@@ -14,7 +14,7 @@ from rich.progress import (
     TextColumn,
 )
 from rich.text import Text
-from tqdm import tqdm
+from tqdm.auto import tqdm

 SECONDS_TO_PAUSE_AFTER_RATE_LIMIT_ERROR = 5

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.40
+Version: 0.0.42
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
@@ -1,9 +1,9 @@
 lm_deluge/__init__.py,sha256=mAztMuxINmh7dGbYnT8tsmw1eryQAvd0jpY8yHzd0EE,315
 lm_deluge/agent.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lm_deluge/batches.py,sha256=vJXVnuuGkIQnXoDPODPERrvdG9X1Ov1jnXExnPe6ZAc,21772
+lm_deluge/batches.py,sha256=rQocJLyIs3Ko_nRdAE9jT__5cKWYxiIRAH_Lw3L0E1k,24653
 lm_deluge/cache.py,sha256=VB1kv8rM2t5XWPR60uhszFcxLDnVKOe1oA5hYjVDjIo,4375
 lm_deluge/cli.py,sha256=Ilww5gOw3J5v0NReq_Ra4hhxU4BCIJBl1oTGxJZKedc,12065
-lm_deluge/client.py,sha256=ddLk9p4nHMQdDKTV2klPJ9D5Xoj89WepWMHcFFo0we0,34102
+lm_deluge/client.py,sha256=WeVdwS6_tnSEqnZRpokWL7cBXLkG1B9SXD9hF2uYKls,34292
 lm_deluge/config.py,sha256=H1tQyJDNHGFuwxqQNL5Z-CjWAC0luHSBA3iY_pxmACM,932
 lm_deluge/embed.py,sha256=CO-TOlC5kOTAM8lcnicoG4u4K664vCBwHF1vHa-nAGg,13382
 lm_deluge/errors.py,sha256=oHjt7YnxWbh-eXMScIzov4NvpJMo0-2r5J6Wh5DQ1tk,209
@@ -14,16 +14,16 @@ lm_deluge/prompt.py,sha256=quG4dzK2yMiRqPiF67USY5Gl2TqT3rwYkZDJTfwZFHw,37183
 lm_deluge/request_context.py,sha256=o33LSEwnK6YPhZeulUoSE_VrdKCXiCQa0tjjixK2K6M,2540
 lm_deluge/rerank.py,sha256=-NBAJdHz9OB-SWWJnHzkFmeVO4wR6lFV7Vw-SxG7aVo,11457
 lm_deluge/tool.py,sha256=_coOKB9nPNVZoseMRumRyQ8BMR7_d0IlstzMHNT69JY,15732
-lm_deluge/tracker.py,sha256=5ehC-mcLWXpI-OBm6B9W8R4wUegA9OqkRc1PxyotvnY,11526
+lm_deluge/tracker.py,sha256=EHFPsS94NmsON2u97rSE70q1t6pwCsixUmGV-kIphMs,11531
 lm_deluge/usage.py,sha256=VMEKghePFIID5JFBObqYxFpgYxnbYm_dnHy7V1-_T6M,4866
 lm_deluge/api_requests/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 lm_deluge/api_requests/anthropic.py,sha256=J5BzYV7aYNoL6FPArB6usyS267z1BguZTRY5JLMd0So,8159
 lm_deluge/api_requests/base.py,sha256=EVHNFtlttKbN7Tt1MnLaO-NjvKHPSV5CqlRv-OnpVAE,5593
-lm_deluge/api_requests/bedrock.py,sha256=FZMhF590JzJtAYDugbDtG93RhPt5efWZ0Wn4V8U8Dgw,11031
+lm_deluge/api_requests/bedrock.py,sha256=GmVxXz3ERAeQ7e52Nlztt81O4H9eJOQeOnS6b65vjm4,15453
 lm_deluge/api_requests/common.py,sha256=BZ3vRO5TB669_UsNKugkkuFSzoLHOYJIKt4nV4sf4vc,422
 lm_deluge/api_requests/gemini.py,sha256=COHqPWmeaq9fpg0YwOZqQTUbijKnXNF4cvMLnW9kLl8,7857
 lm_deluge/api_requests/mistral.py,sha256=S_LpOfCGbCVEROH_od3P-tYeNYTKFMamMTL-c_wFCBI,4597
-lm_deluge/api_requests/openai.py,sha256=FL_UCELdkaf_GZIBPViLdNcUwPMwqvEKj9mMcH72Nmc,22346
+lm_deluge/api_requests/openai.py,sha256=queIeUwQnXUZpa7ebCkSACNCGY3LXnz9_St-AnWcCJU,22656
 lm_deluge/api_requests/response.py,sha256=Zc9kxBqB4JJIFR6OhXW-BS3ulK5JygE75JNBEpKgn5Q,5989
 lm_deluge/api_requests/deprecated/bedrock.py,sha256=WrcIShCoO8JCUSlFOCHxg6KQCNTZfw3TpYTvSpYk4mA,11320
 lm_deluge/api_requests/deprecated/cohere.py,sha256=KgDScD6_bWhAzOY5BHZQKSA3kurt4KGENqC4wLsGmcU,5142
@@ -45,7 +45,7 @@ lm_deluge/llm_tools/score.py,sha256=9oGA3-k2U5buHQXkXaEI9M4Wb5yysNhTLsPbGeghAlQ,
 lm_deluge/llm_tools/translate.py,sha256=iXyYvQZ8bC44FWhBk4qpdqjKM1WFF7Shq-H2PxhPgg4,1452
 lm_deluge/models/__init__.py,sha256=Dh2CuTZeCAddIIXwWJXOjM10B0CpKqjTdMXWYuBP0s8,4289
 lm_deluge/models/anthropic.py,sha256=3pW7fyBY9Xh1m1RtfncU9amWTtKnjGZD0STjpu8iUSQ,5700
-lm_deluge/models/bedrock.py,sha256=jpb_n-Wh3G3VAKZn7U1t5r5IQ2oTDXwrjGIP013l2cI,4534
+lm_deluge/models/bedrock.py,sha256=PIaXvho2agCm1hSSAEy8zHCITjApXT2eUOGDKW425tE,5424
 lm_deluge/models/cerebras.py,sha256=u2FMXJF6xMr0euDRKLKMo_NVTOcvSrrEpehbHr8sSeE,2050
 lm_deluge/models/cohere.py,sha256=M_7cVA9QD4qe1X4sZXCpKEkKrKz2jibaspiTnzsZ1GU,3998
 lm_deluge/models/deepseek.py,sha256=6_jDEprNNYis5I5MDQNloRes9h1P6pMYHXxOd2UZMgg,941
@@ -64,8 +64,8 @@ lm_deluge/util/logprobs.py,sha256=UkBZakOxWluaLqHrjARu7xnJ0uCHVfLGHJdnYlEcutk,11
 lm_deluge/util/spatial.py,sha256=BsF_UKhE-x0xBirc-bV1xSKZRTUhsOBdGqsMKme20C8,4099
 lm_deluge/util/validation.py,sha256=hz5dDb3ebvZrZhnaWxOxbNSVMI6nmaOODBkk0htAUhs,1575
 lm_deluge/util/xml.py,sha256=Ft4zajoYBJR3HHCt2oHwGfymGLdvp_gegVmJ-Wqk4Ck,10547
-lm_deluge-0.0.40.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
-lm_deluge-0.0.40.dist-info/METADATA,sha256=QMWHzRuDQohIf6ZWjWpajRmQhAh2aJqEqAnXgVZ613s,13443
-lm_deluge-0.0.40.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lm_deluge-0.0.40.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
-lm_deluge-0.0.40.dist-info/RECORD,,
+lm_deluge-0.0.42.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
+lm_deluge-0.0.42.dist-info/METADATA,sha256=yfYfPalVaruEKv25EO2hCIDKUzKbM-EJgaY2i3x1_I8,13443
+lm_deluge-0.0.42.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lm_deluge-0.0.42.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
+lm_deluge-0.0.42.dist-info/RECORD,,