lm-deluge 0.0.50__py3-none-any.whl → 0.0.52__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lm-deluge might be problematic; consult the registry's advisory page for more details.

@@ -1,5 +1,6 @@
1
1
  import json
2
2
  import os
3
+ import traceback as tb
3
4
  import warnings
4
5
  from types import SimpleNamespace
5
6
 
@@ -341,7 +342,13 @@ class OpenAIResponsesRequest(APIRequestBase):
341
342
  elif content_item.get("type") == "refusal":
342
343
  parts.append(Text(content_item["refusal"]))
343
344
  elif item.get("type") == "reasoning":
344
- parts.append(Thinking(item["summary"]["text"]))
345
+ summary = item["summary"]
346
+ if not summary:
347
+ continue
348
+ if isinstance(summary, list) and len(summary) > 0:
349
+ summary = summary[0]
350
+ assert isinstance(summary, dict), "summary isn't a dict"
351
+ parts.append(Thinking(summary["text"]))
345
352
  elif item.get("type") == "function_call":
346
353
  parts.append(
347
354
  ToolCall(
@@ -432,6 +439,9 @@ class OpenAIResponsesRequest(APIRequestBase):
432
439
  except Exception as e:
433
440
  is_error = True
434
441
  error_message = f"Error parsing {self.model.name} responses API response: {str(e)}"
442
+ print("got data:", data)
443
+ traceback = tb.format_exc()
444
+ print(f"Error details:\n{traceback}")
435
445
 
436
446
  elif mimetype and "json" in mimetype.lower():
437
447
  print("is_error True, json response")
@@ -14,7 +14,7 @@ class APIResponse:
14
14
  # request information
15
15
  id: int # should be unique to the request within a given prompt-processing call
16
16
  model_internal: str # our internal model tag
17
- prompt: Conversation | dict
17
+ prompt: Conversation | dict # dict if converted to log
18
18
  sampling_params: SamplingParams
19
19
 
20
20
  # http response information
lm_deluge/client.py CHANGED
@@ -357,6 +357,8 @@ class _LLMClient(BaseModel):
357
357
  prompts = prompts_to_conversations(prompts)
358
358
  ids = list(range(len(prompts)))
359
359
  results: list[APIResponse | None] = [None for _ in range(len(prompts))]
360
+ contexts: list[RequestContext | None] = [None for _ in range(len(prompts))]
361
+ inflight_tasks: set[asyncio.Task[None]] = set()
360
362
  # Use existing tracker if client has been opened; otherwise open/close automatically
361
363
  tracker: StatusTracker
362
364
  tracker_preopened = self._tracker is not None
@@ -419,6 +421,8 @@ class _LLMClient(BaseModel):
419
421
  )
420
422
 
421
423
  # Launch simplified request processing
424
+ contexts[next_context.task_id] = next_context
425
+
422
426
  async def process_and_store(ctx: RequestContext):
423
427
  try:
424
428
  response = await self.process_single_request(ctx, retry_queue)
@@ -439,7 +443,9 @@ class _LLMClient(BaseModel):
439
443
  if ctx.status_tracker:
440
444
  ctx.status_tracker.task_failed(ctx.task_id)
441
445
 
442
- asyncio.create_task(process_and_store(next_context))
446
+ task = asyncio.create_task(process_and_store(next_context))
447
+ inflight_tasks.add(task)
448
+ task.add_done_callback(inflight_tasks.discard)
443
449
  next_context = None # Reset after successful dispatch
444
450
  next_is_retry = False
445
451
 
@@ -456,9 +462,37 @@ class _LLMClient(BaseModel):
456
462
  # Yield briefly to allow in-flight tasks to progress
457
463
  await asyncio.sleep(min(0.01, seconds_to_sleep_each_loop))
458
464
 
465
+ if inflight_tasks:
466
+ await asyncio.gather(*inflight_tasks, return_exceptions=True)
467
+
459
468
  if not tracker_preopened:
460
469
  self.close()
461
470
 
471
+ for idx, response in enumerate(results):
472
+ if response is None:
473
+ ctx = contexts[idx]
474
+ prompt = ctx.prompt if ctx else prompts[idx]
475
+ sampling_params = (
476
+ ctx.sampling_params
477
+ if ctx
478
+ else self.sampling_params[0]
479
+ if self.sampling_params
480
+ else SamplingParams()
481
+ )
482
+ model_name = ctx.model_name if ctx else self.model_names[0]
483
+ assert isinstance(
484
+ prompt, Conversation
485
+ ), "expected prompt to be a conversation"
486
+ results[idx] = APIResponse(
487
+ id=idx,
488
+ model_internal=model_name,
489
+ prompt=prompt,
490
+ sampling_params=sampling_params,
491
+ status_code=None,
492
+ is_error=True,
493
+ error_message="Internal error: no response produced.",
494
+ )
495
+
462
496
  if return_completions_only:
463
497
  return [r.completion if r is not None else None for r in results]
464
498
 
lm_deluge/tool.py CHANGED
@@ -58,14 +58,14 @@ class Tool(BaseModel):
58
58
  """
59
59
 
60
60
  name: str
61
- description: str | None
62
- parameters: dict[str, Any] | None
61
+ description: str | None = None
62
+ parameters: dict[str, Any] | None = None
63
63
  required: list[str] = Field(default_factory=list)
64
64
  additionalProperties: bool | None = None # only
65
65
  # if desired, can provide a callable to run the tool
66
66
  run: Callable | None = None
67
67
  # for built-in tools that don't require schema
68
- built_in: bool = False
68
+ is_built_in: bool = False
69
69
  type: str | None = None
70
70
  built_in_args: dict[str, Any] = Field(default_factory=dict)
71
71
 
@@ -309,7 +309,7 @@ class Tool(BaseModel):
309
309
  def for_openai_completions(
310
310
  self, *, strict: bool = True, **kwargs
311
311
  ) -> dict[str, Any]:
312
- if self.built_in:
312
+ if self.is_built_in:
313
313
  return {"type": self.type, **self.built_in_args, **kwargs}
314
314
  if strict:
315
315
  # For strict mode, remove defaults and make all parameters required
@@ -338,7 +338,7 @@ class Tool(BaseModel):
338
338
  return self.for_openai_completions(strict=strict, **kwargs)
339
339
 
340
340
  def for_openai_responses(self, **kwargs) -> dict[str, Any]:
341
- if self.built_in:
341
+ if self.is_built_in:
342
342
  return {"type": self.type, **self.built_in_args, **kwargs}
343
343
  return {
344
344
  "type": "function",
@@ -349,7 +349,7 @@ class Tool(BaseModel):
349
349
 
350
350
  def for_anthropic(self, **kwargs) -> dict[str, Any]:
351
351
  # built-in tools have "name", "type", maybe metadata
352
- if self.built_in:
352
+ if self.is_built_in:
353
353
  return {
354
354
  "name": self.name,
355
355
  "type": self.type,
@@ -392,6 +392,14 @@ class Tool(BaseModel):
392
392
  return self.for_google()
393
393
  raise ValueError(provider)
394
394
 
395
+ @classmethod
396
+ def built_in(cls, name: str, **kwargs):
397
+ if "type" in kwargs:
398
+ type = kwargs.pop("type")
399
+ else:
400
+ type = name
401
+ return cls(name=name, type=type, is_built_in=True, built_in_args=kwargs)
402
+
395
403
 
396
404
  class OpenAIMCPSpec(TypedDict):
397
405
  type: str
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: lm_deluge
3
- Version: 0.0.50
3
+ Version: 0.0.52
4
4
  Summary: Python utility for using LLM API models.
5
5
  Author-email: Benjamin Anderson <ben@trytaylor.ai>
6
6
  Requires-Python: >=3.10
@@ -3,7 +3,7 @@ lm_deluge/agent.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
3
  lm_deluge/batches.py,sha256=rQocJLyIs3Ko_nRdAE9jT__5cKWYxiIRAH_Lw3L0E1k,24653
4
4
  lm_deluge/cache.py,sha256=xO2AIYvP3tUpTMKQjwQQYfGRJSRi6e7sMlRhLjsS-u4,4873
5
5
  lm_deluge/cli.py,sha256=Ilww5gOw3J5v0NReq_Ra4hhxU4BCIJBl1oTGxJZKedc,12065
6
- lm_deluge/client.py,sha256=syXTBHkuPFWKDeY5B_OnzTQncYzY0VSSnPjbqib1ois,34379
6
+ lm_deluge/client.py,sha256=WIz1M6PLZi08Y3SFhGS3Pxs1fP8P0nKSLMYzFUdNnOQ,35814
7
7
  lm_deluge/config.py,sha256=H1tQyJDNHGFuwxqQNL5Z-CjWAC0luHSBA3iY_pxmACM,932
8
8
  lm_deluge/embed.py,sha256=CO-TOlC5kOTAM8lcnicoG4u4K664vCBwHF1vHa-nAGg,13382
9
9
  lm_deluge/errors.py,sha256=oHjt7YnxWbh-eXMScIzov4NvpJMo0-2r5J6Wh5DQ1tk,209
@@ -13,7 +13,7 @@ lm_deluge/image.py,sha256=5AMXmn2x47yXeYNfMSMAOWcnlrOxxOel-4L8QCJwU70,8928
13
13
  lm_deluge/prompt.py,sha256=2-6bALg_hOfExh9vHeKPFA6E_O8rHe6p9eIdvCulERs,59654
14
14
  lm_deluge/request_context.py,sha256=o33LSEwnK6YPhZeulUoSE_VrdKCXiCQa0tjjixK2K6M,2540
15
15
  lm_deluge/rerank.py,sha256=-NBAJdHz9OB-SWWJnHzkFmeVO4wR6lFV7Vw-SxG7aVo,11457
16
- lm_deluge/tool.py,sha256=kQbXsEgjTaW_y4ZPDRJg8G6Zhh1PXnFwxzvAnlYBv2M,15936
16
+ lm_deluge/tool.py,sha256=3weKo09E_srEKwHlz2WMVhk2BuDr5pJpi1UP0-qlcmo,16210
17
17
  lm_deluge/tracker.py,sha256=EHFPsS94NmsON2u97rSE70q1t6pwCsixUmGV-kIphMs,11531
18
18
  lm_deluge/usage.py,sha256=VMEKghePFIID5JFBObqYxFpgYxnbYm_dnHy7V1-_T6M,4866
19
19
  lm_deluge/api_requests/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
@@ -23,8 +23,8 @@ lm_deluge/api_requests/bedrock.py,sha256=GmVxXz3ERAeQ7e52Nlztt81O4H9eJOQeOnS6b65
23
23
  lm_deluge/api_requests/common.py,sha256=BZ3vRO5TB669_UsNKugkkuFSzoLHOYJIKt4nV4sf4vc,422
24
24
  lm_deluge/api_requests/gemini.py,sha256=COHqPWmeaq9fpg0YwOZqQTUbijKnXNF4cvMLnW9kLl8,7857
25
25
  lm_deluge/api_requests/mistral.py,sha256=S_LpOfCGbCVEROH_od3P-tYeNYTKFMamMTL-c_wFCBI,4597
26
- lm_deluge/api_requests/openai.py,sha256=queIeUwQnXUZpa7ebCkSACNCGY3LXnz9_St-AnWcCJU,22656
27
- lm_deluge/api_requests/response.py,sha256=Zc9kxBqB4JJIFR6OhXW-BS3ulK5JygE75JNBEpKgn5Q,5989
26
+ lm_deluge/api_requests/openai.py,sha256=frxSdQn9ZAAweSO-HMKRZ6gKU3Wdl1PqTVPhwy-iNA8,23202
27
+ lm_deluge/api_requests/response.py,sha256=ji5m-BazYRk0gdUjKFnwG5uM2sJdgxoawB2FBNzaR7Y,6017
28
28
  lm_deluge/api_requests/deprecated/bedrock.py,sha256=WrcIShCoO8JCUSlFOCHxg6KQCNTZfw3TpYTvSpYk4mA,11320
29
29
  lm_deluge/api_requests/deprecated/cohere.py,sha256=KgDScD6_bWhAzOY5BHZQKSA3kurt4KGENqC4wLsGmcU,5142
30
30
  lm_deluge/api_requests/deprecated/deepseek.py,sha256=FEApI93VAWDwuaqTooIyKMgONYqRhdUmiAPBRme-IYs,4582
@@ -66,8 +66,8 @@ lm_deluge/util/logprobs.py,sha256=UkBZakOxWluaLqHrjARu7xnJ0uCHVfLGHJdnYlEcutk,11
66
66
  lm_deluge/util/spatial.py,sha256=BsF_UKhE-x0xBirc-bV1xSKZRTUhsOBdGqsMKme20C8,4099
67
67
  lm_deluge/util/validation.py,sha256=hz5dDb3ebvZrZhnaWxOxbNSVMI6nmaOODBkk0htAUhs,1575
68
68
  lm_deluge/util/xml.py,sha256=Ft4zajoYBJR3HHCt2oHwGfymGLdvp_gegVmJ-Wqk4Ck,10547
69
- lm_deluge-0.0.50.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
70
- lm_deluge-0.0.50.dist-info/METADATA,sha256=rJjaKCn6fcI-68Ce4hjKl7nhahpWFCU0RGkU_Ud-Kn4,13443
71
- lm_deluge-0.0.50.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
72
- lm_deluge-0.0.50.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
73
- lm_deluge-0.0.50.dist-info/RECORD,,
69
+ lm_deluge-0.0.52.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
70
+ lm_deluge-0.0.52.dist-info/METADATA,sha256=X1JJBjExVA0NNXSaoB2NkOpT9f660AFe9u58BmKdb2w,13443
71
+ lm_deluge-0.0.52.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
72
+ lm_deluge-0.0.52.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
73
+ lm_deluge-0.0.52.dist-info/RECORD,,