lm-deluge 0.0.66 → 0.0.67 (py3-none-any.whl)

--- a/lm_deluge/api_requests/openai.py
+++ b/lm_deluge/api_requests/openai.py
@@ -350,7 +350,8 @@ class OpenAIResponsesRequest(APIRequestBase):
         assert self.context.status_tracker

         if status_code == 500:
-            print("Internal Server Error: ", http_response.text())
+            res_text = await http_response.text()
+            print("Internal Server Error: ", res_text)

         if status_code >= 200 and status_code < 300:
             try:
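Note on the hunk above: the removed line passed `http_response.text()` straight to `print`, but with an async HTTP client that call returns a coroutine, so the log would show a coroutine object rather than the response body. A minimal sketch of the corrected pattern, assuming an aiohttp-style client (lm_deluge's own request plumbing is not shown in this diff):

# Sketch only: assumes aiohttp; the function name and URL are illustrative.
import asyncio

import aiohttp


async def log_if_server_error(url: str) -> None:
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as http_response:
            if http_response.status == 500:
                # ClientResponse.text() is a coroutine; without `await` this
                # would print "<coroutine object ...>" instead of the body.
                res_text = await http_response.text()
                print("Internal Server Error: ", res_text)


if __name__ == "__main__":
    asyncio.run(log_if_server_error("https://httpbin.org/status/500"))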
@@ -362,126 +363,138 @@ class OpenAIResponsesRequest(APIRequestBase):
                 )
             if not is_error:
                 assert data is not None, "data is None"
-                try:
-                    # Parse Responses API format
-                    parts = []

-                    # Get the output array from the response
-                    output = data.get("output", [])
-                    if not output:
-                        is_error = True
-                        error_message = "No output in response"
-                    else:
-                        # Process each output item
-                        for item in output:
-                            if item.get("type") == "message":
-                                message_content = item.get("content", [])
-                                for content_item in message_content:
-                                    if content_item.get("type") == "output_text":
-                                        parts.append(Text(content_item["text"]))
-                                    elif content_item.get("type") == "refusal":
-                                        parts.append(Text(content_item["refusal"]))
-                            elif item.get("type") == "reasoning":
-                                summary = item["summary"]
-                                if not summary:
-                                    continue
-                                if isinstance(summary, list) and len(summary) > 0:
-                                    summary = summary[0]
-                                assert isinstance(summary, dict), "summary isn't a dict"
-                                parts.append(Thinking(summary["text"]))
-                            elif item.get("type") == "function_call":
-                                parts.append(
-                                    ToolCall(
-                                        id=item["call_id"],
-                                        name=item["name"],
-                                        arguments=json.loads(item["arguments"]),
+                # Check if response is incomplete
+                if data.get("status") == "incomplete":
+                    is_error = True
+                    incomplete_reason = data.get("incomplete_details", {}).get(
+                        "reason", "unknown"
+                    )
+                    error_message = f"Response incomplete: {incomplete_reason}"
+
+                if not is_error:
+                    try:
+                        # Parse Responses API format
+                        parts = []
+
+                        # Get the output array from the response
+                        output = data.get("output", [])
+                        if not output:
+                            is_error = True
+                            error_message = "No output in response"
+                        else:
+                            # Process each output item
+                            for item in output:
+                                if item.get("type") == "message":
+                                    message_content = item.get("content", [])
+                                    for content_item in message_content:
+                                        if content_item.get("type") == "output_text":
+                                            parts.append(Text(content_item["text"]))
+                                        elif content_item.get("type") == "refusal":
+                                            parts.append(Text(content_item["refusal"]))
+                                elif item.get("type") == "reasoning":
+                                    summary = item["summary"]
+                                    if not summary:
+                                        continue
+                                    if isinstance(summary, list) and len(summary) > 0:
+                                        summary = summary[0]
+                                    assert isinstance(
+                                        summary, dict
+                                    ), "summary isn't a dict"
+                                    parts.append(Thinking(summary["text"]))
+                                elif item.get("type") == "function_call":
+                                    parts.append(
+                                        ToolCall(
+                                            id=item["call_id"],
+                                            name=item["name"],
+                                            arguments=json.loads(item["arguments"]),
+                                        )
                                     )
-                                )
-                            elif item.get("type") == "mcp_call":
-                                parts.append(
-                                    ToolCall(
-                                        id=item["id"],
-                                        name=item["name"],
-                                        arguments=json.loads(item["arguments"]),
-                                        built_in=True,
-                                        built_in_type="mcp_call",
-                                        extra_body={
-                                            "server_label": item["server_label"],
-                                            "error": item.get("error"),
-                                            "output": item.get("output"),
-                                        },
+                                elif item.get("type") == "mcp_call":
+                                    parts.append(
+                                        ToolCall(
+                                            id=item["id"],
+                                            name=item["name"],
+                                            arguments=json.loads(item["arguments"]),
+                                            built_in=True,
+                                            built_in_type="mcp_call",
+                                            extra_body={
+                                                "server_label": item["server_label"],
+                                                "error": item.get("error"),
+                                                "output": item.get("output"),
+                                            },
+                                        )
                                     )
-                                )

-                            elif item.get("type") == "computer_call":
-                                parts.append(
-                                    ToolCall(
-                                        id=item["call_id"],
-                                        name="computer_call",
-                                        arguments=item.get("action"),
-                                        built_in=True,
-                                        built_in_type="computer_call",
+                                elif item.get("type") == "computer_call":
+                                    parts.append(
+                                        ToolCall(
+                                            id=item["call_id"],
+                                            name="computer_call",
+                                            arguments=item.get("action"),
+                                            built_in=True,
+                                            built_in_type="computer_call",
+                                        )
                                     )
-                                )

-                            elif item.get("type") == "web_search_call":
-                                parts.append(
-                                    ToolCall(
-                                        id=item["id"],
-                                        name="web_search_call",
-                                        arguments={},
-                                        built_in=True,
-                                        built_in_type="web_search_call",
-                                        extra_body={"status": item["status"]},
+                                elif item.get("type") == "web_search_call":
+                                    parts.append(
+                                        ToolCall(
+                                            id=item["id"],
+                                            name="web_search_call",
+                                            arguments={},
+                                            built_in=True,
+                                            built_in_type="web_search_call",
+                                            extra_body={"status": item["status"]},
+                                        )
                                     )
-                                )

-                            elif item.get("type") == "file_search_call":
-                                parts.append(
-                                    ToolCall(
-                                        id=item["id"],
-                                        name="file_search_call",
-                                        arguments={"queries": item["queries"]},
-                                        built_in=True,
-                                        built_in_type="file_search_call",
-                                        extra_body={
-                                            "status": item["status"],
-                                            "results": item["results"],
-                                        },
+                                elif item.get("type") == "file_search_call":
+                                    parts.append(
+                                        ToolCall(
+                                            id=item["id"],
+                                            name="file_search_call",
+                                            arguments={"queries": item["queries"]},
+                                            built_in=True,
+                                            built_in_type="file_search_call",
+                                            extra_body={
+                                                "status": item["status"],
+                                                "results": item["results"],
+                                            },
+                                        )
                                     )
-                                )
-                            elif item.get("type") == "image_generation_call":
-                                parts.append(
-                                    ToolCall(
-                                        id=item["id"],
-                                        name="image_generation_call",
-                                        arguments={},
-                                        built_in=True,
-                                        built_in_type="image_generation_call",
-                                        extra_body={
-                                            "status": item["status"],
-                                            "result": item["result"],
-                                        },
+                                elif item.get("type") == "image_generation_call":
+                                    parts.append(
+                                        ToolCall(
+                                            id=item["id"],
+                                            name="image_generation_call",
+                                            arguments={},
+                                            built_in=True,
+                                            built_in_type="image_generation_call",
+                                            extra_body={
+                                                "status": item["status"],
+                                                "result": item["result"],
+                                            },
+                                        )
                                     )
-                                )

-                    # Handle reasoning if present
-                    if "reasoning" in data and data["reasoning"].get("summary"):
-                        thinking = data["reasoning"]["summary"]
-                        parts.append(Thinking(thinking))
+                        # Handle reasoning if present
+                        if "reasoning" in data and data["reasoning"].get("summary"):
+                            thinking = data["reasoning"]["summary"]
+                            parts.append(Thinking(thinking))

-                    content = Message("assistant", parts)
+                        content = Message("assistant", parts)

-                    # Extract usage information
-                    if "usage" in data and data["usage"] is not None:
-                        usage = Usage.from_openai_usage(data["usage"])
+                        # Extract usage information
+                        if "usage" in data and data["usage"] is not None:
+                            usage = Usage.from_openai_usage(data["usage"])

-                except Exception as e:
-                    is_error = True
-                    error_message = f"Error parsing {self.model.name} responses API response: {str(e)}"
-                    print("got data:", data)
-                    traceback = tb.format_exc()
-                    print(f"Error details:\n{traceback}")
+                    except Exception as e:
+                        is_error = True
+                        error_message = f"Error parsing {self.model.name} responses API response: {str(e)}"
+                        print("got data:", data)
+                        traceback = tb.format_exc()
+                        print(f"Error details:\n{traceback}")

         elif mimetype and "json" in mimetype.lower():
             print("is_error True, json response")
--- a/lm_deluge-0.0.66.dist-info/METADATA
+++ b/lm_deluge-0.0.67.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.66
+Version: 0.0.67
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
--- a/lm_deluge-0.0.66.dist-info/RECORD
+++ b/lm_deluge-0.0.67.dist-info/RECORD
@@ -22,7 +22,7 @@ lm_deluge/api_requests/bedrock.py,sha256=Uppne03GcIEk1tVYzoGu7GXK2Sg94a_xvFTLDRN
 lm_deluge/api_requests/common.py,sha256=BZ3vRO5TB669_UsNKugkkuFSzoLHOYJIKt4nV4sf4vc,422
 lm_deluge/api_requests/gemini.py,sha256=4uD7fQl0yWyAvYkPNi3oO1InBnvYfo5_QR6k-va-2GI,7838
 lm_deluge/api_requests/mistral.py,sha256=8JZP2CDf1XZfaPcTk0WS4q-VfYYj58ptpoH8LD3MQG4,4528
-lm_deluge/api_requests/openai.py,sha256=IyCOzUe5177kFLHoW1O8GTnptamPXrbR_KWMFpXYURQ,24959
+lm_deluge/api_requests/openai.py,sha256=d1Ddf5sSutx9Ti1riwOEkeADnhYG7Y4vQm2DOhKl67I,25925
 lm_deluge/api_requests/response.py,sha256=vG194gAH5p7ulpNy4qy5Pryfb1p3ZV21-YGoj__ru3E,7436
 lm_deluge/api_requests/deprecated/bedrock.py,sha256=WrcIShCoO8JCUSlFOCHxg6KQCNTZfw3TpYTvSpYk4mA,11320
 lm_deluge/api_requests/deprecated/cohere.py,sha256=KgDScD6_bWhAzOY5BHZQKSA3kurt4KGENqC4wLsGmcU,5142
@@ -65,8 +65,8 @@ lm_deluge/util/logprobs.py,sha256=UkBZakOxWluaLqHrjARu7xnJ0uCHVfLGHJdnYlEcutk,11
 lm_deluge/util/spatial.py,sha256=BsF_UKhE-x0xBirc-bV1xSKZRTUhsOBdGqsMKme20C8,4099
 lm_deluge/util/validation.py,sha256=hz5dDb3ebvZrZhnaWxOxbNSVMI6nmaOODBkk0htAUhs,1575
 lm_deluge/util/xml.py,sha256=Ft4zajoYBJR3HHCt2oHwGfymGLdvp_gegVmJ-Wqk4Ck,10547
-lm_deluge-0.0.66.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
-lm_deluge-0.0.66.dist-info/METADATA,sha256=6Psxlb_Av6dX0KZ0rZFLy8pM5rqBd32ho7gvsrlssP4,13443
-lm_deluge-0.0.66.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lm_deluge-0.0.66.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
-lm_deluge-0.0.66.dist-info/RECORD,,
+lm_deluge-0.0.67.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
+lm_deluge-0.0.67.dist-info/METADATA,sha256=I15oTqc18y9F5bfeURskxLXXgkJ1AZ-ffA-xfUJRKTo,13443
+lm_deluge-0.0.67.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lm_deluge-0.0.67.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
+lm_deluge-0.0.67.dist-info/RECORD,,