lm-deluge 0.0.66__py3-none-any.whl → 0.0.68__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lm_deluge/api_requests/openai.py +120 -107
- lm_deluge/client.py +9 -8
- {lm_deluge-0.0.66.dist-info → lm_deluge-0.0.68.dist-info}/METADATA +1 -1
- {lm_deluge-0.0.66.dist-info → lm_deluge-0.0.68.dist-info}/RECORD +7 -7
- {lm_deluge-0.0.66.dist-info → lm_deluge-0.0.68.dist-info}/WHEEL +0 -0
- {lm_deluge-0.0.66.dist-info → lm_deluge-0.0.68.dist-info}/licenses/LICENSE +0 -0
- {lm_deluge-0.0.66.dist-info → lm_deluge-0.0.68.dist-info}/top_level.txt +0 -0
lm_deluge/api_requests/openai.py
CHANGED
@@ -350,7 +350,8 @@ class OpenAIResponsesRequest(APIRequestBase):
         assert self.context.status_tracker

         if status_code == 500:
-            …
+            res_text = await http_response.text()
+            print("Internal Server Error: ", res_text)

         if status_code >= 200 and status_code < 300:
             try:
@@ -362,126 +363,138 @@ class OpenAIResponsesRequest(APIRequestBase):
                 )
                 if not is_error:
                     assert data is not None, "data is None"
-                    try:
-                        # Parse Responses API format
-                        parts = []

-                        …
+                    # Check if response is incomplete
+                    if data.get("status") == "incomplete":
+                        is_error = True
+                        incomplete_reason = data.get("incomplete_details", {}).get(
+                            "reason", "unknown"
+                        )
+                        error_message = f"Response incomplete: {incomplete_reason}"
+
+                    if not is_error:
+                        try:
+                            # Parse Responses API format
+                            parts = []
+
+                            # Get the output array from the response
+                            output = data.get("output", [])
+                            if not output:
+                                is_error = True
+                                error_message = "No output in response"
+                            else:
+                                # Process each output item
+                                for item in output:
+                                    if item.get("type") == "message":
+                                        message_content = item.get("content", [])
+                                        for content_item in message_content:
+                                            if content_item.get("type") == "output_text":
+                                                parts.append(Text(content_item["text"]))
+                                            elif content_item.get("type") == "refusal":
+                                                parts.append(Text(content_item["refusal"]))
+                                    elif item.get("type") == "reasoning":
+                                        summary = item["summary"]
+                                        if not summary:
+                                            continue
+                                        if isinstance(summary, list) and len(summary) > 0:
+                                            summary = summary[0]
+                                        assert isinstance(
+                                            summary, dict
+                                        ), "summary isn't a dict"
+                                        parts.append(Thinking(summary["text"]))
+                                    elif item.get("type") == "function_call":
+                                        parts.append(
+                                            ToolCall(
+                                                id=item["call_id"],
+                                                name=item["name"],
+                                                arguments=json.loads(item["arguments"]),
+                                            )
                                         )
-                                        )
-                        …
+                                    elif item.get("type") == "mcp_call":
+                                        parts.append(
+                                            ToolCall(
+                                                id=item["id"],
+                                                name=item["name"],
+                                                arguments=json.loads(item["arguments"]),
+                                                built_in=True,
+                                                built_in_type="mcp_call",
+                                                extra_body={
+                                                    "server_label": item["server_label"],
+                                                    "error": item.get("error"),
+                                                    "output": item.get("output"),
+                                                },
+                                            )
                                         )
-                                        )

-                        …
+                                    elif item.get("type") == "computer_call":
+                                        parts.append(
+                                            ToolCall(
+                                                id=item["call_id"],
+                                                name="computer_call",
+                                                arguments=item.get("action"),
+                                                built_in=True,
+                                                built_in_type="computer_call",
+                                            )
                                         )
-                                        )

-                        …
+                                    elif item.get("type") == "web_search_call":
+                                        parts.append(
+                                            ToolCall(
+                                                id=item["id"],
+                                                name="web_search_call",
+                                                arguments={},
+                                                built_in=True,
+                                                built_in_type="web_search_call",
+                                                extra_body={"status": item["status"]},
+                                            )
                                         )
-                                        )

-                        …
+                                    elif item.get("type") == "file_search_call":
+                                        parts.append(
+                                            ToolCall(
+                                                id=item["id"],
+                                                name="file_search_call",
+                                                arguments={"queries": item["queries"]},
+                                                built_in=True,
+                                                built_in_type="file_search_call",
+                                                extra_body={
+                                                    "status": item["status"],
+                                                    "results": item["results"],
+                                                },
+                                            )
                                         )
-                                        )
-                        …
+                                    elif item.get("type") == "image_generation_call":
+                                        parts.append(
+                                            ToolCall(
+                                                id=item["id"],
+                                                name="image_generation_call",
+                                                arguments={},
+                                                built_in=True,
+                                                built_in_type="image_generation_call",
+                                                extra_body={
+                                                    "status": item["status"],
+                                                    "result": item["result"],
+                                                },
+                                            )
                                         )
-                                        )

-                        …
+                            # Handle reasoning if present
+                            if "reasoning" in data and data["reasoning"].get("summary"):
+                                thinking = data["reasoning"]["summary"]
+                                parts.append(Thinking(thinking))

-                        …
+                            content = Message("assistant", parts)

-                        …
+                            # Extract usage information
+                            if "usage" in data and data["usage"] is not None:
+                                usage = Usage.from_openai_usage(data["usage"])

-                        …
+                        except Exception as e:
+                            is_error = True
+                            error_message = f"Error parsing {self.model.name} responses API response: {str(e)}"
+                            print("got data:", data)
+                            traceback = tb.format_exc()
+                            print(f"Error details:\n{traceback}")

         elif mimetype and "json" in mimetype.lower():
             print("is_error True, json response")
lm_deluge/client.py
CHANGED
@@ -702,7 +702,7 @@ class _LLMClient(BaseModel):

    async def start(
        self,
-        prompt: …
+        prompt: Prompt,
        *,
        tools: list[Tool | dict | MCPServer] | None = None,
        cache: CachePattern | None = None,
@@ -780,12 +780,12 @@ class _LLMClient(BaseModel):

    async def stream(
        self,
-        prompt: …
+        prompt: Prompt,
        tools: list[Tool | dict | MCPServer] | None = None,
    ):
        model, sampling_params = self._select_model()
-        …
-        …
+        prompt = prompts_to_conversations([prompt])[0]
+        assert isinstance(prompt, Conversation)
        async for item in stream_chat(
            model, prompt, sampling_params, tools, None, self.extra_headers
        ):
@@ -799,7 +799,7 @@ class _LLMClient(BaseModel):

    async def run_agent_loop(
        self,
-        conversation: …
+        conversation: Prompt,
        *,
        tools: list[Tool | dict | MCPServer] | None = None,
        max_rounds: int = 5,
@@ -812,8 +812,9 @@ class _LLMClient(BaseModel):
        instances or built-in tool dictionaries.
        """

-        if isinstance(conversation, …
-        conversation = …
+        if not isinstance(conversation, Conversation):
+            conversation = prompts_to_conversations([conversation])[0]
+        assert isinstance(conversation, Conversation)

        # Expand MCPServer objects to their constituent tools for tool execution
        expanded_tools: list[Tool] = []
@@ -870,7 +871,7 @@ class _LLMClient(BaseModel):

    def run_agent_loop_sync(
        self,
-        conversation: …
+        conversation: Prompt,
        *,
        tools: list[Tool | dict | MCPServer] | None = None,
        max_rounds: int = 5,
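The common thread in these changes: the public entry points now accept any Prompt (for example a plain string) and normalize it to a Conversation up front rather than requiring callers to build one. A minimal sketch of that normalize-then-assert pattern, using simplified stand-ins for the package's Conversation type and prompts_to_conversations helper (the real ones live in lm_deluge; everything below is illustrative only):

from dataclasses import dataclass

# Hypothetical stand-in for lm_deluge's Conversation.
@dataclass
class Conversation:
    messages: list

def prompts_to_conversations(prompts: list) -> list[Conversation]:
    """Coerce strings to Conversations; pass Conversations through unchanged."""
    out = []
    for p in prompts:
        if isinstance(p, Conversation):
            out.append(p)
        elif isinstance(p, str):
            out.append(Conversation(messages=[("user", p)]))
        else:
            raise TypeError(f"unsupported prompt type: {type(p)!r}")
    return out

# The pattern from the diff: normalize first, then assert the invariant
# so the rest of the method can rely on having a Conversation.
prompt = "What's the capital of France?"
prompt = prompts_to_conversations([prompt])[0]
assert isinstance(prompt, Conversation)
print(prompt)

In run_agent_loop the same normalization is guarded by `if not isinstance(conversation, Conversation):`, so an existing Conversation passes through untouched.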
{lm_deluge-0.0.66.dist-info → lm_deluge-0.0.68.dist-info}/RECORD
CHANGED
@@ -2,7 +2,7 @@ lm_deluge/__init__.py,sha256=LKKIcqQoQyDpTck6fnB7iAs75BnfNNa3Bj5Nz7KU4Hk,376
 lm_deluge/batches.py,sha256=Km6QM5_7BlF2qEyo4WPlhkaZkpzrLqf50AaveHXQOoY,25127
 lm_deluge/cache.py,sha256=xO2AIYvP3tUpTMKQjwQQYfGRJSRi6e7sMlRhLjsS-u4,4873
 lm_deluge/cli.py,sha256=Ilww5gOw3J5v0NReq_Ra4hhxU4BCIJBl1oTGxJZKedc,12065
-lm_deluge/client.py,sha256=…
+lm_deluge/client.py,sha256=nBKuP6buwQYNMCP9f2SOuPkfowKRijJv4-bI-STg7Iw,40824
 lm_deluge/config.py,sha256=s3wFBRD6pi0wtXMJRmQDT2vdiqSvhjUPmLehbkv41i0,943
 lm_deluge/embed.py,sha256=CO-TOlC5kOTAM8lcnicoG4u4K664vCBwHF1vHa-nAGg,13382
 lm_deluge/errors.py,sha256=oHjt7YnxWbh-eXMScIzov4NvpJMo0-2r5J6Wh5DQ1tk,209
@@ -22,7 +22,7 @@ lm_deluge/api_requests/bedrock.py,sha256=Uppne03GcIEk1tVYzoGu7GXK2Sg94a_xvFTLDRN
 lm_deluge/api_requests/common.py,sha256=BZ3vRO5TB669_UsNKugkkuFSzoLHOYJIKt4nV4sf4vc,422
 lm_deluge/api_requests/gemini.py,sha256=4uD7fQl0yWyAvYkPNi3oO1InBnvYfo5_QR6k-va-2GI,7838
 lm_deluge/api_requests/mistral.py,sha256=8JZP2CDf1XZfaPcTk0WS4q-VfYYj58ptpoH8LD3MQG4,4528
-lm_deluge/api_requests/openai.py,sha256=…
+lm_deluge/api_requests/openai.py,sha256=d1Ddf5sSutx9Ti1riwOEkeADnhYG7Y4vQm2DOhKl67I,25925
 lm_deluge/api_requests/response.py,sha256=vG194gAH5p7ulpNy4qy5Pryfb1p3ZV21-YGoj__ru3E,7436
 lm_deluge/api_requests/deprecated/bedrock.py,sha256=WrcIShCoO8JCUSlFOCHxg6KQCNTZfw3TpYTvSpYk4mA,11320
 lm_deluge/api_requests/deprecated/cohere.py,sha256=KgDScD6_bWhAzOY5BHZQKSA3kurt4KGENqC4wLsGmcU,5142
@@ -65,8 +65,8 @@ lm_deluge/util/logprobs.py,sha256=UkBZakOxWluaLqHrjARu7xnJ0uCHVfLGHJdnYlEcutk,11
 lm_deluge/util/spatial.py,sha256=BsF_UKhE-x0xBirc-bV1xSKZRTUhsOBdGqsMKme20C8,4099
 lm_deluge/util/validation.py,sha256=hz5dDb3ebvZrZhnaWxOxbNSVMI6nmaOODBkk0htAUhs,1575
 lm_deluge/util/xml.py,sha256=Ft4zajoYBJR3HHCt2oHwGfymGLdvp_gegVmJ-Wqk4Ck,10547
-lm_deluge-0.0.66.dist-info/licenses/LICENSE,sha256=…
-lm_deluge-0.0.66.dist-info/METADATA,sha256=…
-lm_deluge-0.0.66.dist-info/WHEEL,sha256=…
-lm_deluge-0.0.66.dist-info/top_level.txt,sha256=…
-lm_deluge-0.0.66.dist-info/RECORD,,
+lm_deluge-0.0.68.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
+lm_deluge-0.0.68.dist-info/METADATA,sha256=dSD-PnK2RiwWGsC_-ui4gJ6cQvcXQODAKfod6xUjoXQ,13443
+lm_deluge-0.0.68.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lm_deluge-0.0.68.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
+lm_deluge-0.0.68.dist-info/RECORD,,
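For reference, each RECORD line has the form path,sha256=<digest>,<size>, where the digest is the URL-safe base64 encoding of the file's SHA-256 hash with trailing "=" padding stripped, per the wheel spec. A short sketch for recomputing an entry, e.g. to verify the hashes above against an unpacked wheel:

import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    """Build a wheel RECORD line: path,sha256=<urlsafe-b64 digest>,<size>."""
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
    # Strip the "=" padding, as the wheel spec requires.
    return f"{path},sha256={digest.rstrip(b'=').decode()},{len(data)}"

# e.g. record_entry("lm_deluge/client.py"), run from the root of the unpacked
# 0.0.68 wheel, should reproduce the corresponding line above.
print(record_entry(__file__))  # demo on this script itself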
{lm_deluge-0.0.66.dist-info → lm_deluge-0.0.68.dist-info}/WHEEL
File without changes

{lm_deluge-0.0.66.dist-info → lm_deluge-0.0.68.dist-info}/licenses/LICENSE
File without changes

{lm_deluge-0.0.66.dist-info → lm_deluge-0.0.68.dist-info}/top_level.txt
File without changes