lm-deluge 0.0.67__py3-none-any.whl → 0.0.90__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (108)
  1. lm_deluge/__init__.py +1 -2
  2. lm_deluge/api_requests/anthropic.py +117 -22
  3. lm_deluge/api_requests/base.py +84 -11
  4. lm_deluge/api_requests/bedrock.py +30 -6
  5. lm_deluge/api_requests/chat_reasoning.py +4 -0
  6. lm_deluge/api_requests/gemini.py +166 -20
  7. lm_deluge/api_requests/openai.py +145 -25
  8. lm_deluge/batches.py +15 -45
  9. lm_deluge/client.py +309 -50
  10. lm_deluge/config.py +15 -3
  11. lm_deluge/models/__init__.py +14 -1
  12. lm_deluge/models/anthropic.py +29 -14
  13. lm_deluge/models/arcee.py +16 -0
  14. lm_deluge/models/deepseek.py +36 -4
  15. lm_deluge/models/google.py +42 -0
  16. lm_deluge/models/grok.py +24 -0
  17. lm_deluge/models/kimi.py +36 -0
  18. lm_deluge/models/minimax.py +18 -0
  19. lm_deluge/models/openai.py +100 -0
  20. lm_deluge/models/openrouter.py +133 -7
  21. lm_deluge/models/together.py +11 -0
  22. lm_deluge/models/zai.py +50 -0
  23. lm_deluge/pipelines/gepa/__init__.py +95 -0
  24. lm_deluge/pipelines/gepa/core.py +354 -0
  25. lm_deluge/pipelines/gepa/docs/samples.py +705 -0
  26. lm_deluge/pipelines/gepa/examples/01_synthetic_keywords.py +140 -0
  27. lm_deluge/pipelines/gepa/examples/02_gsm8k_math.py +261 -0
  28. lm_deluge/pipelines/gepa/examples/03_hotpotqa_multihop.py +300 -0
  29. lm_deluge/pipelines/gepa/examples/04_batch_classification.py +271 -0
  30. lm_deluge/pipelines/gepa/examples/simple_qa.py +129 -0
  31. lm_deluge/pipelines/gepa/optimizer.py +435 -0
  32. lm_deluge/pipelines/gepa/proposer.py +235 -0
  33. lm_deluge/pipelines/gepa/util.py +165 -0
  34. lm_deluge/{llm_tools → pipelines}/score.py +2 -2
  35. lm_deluge/{llm_tools → pipelines}/translate.py +5 -3
  36. lm_deluge/prompt.py +537 -88
  37. lm_deluge/request_context.py +7 -2
  38. lm_deluge/server/__init__.py +24 -0
  39. lm_deluge/server/__main__.py +144 -0
  40. lm_deluge/server/adapters.py +369 -0
  41. lm_deluge/server/app.py +388 -0
  42. lm_deluge/server/auth.py +71 -0
  43. lm_deluge/server/model_policy.py +215 -0
  44. lm_deluge/server/models_anthropic.py +172 -0
  45. lm_deluge/server/models_openai.py +175 -0
  46. lm_deluge/tool/__init__.py +1130 -0
  47. lm_deluge/tool/builtin/anthropic/__init__.py +300 -0
  48. lm_deluge/tool/builtin/anthropic/bash.py +0 -0
  49. lm_deluge/tool/builtin/anthropic/computer_use.py +0 -0
  50. lm_deluge/tool/builtin/gemini.py +59 -0
  51. lm_deluge/tool/builtin/openai.py +74 -0
  52. lm_deluge/tool/cua/__init__.py +173 -0
  53. lm_deluge/tool/cua/actions.py +148 -0
  54. lm_deluge/tool/cua/base.py +27 -0
  55. lm_deluge/tool/cua/batch.py +215 -0
  56. lm_deluge/tool/cua/converters.py +466 -0
  57. lm_deluge/tool/cua/kernel.py +702 -0
  58. lm_deluge/tool/cua/trycua.py +989 -0
  59. lm_deluge/tool/prefab/__init__.py +45 -0
  60. lm_deluge/tool/prefab/batch_tool.py +156 -0
  61. lm_deluge/tool/prefab/docs.py +1119 -0
  62. lm_deluge/tool/prefab/email.py +294 -0
  63. lm_deluge/tool/prefab/filesystem.py +1711 -0
  64. lm_deluge/tool/prefab/full_text_search/__init__.py +285 -0
  65. lm_deluge/tool/prefab/full_text_search/tantivy_index.py +396 -0
  66. lm_deluge/tool/prefab/memory.py +458 -0
  67. lm_deluge/tool/prefab/otc/__init__.py +165 -0
  68. lm_deluge/tool/prefab/otc/executor.py +281 -0
  69. lm_deluge/tool/prefab/otc/parse.py +188 -0
  70. lm_deluge/tool/prefab/random.py +212 -0
  71. lm_deluge/tool/prefab/rlm/__init__.py +296 -0
  72. lm_deluge/tool/prefab/rlm/executor.py +349 -0
  73. lm_deluge/tool/prefab/rlm/parse.py +144 -0
  74. lm_deluge/tool/prefab/sandbox/__init__.py +19 -0
  75. lm_deluge/tool/prefab/sandbox/daytona_sandbox.py +483 -0
  76. lm_deluge/tool/prefab/sandbox/docker_sandbox.py +609 -0
  77. lm_deluge/tool/prefab/sandbox/fargate_sandbox.py +546 -0
  78. lm_deluge/tool/prefab/sandbox/modal_sandbox.py +469 -0
  79. lm_deluge/tool/prefab/sandbox/seatbelt_sandbox.py +827 -0
  80. lm_deluge/tool/prefab/sheets.py +385 -0
  81. lm_deluge/tool/prefab/skills.py +0 -0
  82. lm_deluge/tool/prefab/subagents.py +233 -0
  83. lm_deluge/tool/prefab/todos.py +342 -0
  84. lm_deluge/tool/prefab/tool_search.py +169 -0
  85. lm_deluge/tool/prefab/web_search.py +199 -0
  86. lm_deluge/tracker.py +16 -13
  87. lm_deluge/util/schema.py +412 -0
  88. lm_deluge/warnings.py +8 -0
  89. {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.90.dist-info}/METADATA +23 -9
  90. lm_deluge-0.0.90.dist-info/RECORD +132 -0
  91. lm_deluge/built_in_tools/anthropic/__init__.py +0 -128
  92. lm_deluge/built_in_tools/openai.py +0 -28
  93. lm_deluge/presets/cerebras.py +0 -17
  94. lm_deluge/presets/meta.py +0 -13
  95. lm_deluge/tool.py +0 -849
  96. lm_deluge-0.0.67.dist-info/RECORD +0 -72
  97. lm_deluge/{llm_tools → pipelines}/__init__.py +1 -1
  98. /lm_deluge/{llm_tools → pipelines}/classify.py +0 -0
  99. /lm_deluge/{llm_tools → pipelines}/extract.py +0 -0
  100. /lm_deluge/{llm_tools → pipelines}/locate.py +0 -0
  101. /lm_deluge/{llm_tools → pipelines}/ocr.py +0 -0
  102. /lm_deluge/{built_in_tools/anthropic/bash.py → skills/anthropic.py} +0 -0
  103. /lm_deluge/{built_in_tools/anthropic/computer_use.py → skills/compat.py} +0 -0
  104. /lm_deluge/{built_in_tools → tool/builtin}/anthropic/editor.py +0 -0
  105. /lm_deluge/{built_in_tools → tool/builtin}/base.py +0 -0
  106. {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.90.dist-info}/WHEEL +0 -0
  107. {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.90.dist-info}/licenses/LICENSE +0 -0
  108. {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.90.dist-info}/top_level.txt +0 -0
lm_deluge/pipelines/gepa/util.py (new file)
@@ -0,0 +1,165 @@
+"""
+Utility functions for GEPA.
+
+Includes conversation formatting and text extraction helpers.
+"""
+
+from __future__ import annotations
+
+import re
+from typing import Any
+
+from lm_deluge.prompt import Conversation
+
+
+def format_conversation_compact(conversation: Conversation) -> str:
+    """
+    Format a Conversation for showing to the proposer LLM.
+
+    Goals:
+    - Show full user and assistant message content
+    - Show tool calls with their arguments
+    - Abbreviate tool results (just show placeholder, not full content)
+    - No decorative separators, keep it compact
+
+    Args:
+        conversation: The conversation to format
+
+    Returns:
+        A string representation suitable for including in a prompt
+    """
+    lines: list[str] = []
+
+    # Check for system message (first message with role="system")
+    for msg in conversation.messages:
+        if msg.role == "system":
+            lines.append(f"[system]\n{msg.completion}")
+            lines.append("")
+            break
+
+    for msg in conversation.messages:
+        role = msg.role
+
+        if role == "system":
+            # Already handled above
+            continue
+
+        if role == "user":
+            text_content = msg.completion or ""
+            lines.append(f"[user]\n{text_content}")
+
+        elif role == "assistant":
+            # Handle text content
+            text_content = msg.completion or ""
+            if text_content:
+                lines.append(f"[assistant]\n{text_content}")
+
+            # Handle tool calls
+            if msg.tool_calls:
+                for tc in msg.tool_calls:
+                    tool_name = tc.name
+                    # Format arguments compactly
+                    args_str = _format_tool_args(tc.arguments)
+                    lines.append(f"[tool_call: {tool_name}]\n{args_str}")
+
+        elif role == "tool":
+            # Just show placeholder for tool results - content can be huge
+            # Try to get tool names from tool_results
+            if msg.tool_results:
+                for tr in msg.tool_results:
+                    tool_id = getattr(tr, "tool_call_id", "unknown")
+                    lines.append(f"[tool_result: {tool_id}] (content omitted)")
+            else:
+                lines.append("[tool_result] (content omitted)")
+
+        lines.append("")
+
+    return "\n".join(lines).strip()
+
+
+def _format_tool_args(arguments: dict[str, Any] | str | None) -> str:
+    """Format tool call arguments compactly."""
+    if arguments is None:
+        return "(no arguments)"
+
+    if isinstance(arguments, str):
+        # Already a string (might be JSON string)
+        return arguments[:500] + "..." if len(arguments) > 500 else arguments
+
+    if isinstance(arguments, dict):
+        # Format as key=value pairs
+        parts = []
+        for key, value in arguments.items():
+            value_str = str(value)
+            # Truncate long values
+            if len(value_str) > 200:
+                value_str = value_str[:200] + "..."
+            parts.append(f" {key}: {value_str}")
+        return "\n".join(parts) if parts else "(no arguments)"
+
+    return str(arguments)
+
+
+def extract_text_from_response(response: str) -> str:
+    """
+    Extract text from between ``` blocks in LLM response.
+
+    Handles various formats:
+    - ```text``` or ```language\ntext```
+    - Incomplete blocks
+    - No blocks (returns trimmed response)
+    """
+    # Find content between first and last ```
+    start = response.find("```")
+    if start == -1:
+        return response.strip()
+
+    start += 3
+    end = response.rfind("```")
+
+    if end <= start:
+        # Handle incomplete blocks
+        stripped = response.strip()
+        if stripped.startswith("```"):
+            match = re.match(r"^```\S*\n?", response)
+            if match:
+                return response[match.end() :].strip()
+        elif stripped.endswith("```"):
+            return stripped[:-3].strip()
+        return stripped
+
+    # Skip language specifier (e.g., ```python\n)
+    content = response[start:end]
+    match = re.match(r"^\S*\n", content)
+    if match:
+        content = content[match.end() :]
+
+    return content.strip()
+
+
+def format_components_for_prompt(
+    component_values: dict[str, str],
+    component_descriptions: dict[str, str],
+) -> str:
+    """
+    Format components for showing to the proposer.
+
+    Args:
+        component_values: Current text value for each component
+        component_descriptions: Description of what each component does
+
+    Returns:
+        Formatted string listing all components
+    """
+    lines = []
+    for name, value in component_values.items():
+        description = component_descriptions.get(name, "")
+        lines.append(f"### {name}")
+        if description:
+            lines.append(f"*{description}*")
+        lines.append("```")
+        lines.append(value)
+        lines.append("```")
+        lines.append("")
+
+    return "\n".join(lines)
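
For reference, the fence-stripping behavior documented in extract_text_from_response above works out like this (a quick sanity check written for this review, not part of the released diff; the import path follows the file list above):

from lm_deluge.pipelines.gepa.util import extract_text_from_response

# Complete block: the fences and the language specifier are stripped.
assert extract_text_from_response("```python\nprint('hi')\n```") == "print('hi')"

# Dangling opening fence: handled by the incomplete-block branch.
assert extract_text_from_response("```text\npartial answer") == "partial answer"

# No fences at all: the trimmed response comes back unchanged.
assert extract_text_from_response("  plain answer  ") == "plain answer"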

lm_deluge/{llm_tools → pipelines}/score.py
@@ -1,4 +1,4 @@
-from ..client import LLMClient, APIResponse
+from ..client import _LLMClient, APIResponse
 from ..util.logprobs import extract_prob
 
 # def extract_prob_yes(logprobs: list[dict]):
@@ -24,7 +24,7 @@ from ..util.logprobs import extract_prob
 def score_llm(
     scoring_prompt_template: str,
     inputs: list[tuple | list | dict],  # to format the template
-    scoring_model: LLMClient,
+    scoring_model: _LLMClient,
     return_probabilities: bool,
     yes_token: str = "yes",
 ) -> list[bool | None] | list[float | None]:
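
Only the annotation changes here; the call shape is untouched. A minimal usage sketch against the visible signature (the template is invented for illustration, and constructing the client is outside this hunk):

from lm_deluge.pipelines.score import score_llm

# Illustrative template only; each entry of `inputs` fills in the placeholders.
template = "Is this review positive? Answer yes or no.\n\nReview: {}"

# Judging by the signature, return_probabilities=True yields list[float | None]
# (the probability of `yes_token` per input) and False yields list[bool | None].
# probs = score_llm(template, [("loved it",)], scoring_model=client,
#                   return_probabilities=True, yes_token="yes")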

lm_deluge/{llm_tools → pipelines}/translate.py
@@ -1,5 +1,5 @@
 import asyncio
-from ..client import LLMClient
+from ..client import _LLMClient
 
 translation_prompt = (
     "Translate the following text (enclosed in ```) into English. "
@@ -20,7 +20,9 @@ def is_english(text: str, low_memory: bool = True):
     return True
 
 
-async def translate_async(texts: list[str], client: LLMClient, low_memory: bool = True):
+async def translate_async(
+    texts: list[str], client: _LLMClient, low_memory: bool = True
+):
     to_translate_idxs = [
         i for i, text in enumerate(texts) if not is_english(text, low_memory=low_memory)
     ]
@@ -40,5 +42,5 @@ async def translate_async(texts: list[str], client: LLMClient, low_memory: bool
     return texts
 
 
-def translate(texts: list[str], client: LLMClient, low_memory: bool = True):
+def translate(texts: list[str], client: _LLMClient, low_memory: bool = True):
     return asyncio.run(translate_async(texts, client, low_memory))
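
The same rename applies to the translation helpers, so call sites are unchanged. A minimal sketch (the client is assumed to be constructed elsewhere; only is_english runs without one):

from lm_deluge.pipelines.translate import is_english, translate

texts = ["Hello world", "Bonjour le monde"]
# is_english gates which entries are sent to the model at all...
to_translate = [t for t in texts if not is_english(t)]
# ...and translate() returns the full list with only those entries replaced.
# english = translate(texts, client)  # requires a configured lm_deluge client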