lm-deluge 0.0.15__py3-none-any.whl → 0.0.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lm_deluge/models.py CHANGED
@@ -1,7 +1,11 @@
+from __future__ import annotations
+
 import random
 from dataclasses import dataclass, field
 
-registry = {
+from .request_context import RequestContext
+
+BUILTIN_MODELS = {
     # `7MMM. ,MMF' mm
     # MMMb dPMM MM
     # M YM ,M MM .gP"Ya mmMMmm ,6"Yb.
@@ -243,7 +247,7 @@ registry = {
         "supports_json": True,
         "supports_logprobs": False,
         "supports_responses": True,
-        "api_spec": "openai-responses",
+        "api_spec": "openai",
         "input_cost": 2.0,
         "output_cost": 8.0,
         "requests_per_minute": 20,
@@ -1194,6 +1198,8 @@ class APIModel:
         if name not in registry:
             raise ValueError(f"Model {name} not found in registry")
         cfg = registry[name]
+        if isinstance(cfg, APIModel):
+            return cfg
         return cls(**cfg)
 
     def sample_region(self):
@@ -1206,3 +1212,34 @@ class APIModel:
         else:
             raise ValueError("no regions to sample")
         random.sample(regions, 1, counts=weights)[0]
+
+    def make_request(self, context: RequestContext):  # -> "APIRequestBase"
+        from .api_requests.common import CLASSES
+
+        api_spec = self.api_spec
+        if (
+            context.use_responses_api
+            and self.supports_responses
+            and api_spec == "openai"
+        ):
+            api_spec = "openai-responses"
+
+        request_class = CLASSES.get(api_spec, None)
+        if request_class is None:
+            raise ValueError(f"Unsupported API spec: {api_spec}")
+        return request_class(context=context)
+
+
+registry: dict[str, APIModel] = {}
+
+
+def register_model(**kwargs) -> APIModel:
+    """Register a model configuration and return the created APIModel."""
+    model = APIModel(**kwargs)
+    registry[model.id] = model
+    return model
+
+
+# Populate registry with builtin models
+for cfg in BUILTIN_MODELS.values():
+    register_model(**cfg)
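
The upshot of the models.py changes: built-in configs now live in `BUILTIN_MODELS`, the module-level `registry` maps model ids to ready-made `APIModel` instances, and `register_model` lets downstream code add custom models at runtime. A minimal sketch of what this enables; the full `APIModel` field set is not visible in this diff, so `id`, `name`, and `api_base` below are assumptions:

```python
# Hypothetical: registering a self-hosted OpenAI-compatible endpoint.
# Only api_spec, the supports_* flags, and the cost fields are confirmed
# by this diff; id/name/api_base are assumed field names.
from lm_deluge.models import APIModel, register_model

my_model = register_model(
    id="my-vllm-model",
    name="my-vllm-model",
    api_base="http://localhost:8000/v1",
    api_spec="openai",
    supports_json=True,
    supports_logprobs=False,
    supports_responses=False,
    input_cost=0.0,
    output_cost=0.0,
)

# from_registry now short-circuits on stored APIModel instances,
# so the registered object comes back unchanged.
assert APIModel.from_registry("my-vllm-model") is my_model
```
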
lm_deluge/prompt.py CHANGED
@@ -9,7 +9,6 @@ import xxhash
 
 from lm_deluge.file import File
 from lm_deluge.image import Image
-from lm_deluge.models import APIModel
 
 CachePattern = Literal[
     "tools_only",
@@ -57,6 +56,10 @@ class ToolCall:
     name: str  # function name
     arguments: dict  # parsed arguments
     type: str = field(init=False, default="tool_call")
+    # built-in tool handling
+    built_in: bool = False
+    built_in_type: str | None = None
+    extra_body: dict | None = None
 
     @property
     def fingerprint(self) -> str:
@@ -102,10 +105,13 @@ class ToolCall:
 @dataclass(slots=True)
 class ToolResult:
     tool_call_id: str  # references the ToolCall.id
-    result: (
-        str | dict | list[dict]
-    )  # tool execution result - can be string or list for images
+    # tool execution result - can be string or list for images
+    result: str | dict | list[dict]
     type: str = field(init=False, default="tool_result")
+    # NEW! instead of specific carve-out for computer use,
+    # need to handle all built-ins for OpenAI
+    built_in: bool = False
+    built_in_type: str | None = None
 
     @property
     def fingerprint(self) -> str:
@@ -126,36 +132,42 @@ class ToolResult:
         return {"tool_call_id": self.tool_call_id, "content": content}
 
     def oa_resp(self) -> dict:  # OpenAI Responses
-        # Check if this is a computer use output (special case)
-        if isinstance(self.result, dict) and self.result.get("_computer_use_output"):
-            # This is a computer use output, emit it properly
-            output_data = self.result.copy()
-            output_data.pop("_computer_use_output")  # Remove marker
+        # if normal (not built-in) just return the regular output
+        if not self.built_in:
+            result = (
+                json.dumps(self.result)
+                if isinstance(self.result, list)
+                else self.result
+            )
+            return {
+                "type": "function_result",
+                "call_id": self.tool_call_id,
+                "result": result,
+            }
 
+        # if it's a built-in, OpenAI expects a special type for each
+        else:
+            assert isinstance(self.result, dict)
+            output_data = self.result.copy()
             result = {
-                "type": "computer_call_output",
+                "type": self.built_in_type,
                 "call_id": self.tool_call_id,
-                "output": output_data.get("output", {}),
             }
-
-            # Add acknowledged safety checks if present
-            if "acknowledged_safety_checks" in output_data:
-                result["acknowledged_safety_checks"] = output_data[
-                    "acknowledged_safety_checks"
-                ]
+            if self.built_in_type == "computer_call":
+                result["output"] = output_data.get("output", {})
+                if "acknowledged_safety_checks" in output_data:
+                    result["acknowledged_safety_checks"] = output_data[
+                        "acknowledged_safety_checks"
+                    ]
+            elif self.built_in_type == "image_generation_call":
+                raise NotImplementedError(
+                    "implement image generation call handling in tool result"
+                )
+            elif self.built_in_type == "web_search_call":
+                pass
 
             return result
 
-        # Regular function result
-        result = (
-            json.dumps(self.result) if isinstance(self.result, list) else self.result
-        )
-        return {
-            "type": "function_result",
-            "call_id": self.tool_call_id,
-            "result": result,
-        }
-
     def anthropic(self) -> dict:  # Anthropic Messages
         return {
             "type": "tool_result",
@@ -183,6 +195,8 @@ class ToolResult:
 class Thinking:
     content: str  # reasoning content (o1, Claude thinking, etc.)
     type: str = field(init=False, default="thinking")
+    # for openai - to keep conversation chain
+    raw_payload: dict | None = None
 
     @property
     def fingerprint(self) -> str:
@@ -779,6 +793,8 @@ class Conversation:
         return n + 6 * len(self.messages)
 
     def dry_run(self, model_name: str, max_new_tokens: int):
+        from lm_deluge.models import APIModel
+
         model_obj = APIModel.from_registry(model_name)
         if model_obj.api_spec == "openai":
             image_tokens = 85
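
The `oa_resp` rewrite above replaces the old `_computer_use_output` marker hack with explicit `built_in`/`built_in_type` fields on `ToolResult`. A sketch of the two serialization paths, with illustrative values:

```python
from lm_deluge.prompt import ToolResult

# Regular tool: serialized as a plain function_result.
regular = ToolResult(tool_call_id="call_1", result={"ok": True})
regular.oa_resp()
# -> {"type": "function_result", "call_id": "call_1", "result": {"ok": True}}

# Built-in tool: the output type comes from built_in_type, and
# computer_call results carry their payload under "output".
built_in = ToolResult(
    tool_call_id="call_2",
    result={"output": {"type": "screenshot", "image_url": "<base64 png>"}},
    built_in=True,
    built_in_type="computer_call",
)
built_in.oa_resp()
# -> {"type": "computer_call", "call_id": "call_2", "output": {...}}
```
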
lm_deluge/request_context.py ADDED
@@ -0,0 +1,75 @@
+from dataclasses import dataclass, field
+from typing import Any, Callable
+
+from .config import SamplingParams
+from .prompt import CachePattern, Conversation
+from .tracker import StatusTracker
+
+
+@dataclass
+class RequestContext:
+    """
+    Encapsulates all the parameters needed for an API request.
+    This reduces parameter shuttling and makes the request lifecycle clearer.
+    """
+
+    # Core request parameters
+    task_id: int
+    model_name: str
+    prompt: Conversation
+    sampling_params: SamplingParams
+
+    # Request configuration
+    attempts_left: int = 5
+    request_timeout: int = 30
+
+    # Infrastructure
+    status_tracker: StatusTracker | None = None
+    results_arr: list[Any] | None = (
+        None  # list["APIRequestBase"] but avoiding circular import
+    )
+    callback: Callable | None = None
+
+    # Optional features
+    tools: list | None = None
+    cache: CachePattern | None = None
+    use_responses_api: bool = False
+
+    # Computed properties
+    cache_key: str = field(init=False)
+    num_tokens: int = field(init=False)
+
+    def __post_init__(self):
+        # Compute cache key from prompt fingerprint
+        self.cache_key = self.prompt.fingerprint
+
+        # Compute token count
+        self.num_tokens = self.prompt.count_tokens(self.sampling_params.max_new_tokens)
+
+    def maybe_callback(self, response, tracker):
+        if not self.callback:
+            return
+        self.callback(response, tracker)
+
+    def copy(self, **overrides):
+        """Create a copy of this RequestContext with optional field overrides."""
+        # Get all current field values
+        current_values = {
+            "task_id": self.task_id,
+            "model_name": self.model_name,
+            "prompt": self.prompt,
+            "sampling_params": self.sampling_params,
+            "attempts_left": self.attempts_left,
+            "request_timeout": self.request_timeout,
+            "status_tracker": self.status_tracker,
+            "results_arr": self.results_arr,
+            "callback": self.callback,
+            "tools": self.tools,
+            "cache": self.cache,
+            "use_responses_api": self.use_responses_api,
+        }
+
+        # Update with any overrides
+        current_values.update(overrides)
+
+        return RequestContext(**current_values)
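
A sketch of how the new context object is meant to flow, assuming `SamplingParams()` is constructible with defaults:

```python
from lm_deluge.config import SamplingParams
from lm_deluge.prompt import Conversation
from lm_deluge.request_context import RequestContext

ctx = RequestContext(
    task_id=0,
    model_name="gpt-4.1-mini",
    prompt=Conversation.user("Summarize the attached report."),
    sampling_params=SamplingParams(),
)
# cache_key and num_tokens are derived in __post_init__
print(ctx.cache_key, ctx.num_tokens)

# Retry paths can presumably reuse the context with one field overridden:
retry_ctx = ctx.copy(attempts_left=ctx.attempts_left - 1)
```
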
lm_deluge/tool.py CHANGED
@@ -1,6 +1,6 @@
-from typing import Any, Literal, Callable, Coroutine, get_type_hints
-import inspect
 import asyncio
+import inspect
+from typing import Any, Callable, Coroutine, Literal, get_type_hints
 
 from fastmcp import Client  # pip install fastmcp >= 2.0
 from mcp.types import Tool as MCPTool
@@ -40,11 +40,15 @@ class Tool(BaseModel):
 
     name: str
     description: str | None
-    parameters: dict[str, Any]
+    parameters: dict[str, Any] | None
     required: list[str] = Field(default_factory=list)
     additionalProperties: bool | None = None  # only
     # if desired, can provide a callable to run the tool
     run: Callable | None = None
+    # for built-in tools that don't require schema
+    built_in: bool = False
+    type: str | None = None
+    built_in_args: dict[str, Any] = Field(default_factory=dict)
 
     @field_validator("name")
     @classmethod
@@ -196,7 +200,7 @@ class Tool(BaseModel):
         )
 
     @staticmethod
-    def _python_type_to_json_schema(python_type: type) -> dict[str, Any]:
+    def _python_type_to_json_schema(python_type) -> dict[str, Any]:
         """Convert Python type to JSON Schema type definition."""
         if python_type is int:
             return {"type": "integer"}
@@ -226,20 +230,16 @@ class Tool(BaseModel):
         return res
 
     # ---------- dumpers ----------
-    def for_openai_responses(self) -> dict[str, Any]:
-        return {
-            "type": "function",
-            "name": self.name,
-            "description": self.description,
-            "parameters": self._json_schema(include_additional_properties=True),
-        }
-
-    def for_openai_completions(self, *, strict: bool = True) -> dict[str, Any]:
+    def for_openai_completions(
+        self, *, strict: bool = True, **kwargs
+    ) -> dict[str, Any]:
+        if self.built_in:
+            return {"type": self.type, **self.built_in_args, **kwargs}
         if strict:
             # For strict mode, all parameters must be required and additionalProperties must be false
             schema = self._json_schema(include_additional_properties=True)
             schema["required"] = list(
-                self.parameters.keys()
+                (self.parameters or {}).keys()
             )  # All parameters required in strict mode
         else:
             # For non-strict mode, use the original required list
@@ -255,7 +255,25 @@ class Tool(BaseModel):
             },
         }
 
-    def for_anthropic(self) -> dict[str, Any]:
+    def for_openai_responses(self, **kwargs) -> dict[str, Any]:
+        if self.built_in:
+            return {"type": self.type, **self.built_in_args, **kwargs}
+        return {
+            "type": "function",
+            "name": self.name,
+            "description": self.description,
+            "parameters": self._json_schema(include_additional_properties=True),
+        }
+
+    def for_anthropic(self, **kwargs) -> dict[str, Any]:
+        # built-in tools have "name", "type", maybe metadata
+        if self.built_in:
+            return {
+                "name": self.name,
+                "type": self.type,
+                **self.built_in_args,
+                **kwargs,
+            }
         return {
             "name": self.name,
             "description": self.description,
@@ -272,6 +290,9 @@ class Tool(BaseModel):
             "parameters": self._json_schema(),
         }
 
+    def for_mistral(self) -> dict[str, Any]:
+        return self.for_openai_completions()
+
     def dump_for(
         self,
         provider: Literal[
@@ -288,3 +309,60 @@ class Tool(BaseModel):
         if provider == "google":
             return self.for_google()
         raise ValueError(provider)
+
+
+class MCPServer(BaseModel):
+    """
+    Allow MCPServers to be passed directly, if the provider supports it.
+    The provider can call MCP directly instead of it being handled client-side.
+    Should work with the Anthropic MCP connector and the OpenAI Responses API.
+    """
+
+    name: str
+    url: str
+    # anthropic-specific
+    token: str | None = None
+    configuration: dict | None = None
+    # openai-specific
+    headers: dict | None = None
+
+    def for_openai_responses(self):
+        # return {
+        #     "type": "mcp",
+        #     "server_label": "deepwiki",
+        #     "server_url": "https://mcp.deepwiki.com/mcp",
+        #     "require_approval": "never",
+        # }
+        res: dict[str, Any] = {
+            "type": "mcp",
+            "server_label": self.name,
+            "server_url": self.url,
+            "require_approval": "never",
+        }
+        if self.headers:
+            res["headers"] = self.headers
+
+        return res
+
+    def for_anthropic(self):
+        # return {
+        #     "type": "url",
+        #     "url": "https://example-server.modelcontextprotocol.io/sse",
+        #     "name": "example-mcp",
+        #     "tool_configuration": {
+        #         "enabled": true,
+        #         "allowed_tools": ["example_tool_1", "example_tool_2"]
+        #     },
+        #     "authorization_token": "YOUR_TOKEN"
+        # }
+        res: dict[str, Any] = {
+            "type": "url",
+            "url": self.url,
+            "name": self.name,
+        }
+        if self.token:
+            res["authorization_token"] = self.token
+        if self.configuration:
+            res["tool_configuration"] = self.configuration
+
+        return res
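
Together with `APIModel.make_request`, these hooks let built-in tools and remote MCP servers pass straight through to the provider payload. A sketch of the dumpers; the `web_search_20250305` type string and `max_uses` argument are illustrative Anthropic values, not taken from this diff:

```python
from lm_deluge.tool import MCPServer, Tool

# Built-in tool: no JSON schema, just a provider-defined type plus args.
web_search = Tool(
    name="web_search",
    description=None,
    parameters=None,
    built_in=True,
    type="web_search_20250305",
    built_in_args={"max_uses": 5},
)
web_search.for_anthropic()
# -> {"name": "web_search", "type": "web_search_20250305", "max_uses": 5}

# Remote MCP server, serialized per provider.
server = MCPServer(name="deepwiki", url="https://mcp.deepwiki.com/mcp")
server.for_openai_responses()
# -> {"type": "mcp", "server_label": "deepwiki",
#     "server_url": "https://mcp.deepwiki.com/mcp", "require_approval": "never"}
server.for_anthropic()
# -> {"type": "url", "url": "https://mcp.deepwiki.com/mcp", "name": "deepwiki"}
```
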
lm_deluge/tracker.py CHANGED
@@ -68,6 +68,7 @@ class StatusTracker:
         self.limiting_factor = factor
 
     def check_capacity(self, num_tokens: int, retry: bool = False):
+        self.update_capacity()  # always update before checking
         request_available = self.available_request_capacity >= 1
         tokens_available = self.available_token_capacity >= num_tokens
         concurrent_request_available = (
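
This one-line fix makes `check_capacity` self-contained: available capacity is replenished from elapsed time before the checks run, instead of relying on every caller to invoke `update_capacity` first. The general pattern, as a generic token-bucket sketch (not the tracker's actual fields):

```python
import time


class Bucket:
    """Generic token-bucket illustration of the update-then-check pattern."""

    def __init__(self, per_minute: float):
        self.per_minute = per_minute
        self.capacity = per_minute
        self.last_update = time.monotonic()

    def _update_capacity(self) -> None:
        # Replenish proportionally to elapsed time, capped at the limit.
        now = time.monotonic()
        refill = self.per_minute * (now - self.last_update) / 60
        self.capacity = min(self.per_minute, self.capacity + refill)
        self.last_update = now

    def check_capacity(self, cost: float = 1.0) -> bool:
        self._update_capacity()  # the 0.0.16 fix: refresh before checking
        if self.capacity >= cost:
            self.capacity -= cost
            return True
        return False
```
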
lm_deluge-0.0.16.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.15
+Version: 0.0.16
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
@@ -128,6 +128,30 @@ This just works. Images can be local images on disk, URLs, bytes, base64 data UR
 
 See a full multi-turn chat example in `examples/multiturn.md`.
 
+## Files
+
+For models that support file uploads (OpenAI, Anthropic, and Gemini), you can easily include PDF files and other documents:
+
+```python
+from lm_deluge import LLMClient, Conversation
+
+# Simple file upload
+client = LLMClient.basic("gpt-4.1-mini")
+conversation = Conversation.user(
+    "Please summarize this document",
+    file="path/to/document.pdf"
+)
+resps = client.process_prompts_sync([conversation])
+
+# You can also create File objects for more control
+from lm_deluge import File
+file = File("path/to/report.pdf", filename="Q4_Report.pdf")
+conversation = Conversation.user("Analyze this financial report")
+conversation.messages[0].parts.append(file)
+```
+
+Files can be local paths, URLs, bytes, or base64 data URLs, just like images.
+
 ## Tool Use
 
 Define tools from Python functions and use them with any model:
lm_deluge-0.0.16.dist-info/RECORD CHANGED
@@ -1,45 +1,48 @@
 lm_deluge/__init__.py,sha256=mAztMuxINmh7dGbYnT8tsmw1eryQAvd0jpY8yHzd0EE,315
 lm_deluge/agent.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lm_deluge/batches.py,sha256=dI5G9uvmoDU9hMohrkEhlIDyJPsmsVwZPwxx6qETxxk,17728
+lm_deluge/batches.py,sha256=05t8UL1xCKjLRKtZLkfbexLqro6T_ufFVsaNIMk05Fw,17725
 lm_deluge/cache.py,sha256=VB1kv8rM2t5XWPR60uhszFcxLDnVKOe1oA5hYjVDjIo,4375
-lm_deluge/client.py,sha256=kMHA3VlCRk_Ly1CiJ6rRz2GxttxhVuw6WEQtdMVrK-4,19806
+lm_deluge/client.py,sha256=Qn44_3x73PI0AxlmbGwO1MNz7fzjrkjB-RkhGf7k0Jo,22691
 lm_deluge/config.py,sha256=H1tQyJDNHGFuwxqQNL5Z-CjWAC0luHSBA3iY_pxmACM,932
 lm_deluge/embed.py,sha256=CO-TOlC5kOTAM8lcnicoG4u4K664vCBwHF1vHa-nAGg,13382
 lm_deluge/errors.py,sha256=oHjt7YnxWbh-eXMScIzov4NvpJMo0-2r5J6Wh5DQ1tk,209
 lm_deluge/file.py,sha256=zQH1STMjCG9pczO7Fk9Jw0_0Pj_8CogcdIxTe4J4AJw,5414
 lm_deluge/gemini_limits.py,sha256=V9mpS9JtXYz7AY6OuKyQp5TuIMRH1BVv9YrSNmGmHNA,1569
-lm_deluge/image.py,sha256=hFbRajqEVQbkirAfOxsTPkeq-27Zl-so4AWBFeUbpBI,7161
-lm_deluge/models.py,sha256=Xad2Ya2U4nk0z6m0l8iba8EE34-mI2HbRqdXrM6Fqc0,48641
-lm_deluge/prompt.py,sha256=KOuJFwpRKuz2F5WLniZzjOTW05I--mzYyMglr-s47F8,34601
+lm_deluge/image.py,sha256=SIf6vh4pZ5ccrBvWc3zB_ncsWeFw2lKuIJfP3ovo6hk,7444
+lm_deluge/models.py,sha256=6ZCirxOpdcg_M24cKUABYbRpLK-r9dlkXxUS9aeh0UY,49657
+lm_deluge/prompt.py,sha256=SaLcUjfzgeIZRzb6fxLp6PTFLxpvcSlaazJq3__2Sqs,35248
+lm_deluge/request_context.py,sha256=SfPu9pl5NgDVLaWGQkSXdQZ7Mm-Vw4GSTlOu-PAOE3k,2290
 lm_deluge/rerank.py,sha256=-NBAJdHz9OB-SWWJnHzkFmeVO4wR6lFV7Vw-SxG7aVo,11457
-lm_deluge/tool.py,sha256=C2zwU9-7fldfYT0TZDoVVGGSC6dN_It9GSxnfkN6Z_w,9822
-lm_deluge/tracker.py,sha256=4QQ0-H01KQp8x8KccidBIJWA5zfSQyA0kgTynvSG0gk,9202
+lm_deluge/tool.py,sha256=-jeP6lYbJxwLhuiS7m84LAfgbwCjgyH-yuUCt031L58,12239
+lm_deluge/tracker.py,sha256=-EkFDAklh5mclIFR-5SthAwNL4p1yKS8LUN7rhpOVPQ,9266
 lm_deluge/usage.py,sha256=VMEKghePFIID5JFBObqYxFpgYxnbYm_dnHy7V1-_T6M,4866
-lm_deluge/api_requests/__init__.py,sha256=_aSpD6CJL9g6OpLPoChXiHjl4MH_OlGcKgfZaW8cgLM,71
-lm_deluge/api_requests/anthropic.py,sha256=itKPu1cqCYcrr4fkLarlvSYr6tqLEAGVLGXEG05QXWM,8345
-lm_deluge/api_requests/base.py,sha256=THgCceZ_z9YjA_E9WWME5f2tIRSOOI2OAQCAWVlV-Xg,12448
-lm_deluge/api_requests/bedrock.py,sha256=yh4-zMrjlQfmxoBbrc2WYJ8gEqVkTP_-tMR7-XbTAtQ,11753
+lm_deluge/api_requests/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+lm_deluge/api_requests/anthropic.py,sha256=nO4Gf59ZddZUURDqkiR3P3Mbr7De7sEcGL6fdYdbozU,7699
+lm_deluge/api_requests/base.py,sha256=wKB6a5nNwD-ST_nNRVUlA3l_O9HhccPcGA2fJut7kfw,4430
+lm_deluge/api_requests/bedrock.py,sha256=EDYzE7zeYscUeyIai-uHd-fDuPXZszWfSPn55XgUbCI,10846
 lm_deluge/api_requests/common.py,sha256=BZ3vRO5TB669_UsNKugkkuFSzoLHOYJIKt4nV4sf4vc,422
-lm_deluge/api_requests/gemini.py,sha256=8qWdHFsse3gYU2MiJRI_FAmM-Ez8YCGmHtHGI6_z-ww,8203
-lm_deluge/api_requests/mistral.py,sha256=PkuoKbOJAB6DOK_NvzbxpWPAktfvonf69QjC0tVCYuE,5366
-lm_deluge/api_requests/openai.py,sha256=HUn83Y_Roo3pCUTBnrQhL9skW_PJ4OvS5gr5rIg58dU,19366
-lm_deluge/api_requests/response.py,sha256=X6AHXv-4dWHLKkPv7J0MSesweunqxIqJED6UY6ypdzE,5770
+lm_deluge/api_requests/gemini.py,sha256=6brxdouJcsJSEb8OZxklrTaqbZ1M-gWulNkGJqAKWV8,7400
+lm_deluge/api_requests/mistral.py,sha256=diflr8NlsJGpSlY1F5Ay0GMZhBDdv9L2JV70UaHnOBs,4431
+lm_deluge/api_requests/openai.py,sha256=4MgEoEQ9n_vwsNOyM2tWaPIV3IN5x7UUCrXFlqeZYLk,20782
+lm_deluge/api_requests/response.py,sha256=JFSwHAs-yaJYkscOgTAyHkt-v8FDZ5mgER9NmueXTGk,5866
 lm_deluge/api_requests/deprecated/bedrock.py,sha256=WrcIShCoO8JCUSlFOCHxg6KQCNTZfw3TpYTvSpYk4mA,11320
 lm_deluge/api_requests/deprecated/cohere.py,sha256=KgDScD6_bWhAzOY5BHZQKSA3kurt4KGENqC4wLsGmcU,5142
 lm_deluge/api_requests/deprecated/deepseek.py,sha256=FEApI93VAWDwuaqTooIyKMgONYqRhdUmiAPBRme-IYs,4582
 lm_deluge/api_requests/deprecated/mistral.py,sha256=pOfOZUM4U35I3Plch84SnAFpDAzouHcSNNMtgxRvjy4,4709
 lm_deluge/api_requests/deprecated/vertex.py,sha256=ygXz2RjdXErPCSBbiHLEWbf5_sSTIi31WoX0UaoYzRI,15275
-lm_deluge/computer_use/anthropic_tools.py,sha256=p1CgHw1htX0PTdDW9Tni9N1azVMCoyA_ei-fMT6HHis,2478
+lm_deluge/built_in_tools/anthropic.py,sha256=ZvO-8hBSQdD_RzWYF0APytr8grBnBovICA76yHYTFNA,4478
+lm_deluge/built_in_tools/openai.py,sha256=aLuJdXbANvXVIU38Vo2zsir7zlwWgX0d8oDPT7Ql64A,721
 lm_deluge/llm_tools/__init__.py,sha256=TbZTETq9i_9yYskFWQKOG4pGh5ZiyE_D-h3RArfhGp4,231
-lm_deluge/llm_tools/extract.py,sha256=-GtyqJUxKvB567tk_NnCMklazz18xZBCPlAjYHTVUWg,3649
+lm_deluge/llm_tools/extract.py,sha256=C3drVAMaoFx5jNE38Xi5cXxrqboyoZ9cE7nX5ylWbXw,4482
+lm_deluge/llm_tools/ocr.py,sha256=7fDlvs6uUOvbxMasvGGNJx5Fj6biM6z3lijKZaGN26k,23
 lm_deluge/llm_tools/score.py,sha256=9oGA3-k2U5buHQXkXaEI9M4Wb5yysNhTLsPbGeghAlQ,2580
 lm_deluge/llm_tools/translate.py,sha256=iXyYvQZ8bC44FWhBk4qpdqjKM1WFF7Shq-H2PxhPgg4,1452
 lm_deluge/util/json.py,sha256=_4Oar2Cmz2L1DK3EtPLPDxD6rsYHxjROmV8ZpmMjQ-4,5822
 lm_deluge/util/logprobs.py,sha256=UkBZakOxWluaLqHrjARu7xnJ0uCHVfLGHJdnYlEcutk,11768
 lm_deluge/util/validation.py,sha256=hz5dDb3ebvZrZhnaWxOxbNSVMI6nmaOODBkk0htAUhs,1575
 lm_deluge/util/xml.py,sha256=Ft4zajoYBJR3HHCt2oHwGfymGLdvp_gegVmJ-Wqk4Ck,10547
-lm_deluge-0.0.15.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
-lm_deluge-0.0.15.dist-info/METADATA,sha256=Xahpew4j6u9EgJGJf6l_wvnKpq1c2I1hoQQh3RIhUes,11942
-lm_deluge-0.0.15.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lm_deluge-0.0.15.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
-lm_deluge-0.0.15.dist-info/RECORD,,
+lm_deluge-0.0.16.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
+lm_deluge-0.0.16.dist-info/METADATA,sha256=hfp55fuKt7dlheGj1uOcOoSMzSLmddXA85Gqe1U4KAM,12689
+lm_deluge-0.0.16.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lm_deluge-0.0.16.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
+lm_deluge-0.0.16.dist-info/RECORD,,