lm-deluge 0.0.21__py3-none-any.whl → 0.0.23__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

This release of lm-deluge has been flagged as potentially problematic.

lm_deluge/prompt.py CHANGED
@@ -102,11 +102,14 @@ class ToolCall:
         }
 
 
+ToolResultPart = Text | Image
+
+
 @dataclass(slots=True)
 class ToolResult:
     tool_call_id: str  # references the ToolCall.id
     # tool execution result - can be string or list for images
-    result: str | dict | list[dict]
+    result: str | dict | list[ToolResultPart]
     type: str = field(init=False, default="tool_result")
     # NEW! instead of specific carve-out for computer use,
     # need to handle all built-ins for OpenAI
@@ -115,23 +118,64 @@ class ToolResult:
 
     @property
     def fingerprint(self) -> str:
-        result_str = (
-            json.dumps(self.result, sort_keys=True)
-            if isinstance(self.result, list) or isinstance(self.result, dict)
-            else str(self.result)
-        )
+        if isinstance(self.result, str):
+            result_str = self.result
+        elif isinstance(self.result, list):
+            result_str = json.dumps([part.fingerprint for part in self.result])
+        else:
+            raise ValueError("unsupported self.result type")
         return xxhash.xxh64(f"{self.tool_call_id}:{result_str}".encode()).hexdigest()
 
     # ── provider-specific emission ────────────────────────────────────────────
+    def get_images(self) -> list[Image]:
+        # for openai, we can't include images in tool result, so we have to
+        # include them in the next user message
+        if isinstance(self.result, str):
+            return []
+        elif isinstance(self.result, list):
+            images = []
+            for block in self.result:
+                if isinstance(block, Image):
+                    images.append(block)
+            return images
+        else:
+            raise ValueError("unexpected tool result type")
+
     def oa_chat(
         self,
     ) -> dict:  # OpenAI Chat Completions - tool results are separate messages
-        content = (
-            json.dumps(self.result) if isinstance(self.result, list) else self.result
-        )
-        return {"tool_call_id": self.tool_call_id, "content": content}
+        print("serializing toolresult with oa_chat...")
+        print("typeof self.result:", type(self.result))
+        if isinstance(self.result, str):
+            return {
+                "role": "tool",
+                "tool_call_id": self.tool_call_id,
+                "content": self.result,
+            }
+        elif isinstance(self.result, list):
+            # OpenAI only accepts strings! this is a painful limitation
+            image_idx = 0
+            text_result = ""
+            for block in self.result:
+                if isinstance(block, Text):
+                    text_result += block.text
+                    text_result += "\n\n---\n\n"
+                else:
+                    image_idx += 1
+                    text_result += f"[Image {image_idx} in following user message]"
+                    text_result += "\n\n---\n\n"
+
+            return {
+                "role": "tool",
+                "tool_call_id": self.tool_call_id,
+                "content": text_result,
+            }
+        else:
+            raise ValueError("result type not supported")
 
     def oa_resp(self) -> dict:  # OpenAI Responses
+        print("serializing toolresult with oa_chat...")
+        print("typeof self.result:", type(self.result))
         # if normal (not built-in just return the regular output
         if not self.built_in:
             result = (
@@ -169,13 +213,25 @@ class ToolResult:
         return result
 
     def anthropic(self) -> dict:  # Anthropic Messages
-        return {
-            "type": "tool_result",
-            "tool_use_id": self.tool_call_id,
-            "content": self.result,
-        }
+        if isinstance(self.result, str):
+            return {
+                "type": "tool_result",
+                "tool_use_id": self.tool_call_id,
+                "content": self.result,
+            }
+        elif isinstance(self.result, list):
+            # handle list of content blocks
+            return {
+                "type": "tool_result",
+                "tool_use_id": self.tool_call_id,
+                "content": [part.anthropic() for part in self.result],
+            }
+        else:
+            raise ValueError("unsupported self.result type")
 
     def gemini(self) -> dict:
+        if not isinstance(self.result, str):
+            raise ValueError("can't handle content blocks for gemini yet")
         return {
             "functionResponse": {
                 "name": self.tool_call_id,  # Gemini uses name field for ID
@@ -346,7 +402,7 @@ class Message:
 
     def add_image(
         self,
-        data: bytes | str | Path | io.BytesIO,
+        data: bytes | str | Path | io.BytesIO | Image,
        *,
        media_type: MediaType | None = None,
        detail: Literal["low", "high", "auto"] = "auto",
@@ -359,8 +415,10 @@
        dimension equals max_size, but only if the longer dimension is currently
        larger than max_size.
        """
-        img = Image(data, media_type=media_type, detail=detail)
-
+        if not isinstance(data, Image):
+            img = Image(data, media_type=media_type, detail=detail)
+        else:
+            img = data
        # Resize if max_size is provided
        if max_size is not None:
            img.resize(max_size)
@@ -387,7 +445,9 @@
         self.parts.append(ToolCall(id=id, name=name, arguments=arguments))
         return self
 
-    def add_tool_result(self, tool_call_id: str, result: str) -> "Message":
+    def add_tool_result(
+        self, tool_call_id: str, result: str | list[ToolResultPart]
+    ) -> "Message":
        """Append a tool result block and return self for chaining."""
        self.parts.append(ToolResult(tool_call_id=tool_call_id, result=result))
        return self
@@ -493,11 +553,12 @@
         tool_results = [p for p in self.parts if isinstance(p, ToolResult)]
         if len(tool_results) == 1:
             tool_result = tool_results[0]
-            return {
-                "role": "tool",
-                "tool_call_id": tool_result.tool_call_id,
-                "content": tool_result.result,
-            }
+            return tool_result.oa_chat()
+            # {
+            #     "role": "tool",
+            #     "tool_call_id": tool_result.tool_call_id,
+            #     "content": tool_result.result,
+            # }
         else:
             raise ValueError(
                 f"Tool role messages must contain exactly one ToolResult part for OpenAI, got {len(tool_results)}"
@@ -597,7 +658,9 @@ class Conversation:
         self.messages.append(msg)
         return self
 
-    def add_tool_result(self, tool_call_id: str, result: str) -> "Conversation":
+    def add_tool_result(
+        self, tool_call_id: str, result: str | list[ToolResultPart]
+    ) -> "Conversation":
        """Add a tool result to the conversation.
 
        If the conversation ends with a tool message, append to it (for parallel tool calls).
@@ -617,11 +680,23 @@
     def to_openai(self) -> list[dict]:
         result = []
         for m in self.messages:
-            if m.role == "tool" and len(m.tool_results) > 1:
+            if m.role == "tool":
                 # Split tool messages with multiple results into separate messages for OpenAI
                 for tool_result in m.tool_results:
                     tool_msg = Message("tool", [tool_result])
                     result.append(tool_msg.oa_chat())
+
+                # if tool response included images, add those to next user message
+                user_msg = Message("user", [])
+                for i, tool_result in enumerate(m.tool_results):
+                    images = tool_result.get_images()
+                    if len(images) > 0:
+                        user_msg.add_text(
+                            f"[Images for Tool Call {tool_result.tool_call_id}]"
+                        )
+                        for img in images:
+                            user_msg.add_image(img)
+
             else:
                 result.append(m.oa_chat())
         return result
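
End to end, the split looks roughly like this (a sketch with made-up IDs; the assistant turn carrying the matching `ToolCall` is elided):

```python
from lm_deluge.image import Image
from lm_deluge.prompt import Conversation, Text

conv = Conversation.user("Take a screenshot of the dashboard.")
# ... assistant message containing ToolCall(id="toolu_01", ...) goes here ...
conv.add_tool_result(
    "toolu_01",
    [Text("Here you go."), Image(data="data:image/png;base64,...")],
)

# Each ToolResult becomes its own {"role": "tool"} message with a string body;
# get_images() collects the Image parts for a user message labeled
# "[Images for Tool Call toolu_01]".
messages = conv.to_openai()
```
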
@@ -853,7 +928,9 @@
                 {
                     "type": "tool_result",
                     "tool_call_id": p.tool_call_id,
-                    "result": p.result,
+                    "result": p.result
+                    if isinstance(p.result, str)
+                    else f"<Tool result ({len(p.result)} blocks)>",
                 }
             )
         elif isinstance(p, Thinking):
@@ -905,32 +982,3 @@ def prompts_to_conversations(prompts: Sequence[str | list[dict] | Conversation])
     return [  # type: ignore
         Conversation.user(p) if isinstance(p, str) else p for p in prompts
     ]
-
-
-###############################################################################
-# --------------------------------------------------------------------------- #
-#                            Basic usage examples                             #
-# --------------------------------------------------------------------------- #
-
-# 1️⃣ trivial single-turn (text only) ---------------------------------------
-# conv = Conversation.user("Hi Claude, who won the 2018 World Cup?")
-# client.messages.create(model="claude-3-7-sonnet", **conv.to_anthropic())
-
-# # 2️⃣ system + vision + follow-up for OpenAI Chat Completions ---------------
-# conv = (
-#     Conversation.system("You are a visual assistant.")
-#     .add(
-#         Message.with_image(
-#             "user",
-#             "What's in this photo?",
-#             Image("boardwalk.jpg", detail="low"),
-#         )
-#     )
-#     .add(Message.text("assistant", "Looks like a lakeside boardwalk."))
-#     .add(Message.text("user", "Great, write a haiku about it."))
-# )
-
-# openai.chat.completions.create(model="gpt-4o-mini", messages=conv.to_openai_chat())
-
-# # 3️⃣ Same conversation sent through new Responses API -----------------------
-# openai.responses.create(model="gpt-4o-mini", **conv.to_openai_responses())

lm_deluge/request_context.py CHANGED
@@ -35,6 +35,7 @@ class RequestContext:
     cache: CachePattern | None = None
     use_responses_api: bool = False
     extra_headers: dict[str, str] | None = None
+    force_local_mcp: bool = False
 
     # Computed properties
     cache_key: str = field(init=False)
@@ -68,6 +69,7 @@ class RequestContext:
             "tools": self.tools,
             "cache": self.cache,
             "use_responses_api": self.use_responses_api,
+            "force_local_mcp": self.force_local_mcp,
         }
 
         # Update with any overrides
lm_deluge/tool.py CHANGED
@@ -1,11 +1,15 @@
 import asyncio
 import inspect
-from typing import Any, Callable, Coroutine, Literal, get_type_hints
+from typing import Any, Callable, Coroutine, Literal, TypedDict, get_type_hints
+from concurrent.futures import ThreadPoolExecutor
 
 from fastmcp import Client  # pip install fastmcp >= 2.0
 from mcp.types import Tool as MCPTool
 from pydantic import BaseModel, Field, field_validator
 
+from lm_deluge.prompt import Text, ToolResultPart
+from lm_deluge.image import Image
+
 
 async def _load_all_mcp_tools(client: Client) -> list["Tool"]:
     metas: list[MCPTool] = await client.list_tools()
@@ -14,7 +18,18 @@ async def _load_all_mcp_tools(client: Client) -> list["Tool"]:
     async def _async_call(**kw):
         async with client:
             # maybe should be call_tool_mcp if don't want to raise error
-            return await client.call_tool(name, kw)
+            content_blocks = await client.call_tool(name, kw)
+
+            # for now just concatenate them all into a result string
+            results = []
+            for block in content_blocks:
+                if block.type == "text":
+                    results.append(Text(block.text))
+                elif block.type == "image":
+                    data_url = f"data:{block.mimeType};base64,{block.data}"
+                    results.append(Image(data=data_url))
+
+            return results
 
     return _async_call
 
@@ -63,7 +78,7 @@ class Tool(BaseModel):
     def _is_async(self) -> bool:
         return inspect.iscoroutinefunction(self.run)
 
-    def call(self, **kwargs):
+    def call(self, **kwargs) -> str | list[ToolResultPart]:
         if self.run is None:
             raise ValueError("No run function provided")
 
@@ -71,17 +86,22 @@ class Tool(BaseModel):
             coro: Coroutine = self.run(**kwargs)  # type: ignore[arg-type]
             try:
                 loop = asyncio.get_running_loop()
+                assert loop
             except RuntimeError:
                 # no loop → safe to block
                 return asyncio.run(coro)
             else:
-                # already inside a loop → schedule
-                return loop.create_task(coro)
+                # Loop is running → execute coroutine in a worker thread
+                def _runner():
+                    return asyncio.run(coro)
+
+                with ThreadPoolExecutor(max_workers=1) as executor:
+                    return executor.submit(_runner).result()
         else:
             # plain function
             return self.run(**kwargs)
 
-    async def acall(self, **kwargs):
+    async def acall(self, **kwargs) -> str | list[ToolResultPart]:
        if self.run is None:
            raise ValueError("No run function provided")
 
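
Before this change, calling an async-backed tool from inside a running event loop returned a pending `Task` instead of a value; the new branch blocks on a one-off worker thread. The pattern in isolation (stdlib only, runnable as-is):

```python
import asyncio
from concurrent.futures import ThreadPoolExecutor


async def slow_tool() -> str:
    await asyncio.sleep(0.1)
    return "done"


def call_blocking() -> str:
    coro = slow_tool()
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # no loop running -> safe to block this thread directly
        return asyncio.run(coro)
    # a loop IS running -> drive the coroutine on a fresh loop in a worker
    # thread, so the caller gets a concrete value instead of a Task
    with ThreadPoolExecutor(max_workers=1) as executor:
        return executor.submit(asyncio.run, coro).result()


async def main():
    print(call_blocking())  # prints "done" even though main's loop is running


asyncio.run(main())
```

The trade-off is that the calling thread stays blocked while the tool runs; `acall` remains the non-blocking path.
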
@@ -218,12 +238,64 @@ class Tool(BaseModel):
             # Default to string for unknown types
             return {"type": "string"}
 
-    def _json_schema(self, include_additional_properties=False) -> dict[str, Any]:
+    def _json_schema(
+        self, include_additional_properties=False, remove_defaults=False
+    ) -> dict[str, Any]:
+        def _add_additional_properties_recursive(
+            schema: dict | list | Any, remove_defaults: bool = False
+        ) -> dict | list | Any:
+            """Recursively add additionalProperties: false to all object-type schemas."""
+            if isinstance(schema, dict):
+                # Copy the dictionary to avoid modifying the original
+                new_schema = schema.copy()
+
+                # make sure to label arrays and objects
+                if "type" not in new_schema:
+                    if "properties" in new_schema:
+                        new_schema["type"] = "object"
+                    elif "items" in new_schema:
+                        new_schema["type"] = "array"
+
+                # If this is an object type schema, set additionalProperties: false
+                if new_schema.get("type") == "object":
+                    new_schema["additionalProperties"] = False
+
+                # Remove default values if requested (for strict mode)
+                if remove_defaults and "default" in new_schema:
+                    del new_schema["default"]
+
+                # Recursively process all values in the dictionary
+                for key, value in new_schema.items():
+                    new_schema[key] = _add_additional_properties_recursive(
+                        value, remove_defaults
+                    )
+
+                return new_schema
+            elif isinstance(schema, list):
+                # Recursively process all items in the list
+                return [
+                    _add_additional_properties_recursive(item, remove_defaults)
+                    for item in schema
+                ]
+            else:
+                # Return primitive values as-is
+                return schema
+
+        # Start with the base schema structure
+        if include_additional_properties and self.parameters:
+            # Apply recursive additionalProperties processing to parameters
+            processed_parameters = _add_additional_properties_recursive(
+                self.parameters, remove_defaults
+            )
+        else:
+            processed_parameters = self.parameters
+
         res = {
             "type": "object",
-            "properties": self.parameters,
+            "properties": processed_parameters,
             "required": self.required,  # Use the tool's actual required list
         }
+
         if include_additional_properties:
             res["additionalProperties"] = False
 
@@ -236,8 +308,10 @@
         if self.built_in:
             return {"type": self.type, **self.built_in_args, **kwargs}
         if strict:
-            # For strict mode, all parameters must be required and additionalProperties must be false
-            schema = self._json_schema(include_additional_properties=True)
+            # For strict mode, remove defaults and make all parameters required
+            schema = self._json_schema(
+                include_additional_properties=True, remove_defaults=True
+            )
             schema["required"] = list(
                 (self.parameters or {}).keys()
             )  # All parameters required in strict mode
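
Traced by hand against the helper above, a nested, untyped parameter block should come out of strict mode with inferred types, `additionalProperties: False` on every object, and defaults stripped. The parameter names here are hypothetical:

```python
# Input: a tool's `parameters` dict with an untyped nested object
parameters = {
    "filters": {
        "properties": {
            "limit": {"type": "integer", "default": 10},
        },
    },
}

# Expected "properties" after
# _json_schema(include_additional_properties=True, remove_defaults=True):
expected = {
    "filters": {
        "type": "object",               # inferred from the "properties" key
        "additionalProperties": False,  # stamped onto every object schema
        "properties": {
            "limit": {"type": "integer"},  # "default" removed for strict mode
        },
    },
}
```
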
@@ -315,6 +389,14 @@ class Tool(BaseModel):
         raise ValueError(provider)
 
 
+class OpenAIMCPSpec(TypedDict):
+    type: str
+    server_label: str
+    server_url: str
+    headers: dict | None
+    require_approval: str
+
+
 class MCPServer(BaseModel):
     """
     Allow MCPServers to be passed directly, if provider supports it.
@@ -330,13 +412,18 @@ class MCPServer(BaseModel):
     # openai-specific
     headers: dict | None = None
 
+    # tools cache
+    _tools: list[Tool] | None = None
+
+    @classmethod
+    def from_openai(cls, spec: OpenAIMCPSpec):
+        return cls(
+            name=spec["server_label"],
+            url=spec["server_url"],
+            headers=spec.get("headers"),
+        )
+
     def for_openai_responses(self):
-        # return {
-        #     "type": "mcp",
-        #     "server_label": "deepwiki",
-        #     "server_url": "https://mcp.deepwiki.com/mcp",
-        #     "require_approval": "never",
-        # }
         res: dict[str, Any] = {
             "type": "mcp",
             "server_label": self.name,
@@ -349,16 +436,6 @@ class MCPServer(BaseModel):
         return res
 
     def for_anthropic(self):
-        # return {
-        #     "type": "url",
-        #     "url": "https://example-server.modelcontextprotocol.io/sse",
-        #     "name": "example-mcp",
-        #     "tool_configuration": {
-        #         "enabled": true,
-        #         "allowed_tools": ["example_tool_1", "example_tool_2"]
-        #     },
-        #     "authorization_token": "YOUR_TOKEN"
-        # }
         res: dict[str, Any] = {
             "type": "url",
             "url": self.url,
@@ -370,3 +447,15 @@
             res["tool_configuration"] = self.configuration
 
         return res
+
+    async def to_tools(self) -> list[Tool]:
+        """
+        Compatible with ALL providers.
+        Caches so we don't have to hit the server a ton of times.
+        """
+        if self._tools:
+            return self._tools
+        else:
+            tools: list[Tool] = await Tool.from_mcp(self.name, url=self.url)
+            self._tools = tools
+            return tools
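
A usage sketch for the new classmethod and cache. `Tool.from_mcp` is referenced but not shown in this diff, and the deepwiki server comes from the example comment removed above, so treat the specifics as assumptions:

```python
from lm_deluge.tool import MCPServer

server = MCPServer.from_openai(
    {
        "type": "mcp",
        "server_label": "deepwiki",
        "server_url": "https://mcp.deepwiki.com/mcp",
        "headers": None,
        "require_approval": "never",  # part of the spec; from_openai ignores it
    }
)


async def main():
    tools = await server.to_tools()  # first call hits the server
    tools = await server.to_tools()  # second call returns the cached _tools list
```
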
lm_deluge-0.0.21.dist-info/METADATA → lm_deluge-0.0.23.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.21
+Version: 0.0.23
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10

lm_deluge-0.0.21.dist-info/RECORD → lm_deluge-0.0.23.dist-info/RECORD CHANGED
@@ -1,30 +1,30 @@
 lm_deluge/__init__.py,sha256=mAztMuxINmh7dGbYnT8tsmw1eryQAvd0jpY8yHzd0EE,315
 lm_deluge/agent.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lm_deluge/batches.py,sha256=05t8UL1xCKjLRKtZLkfbexLqro6T_ufFVsaNIMk05Fw,17725
+lm_deluge/batches.py,sha256=OflXh9AHxb3qB5x5oo8cI5uhQihxj75Nvf7QDes3xV8,18630
 lm_deluge/cache.py,sha256=VB1kv8rM2t5XWPR60uhszFcxLDnVKOe1oA5hYjVDjIo,4375
-lm_deluge/client.py,sha256=H7rNqq9pOeyoqK2PyHbHUCIOygxXWJmRNj4WQA8qawE,25850
+lm_deluge/client.py,sha256=nAGMwdUPDVx-x23hZF6U5Yhug6Zf5FT27RHj_kj8nZk,26369
 lm_deluge/config.py,sha256=H1tQyJDNHGFuwxqQNL5Z-CjWAC0luHSBA3iY_pxmACM,932
 lm_deluge/embed.py,sha256=CO-TOlC5kOTAM8lcnicoG4u4K664vCBwHF1vHa-nAGg,13382
 lm_deluge/errors.py,sha256=oHjt7YnxWbh-eXMScIzov4NvpJMo0-2r5J6Wh5DQ1tk,209
 lm_deluge/file.py,sha256=zQH1STMjCG9pczO7Fk9Jw0_0Pj_8CogcdIxTe4J4AJw,5414
 lm_deluge/gemini_limits.py,sha256=V9mpS9JtXYz7AY6OuKyQp5TuIMRH1BVv9YrSNmGmHNA,1569
-lm_deluge/image.py,sha256=D8kMh2yu8sTuOchKpW9DE3XKbE6oUiFl9cRi6H1GpDc,7526
-lm_deluge/models.py,sha256=6ZCirxOpdcg_M24cKUABYbRpLK-r9dlkXxUS9aeh0UY,49657
-lm_deluge/prompt.py,sha256=T8o2hwv3RuxG7-fL5pCl0v14WVpmV09PdRzCZzLNszE,35265
-lm_deluge/request_context.py,sha256=l1DrPTtG80WtUhyDWblTiyT695K7Al9lWWDfdl6PMK0,2338
+lm_deluge/image.py,sha256=Qpa0k5yXfrpSaHzVUwW_TEn7yEgmwzYGL17Sa7-KhSA,7729
+lm_deluge/models.py,sha256=3vgI1BlfT4_Higev25QhhXJufQvsI6pd0yjF9YL0crA,49812
+lm_deluge/prompt.py,sha256=cfwzCAmT-1K0v7SfEMUrxpBkJGgf7IFlWfNLJrCcoBM,37025
+lm_deluge/request_context.py,sha256=0X-5m8BKn51rnnjzGDDXqbuSUEFGjdayirQjbvPcjMI,2425
 lm_deluge/rerank.py,sha256=-NBAJdHz9OB-SWWJnHzkFmeVO4wR6lFV7Vw-SxG7aVo,11457
-lm_deluge/tool.py,sha256=X6NDabz53BVe1pokaKCeTLCF1-AlMAxOn1_KWiCSb7c,12407
+lm_deluge/tool.py,sha256=_coOKB9nPNVZoseMRumRyQ8BMR7_d0IlstzMHNT69JY,15732
 lm_deluge/tracker.py,sha256=-EkFDAklh5mclIFR-5SthAwNL4p1yKS8LUN7rhpOVPQ,9266
 lm_deluge/usage.py,sha256=VMEKghePFIID5JFBObqYxFpgYxnbYm_dnHy7V1-_T6M,4866
 lm_deluge/api_requests/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-lm_deluge/api_requests/anthropic.py,sha256=kBvvZBjWsYrw2hV7-i_w3Hr0YlXzoNLBs0Eabw8YvWY,8045
-lm_deluge/api_requests/base.py,sha256=O3-Dsl_hr-xtLTekPLdrNnL5mTTfnfsN6Fcwq0-eKMg,5355
-lm_deluge/api_requests/bedrock.py,sha256=kZtKp6GqF73RpmLJKzEj4XTbllB8Kyq9-QydUuh2iu0,10977
+lm_deluge/api_requests/anthropic.py,sha256=Vi2iGQw6LGPW8d4X489Jg-dlEye529jf8i5YsM3bLYQ,8139
+lm_deluge/api_requests/base.py,sha256=EVHNFtlttKbN7Tt1MnLaO-NjvKHPSV5CqlRv-OnpVAE,5593
+lm_deluge/api_requests/bedrock.py,sha256=p6leW5stnvb406lwbmVFmfTGxdEI-t0GfaTExpol3qk,10900
 lm_deluge/api_requests/common.py,sha256=BZ3vRO5TB669_UsNKugkkuFSzoLHOYJIKt4nV4sf4vc,422
-lm_deluge/api_requests/gemini.py,sha256=N_94TpjpBLyekdrBjax2w0jPqYf70JycaRgZUNSsAAY,7531
-lm_deluge/api_requests/mistral.py,sha256=5EqYZgu9AfGrWs5-ucr8uJK_0cMEoUKKlEjBV3O6EPc,4561
-lm_deluge/api_requests/openai.py,sha256=fEIBchry-tLqkf0fhdFsS3CIjXbB_AV39Ig-PwAsT1I,21424
-lm_deluge/api_requests/response.py,sha256=JFSwHAs-yaJYkscOgTAyHkt-v8FDZ5mgER9NmueXTGk,5866
+lm_deluge/api_requests/gemini.py,sha256=UeA4gsIePsIu6IQQ7izR359LLQQTi0Qky_ykjH1XQf4,7461
+lm_deluge/api_requests/mistral.py,sha256=S_LpOfCGbCVEROH_od3P-tYeNYTKFMamMTL-c_wFCBI,4597
+lm_deluge/api_requests/openai.py,sha256=jb_pBGqSbqs7SvbA45Odu3JkwS3jsKn-p3hG8-qkYbc,21509
+lm_deluge/api_requests/response.py,sha256=FtkVYk_rDH93Kj9pqbB-l7a4dQHzVr6ivKL9khYKLbs,5966
 lm_deluge/api_requests/deprecated/bedrock.py,sha256=WrcIShCoO8JCUSlFOCHxg6KQCNTZfw3TpYTvSpYk4mA,11320
 lm_deluge/api_requests/deprecated/cohere.py,sha256=KgDScD6_bWhAzOY5BHZQKSA3kurt4KGENqC4wLsGmcU,5142
 lm_deluge/api_requests/deprecated/deepseek.py,sha256=FEApI93VAWDwuaqTooIyKMgONYqRhdUmiAPBRme-IYs,4582
@@ -48,8 +48,8 @@ lm_deluge/util/logprobs.py,sha256=UkBZakOxWluaLqHrjARu7xnJ0uCHVfLGHJdnYlEcutk,11
 lm_deluge/util/spatial.py,sha256=BsF_UKhE-x0xBirc-bV1xSKZRTUhsOBdGqsMKme20C8,4099
 lm_deluge/util/validation.py,sha256=hz5dDb3ebvZrZhnaWxOxbNSVMI6nmaOODBkk0htAUhs,1575
 lm_deluge/util/xml.py,sha256=Ft4zajoYBJR3HHCt2oHwGfymGLdvp_gegVmJ-Wqk4Ck,10547
-lm_deluge-0.0.21.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
-lm_deluge-0.0.21.dist-info/METADATA,sha256=DNMP7Z4uanLqddXCRVnqXn_4eYmC8A15DuchzkO79n4,12978
-lm_deluge-0.0.21.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lm_deluge-0.0.21.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
-lm_deluge-0.0.21.dist-info/RECORD,,
+lm_deluge-0.0.23.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
+lm_deluge-0.0.23.dist-info/METADATA,sha256=AFWuOPqIKF8GE5qyw9uPjzpOTMy74i5ED1t3MqwOUcc,12978
+lm_deluge-0.0.23.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lm_deluge-0.0.23.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
+lm_deluge-0.0.23.dist-info/RECORD,,