lm-deluge 0.0.8__py3-none-any.whl → 0.0.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


lm_deluge/prompt.py CHANGED
@@ -4,14 +4,14 @@ import tiktoken
 import xxhash
 from dataclasses import dataclass, field
 from pathlib import Path
-from typing import Literal, Sequence
+from typing import Literal
 from lm_deluge.models import APIModel
 from lm_deluge.image import Image
 
 ###############################################################################
 # 1. Low-level content blocks – either text or an image                      #
 ###############################################################################
-Role = Literal["system", "user", "assistant"]
+Role = Literal["system", "user", "assistant", "tool"]
 
 
 @dataclass(slots=True)
@@ -40,18 +40,235 @@ class Text:
         return {"type": "text", "text": self.text}
 
 
+@dataclass(slots=True)
+class ToolCall:
+    id: str  # unique identifier
+    name: str  # function name
+    arguments: dict  # parsed arguments
+    type: str = field(init=False, default="tool_call")
+
+    @property
+    def fingerprint(self) -> str:
+        return xxhash.xxh64(
+            f"{self.id}:{self.name}:{json.dumps(self.arguments, sort_keys=True)}".encode()
+        ).hexdigest()
+
+    # ── provider-specific emission ────────────────────────────────────────
+    def oa_chat(self) -> dict:  # OpenAI Chat Completions
+        return {
+            "id": self.id,
+            "type": "function",
+            "function": {"name": self.name, "arguments": json.dumps(self.arguments)},
+        }
+
+    def oa_resp(self) -> dict:  # OpenAI Responses
+        return {
+            "type": "function_call",
+            "id": self.id,
+            "name": self.name,
+            "arguments": self.arguments,
+        }
+
+    def anthropic(self) -> dict:  # Anthropic Messages
+        return {
+            "type": "tool_use",
+            "id": self.id,
+            "name": self.name,
+            "input": self.arguments,
+        }
+
+    def gemini(self) -> dict:
+        return {"functionCall": {"name": self.name, "args": self.arguments}}
+
+    def mistral(self) -> dict:
+        return {
+            "type": "tool_call",
+            "id": self.id,
+            "function": {"name": self.name, "arguments": json.dumps(self.arguments)},
+        }
+
+
+@dataclass(slots=True)
+class ToolResult:
+    tool_call_id: str  # references the ToolCall.id
+    result: str  # tool execution result
+    type: str = field(init=False, default="tool_result")
+
+    @property
+    def fingerprint(self) -> str:
+        return xxhash.xxh64(f"{self.tool_call_id}:{self.result}".encode()).hexdigest()
+
+    # ── provider-specific emission ────────────────────────────────────────
+    def oa_chat(
+        self,
+    ) -> dict:  # OpenAI Chat Completions - tool results are separate messages
+        return {"tool_call_id": self.tool_call_id, "content": self.result}
+
+    def oa_resp(self) -> dict:  # OpenAI Responses
+        return {
+            "type": "function_result",
+            "call_id": self.tool_call_id,
+            "result": self.result,
+        }
+
+    def anthropic(self) -> dict:  # Anthropic Messages
+        return {
+            "type": "tool_result",
+            "tool_use_id": self.tool_call_id,
+            "content": self.result,
+        }
+
+    def gemini(self) -> dict:
+        return {
+            "functionResponse": {
+                "name": self.tool_call_id,  # Gemini uses name field for ID
+                "response": {"result": self.result},
+            }
+        }
+
+    def mistral(self) -> dict:
+        return {
+            "type": "tool_result",
+            "tool_call_id": self.tool_call_id,
+            "content": self.result,
+        }
+
+
+@dataclass(slots=True)
+class Thinking:
+    content: str  # reasoning content (o1, Claude thinking, etc.)
+    type: str = field(init=False, default="thinking")
+
+    @property
+    def fingerprint(self) -> str:
+        return xxhash.xxh64(self.content.encode()).hexdigest()
+
+    # ── provider-specific emission ────────────────────────────────────────
+    def oa_chat(self) -> dict:  # OpenAI Chat Completions
+        # Thinking is typically not emitted back, but if needed:
+        return {"type": "text", "text": f"[Thinking: {self.content}]"}
+
+    def oa_resp(self) -> dict:  # OpenAI Responses
+        return {"type": "reasoning", "content": self.content}
+
+    def anthropic(self) -> dict:  # Anthropic Messages
+        return {"type": "thinking", "thinking": self.content}
+
+    def gemini(self) -> dict:
+        return {"text": f"[Thinking: {self.content}]"}
+
+    def mistral(self) -> dict:
+        return {"type": "text", "text": f"[Thinking: {self.content}]"}
+
+
+Part = Text | Image | ToolCall | ToolResult | Thinking
+
+
 ###############################################################################
 # 2. One conversational turn (role + parts)                                  #
 ###############################################################################
 @dataclass(slots=True)
 class Message:
     role: Role
-    parts: list[Text | Image]
+    parts: list[Part]
 
     @property
     def fingerprint(self) -> str:
         return self.role + "," + ",".join(part.fingerprint for part in self.parts)
 
+    @property
+    def completion(self) -> str | None:
+        """Extract text content from the first Text part, for backward compatibility."""
+        for part in self.parts:
+            if isinstance(part, Text):
+                return part.text
+        return None
+
+    @property
+    def tool_calls(self) -> list["ToolCall"]:
+        """Get all tool call parts with proper typing."""
+        return [part for part in self.parts if part.type == "tool_call"]  # type: ignore
+
+    @property
+    def tool_results(self) -> list["ToolResult"]:
+        """Get all tool result parts with proper typing."""
+        return [part for part in self.parts if part.type == "tool_result"]  # type: ignore
+
+    @property
+    def text_parts(self) -> list["Text"]:
+        """Get all text parts with proper typing."""
+        return [part for part in self.parts if part.type == "text"]  # type: ignore
+
+    @property
+    def images(self) -> list[Image]:
+        """Get all image parts with proper typing."""
+        return [part for part in self.parts if part.type == "image"]  # type: ignore
+
+    @property
+    def thinking_parts(self) -> list["Thinking"]:
+        """Get all thinking parts with proper typing."""
+        return [part for part in self.parts if part.type == "thinking"]  # type: ignore
+
+    def to_log(self) -> dict:
+        """
+        Return a JSON-serialisable dict that fully captures the message.
+        """
+        content_blocks: list[dict] = []
+        for p in self.parts:
+            if isinstance(p, Text):
+                content_blocks.append({"type": "text", "text": p.text})
+            elif isinstance(p, Image):  # Image – redact the bytes, keep a hint
+                w, h = p.size
+                content_blocks.append({"type": "image", "tag": f"<Image ({w}×{h})>"})
+            elif isinstance(p, ToolCall):
+                content_blocks.append(
+                    {
+                        "type": "tool_call",
+                        "id": p.id,
+                        "name": p.name,
+                        "arguments": p.arguments,
+                    }
+                )
+            elif isinstance(p, ToolResult):
+                content_blocks.append(
+                    {
+                        "type": "tool_result",
+                        "tool_call_id": p.tool_call_id,
+                        "result": p.result,
+                    }
+                )
+            elif isinstance(p, Thinking):
+                content_blocks.append({"type": "thinking", "content": p.content})
+
+        return {"role": self.role, "content": content_blocks}
+
+    @classmethod
+    def from_log(cls, data: dict) -> "Message":
+        """Re-hydrate a Message previously produced by `to_log()`."""
+        role: Role = data["role"]
+        parts: list[Part] = []
+
+        for p in data["content"]:
+            if p["type"] == "text":
+                parts.append(Text(p["text"]))
+            elif p["type"] == "image":
+                # We only stored a placeholder tag, so keep that placeholder.
+                parts.append(Image(p["tag"], detail="low"))
+            elif p["type"] == "tool_call":
+                parts.append(
+                    ToolCall(id=p["id"], name=p["name"], arguments=p["arguments"])
+                )
+            elif p["type"] == "tool_result":
+                parts.append(
+                    ToolResult(tool_call_id=p["tool_call_id"], result=p["result"])
+                )
+            elif p["type"] == "thinking":
+                parts.append(Thinking(content=p["content"]))
+            else:
+                raise ValueError(f"Unknown part type {p['type']!r}")
+
+        return cls(role, parts)
+
     def add_text(self, content: str) -> "Message":
         """Append a text block and return self for chaining."""
         self.parts.append(Text(content))
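For orientation, a minimal sketch (not from the package docs) of how the new part types compose, using only classes and accessors introduced in this hunk; the `get_weather` tool name and call id are hypothetical:

    from lm_deluge.prompt import Message, Text, ToolCall

    # An assistant turn that pairs text with a tool invocation.
    msg = Message(
        "assistant",
        [
            Text("Let me check the weather."),
            ToolCall(id="call_1", name="get_weather", arguments={"city": "Paris"}),
        ],
    )

    # `type` is assigned via field(init=False, ...), so the typed accessors
    # can filter parts without isinstance checks:
    assert msg.tool_calls[0].name == "get_weather"

    # Each part emits its own provider-specific wire format:
    msg.tool_calls[0].anthropic()
    # -> {'type': 'tool_use', 'id': 'call_1', 'name': 'get_weather',
    #     'input': {'city': 'Paris'}}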
@@ -81,6 +298,21 @@ class Message:
         self.parts.append(img)
         return self
 
+    def add_tool_call(self, id: str, name: str, arguments: dict) -> "Message":
+        """Append a tool call block and return self for chaining."""
+        self.parts.append(ToolCall(id=id, name=name, arguments=arguments))
+        return self
+
+    def add_tool_result(self, tool_call_id: str, result: str) -> "Message":
+        """Append a tool result block and return self for chaining."""
+        self.parts.append(ToolResult(tool_call_id=tool_call_id, result=result))
+        return self
+
+    def add_thinking(self, content: str) -> "Message":
+        """Append a thinking block and return self for chaining."""
+        self.parts.append(Thinking(content=content))
+        return self
+
     # -------- convenient constructors --------
     @classmethod
     def user(
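The new add_* helpers follow the same chaining convention as the existing add_text; a short sketch (tool name and ids hypothetical):

    from lm_deluge.prompt import Message

    msg = (
        Message("assistant", [])
        .add_thinking("User wants the weather in two cities.")
        .add_tool_call("call_1", "get_weather", {"city": "Paris"})
        .add_tool_call("call_2", "get_weather", {"city": "Tokyo"})
    )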
@@ -118,16 +350,32 @@ class Message:
             if msg["role"] in ["developer", "system"]
             else ("user" if msg["role"] == "user" else "assistant")
         )
-        parts: Sequence[Text | Image] = []
+        parts: list[Part] = []
         content = msg["content"]
         if isinstance(content, str):
-            parts.append(Text(content))
+            parts = [Text(content)]
         else:
+            part_list = []
             for item in content:
                 if item["type"] == "text":
-                    parts.append(Text(item["text"]))
+                    part_list.append(Text(item["text"]))
                 elif item["type"] == "image_url":
-                    parts.append(Image(data=item["image_url"]["url"]))
+                    part_list.append(Image(data=item["image_url"]["url"]))
+            parts = part_list
+
+        # Handle tool calls (assistant messages)
+        if "tool_calls" in msg:
+            part_list = list(parts) if parts else []
+            for tool_call in msg["tool_calls"]:
+                part_list.append(
+                    ToolCall(
+                        id=tool_call["id"],
+                        name=tool_call["function"]["name"],
+                        arguments=json.loads(tool_call["function"]["arguments"]),
+                    )
+                )
+            parts = part_list
+
         return cls(role, parts)
 
     @classmethod
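For reference, the OpenAI Chat Completions assistant message this branch parses looks roughly like the dict below (values hypothetical); note that function.arguments arrives as a JSON string, which is why the code decodes it with json.loads:

    openai_msg = {
        "role": "assistant",
        "content": "Checking the weather now.",
        "tool_calls": [
            {
                "id": "call_1",
                "type": "function",
                "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'},
            }
        ],
    }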
@@ -140,10 +388,35 @@ class Message:
 
     # ───── provider-specific emission ─────
     def oa_chat(self) -> dict:
-        content = []
-        for p in self.parts:
-            content.append(p.oa_chat())
-        return {"role": self.role, "content": content}
+        if self.role == "tool":
+            # For tool messages, we expect a single ToolResult part (after splitting in to_openai)
+            tool_results = [p for p in self.parts if isinstance(p, ToolResult)]
+            if len(tool_results) == 1:
+                tool_result = tool_results[0]
+                return {
+                    "role": "tool",
+                    "tool_call_id": tool_result.tool_call_id,
+                    "content": tool_result.result,
+                }
+            else:
+                raise ValueError(
+                    f"Tool role messages must contain exactly one ToolResult part for OpenAI, got {len(tool_results)}"
+                )
+        else:
+            content = []
+            tool_calls = []
+
+            for p in self.parts:
+                if isinstance(p, ToolCall):
+                    tool_calls.append(p.oa_chat())
+                else:
+                    content.append(p.oa_chat())
+
+            result = {"role": self.role, "content": content}
+            if tool_calls:
+                result["tool_calls"] = tool_calls
+
+            return result
 
     def oa_resp(self) -> dict:
         content = [p.oa_resp() for p in self.parts]
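A sketch of the two branches (ids and values hypothetical, and assuming Text emits its standard {'type': 'text', ...} block): assistant messages lift ToolCall parts out of content into a top-level tool_calls array, while tool messages must carry exactly one ToolResult:

    from lm_deluge.prompt import Message, ToolResult

    assistant = (
        Message("assistant", [])
        .add_text("On it.")
        .add_tool_call("call_1", "get_weather", {"city": "Paris"})
    )
    assistant.oa_chat()
    # {'role': 'assistant', 'content': [{'type': 'text', 'text': 'On it.'}],
    #  'tool_calls': [{'id': 'call_1', 'type': 'function',
    #                  'function': {'name': 'get_weather',
    #                               'arguments': '{"city": "Paris"}'}}]}

    tool = Message("tool", [ToolResult(tool_call_id="call_1", result="18°C, sunny")])
    tool.oa_chat()
    # {'role': 'tool', 'tool_call_id': 'call_1', 'content': '18°C, sunny'}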
@@ -155,7 +428,7 @@ class Message:
             raise ValueError("Anthropic keeps system outside message list")
         content = [p.anthropic() for p in self.parts]
         # Shortcut: single text becomes a bare string
-        if len(content) == 1 and content[0]["type"] == "text":
+        if len(content) == 1 and content[0].get("type") == "text":
             content = content[0]["text"]
         return {"role": self.role, "content": content}
 
@@ -210,9 +483,34 @@ class Conversation:
         self.messages.append(msg)
         return self
 
+    def add_tool_result(self, tool_call_id: str, result: str) -> "Conversation":
+        """Add a tool result to the conversation.
+
+        If the conversation ends with a tool message, append to it (for parallel tool calls).
+        Otherwise, create a new tool message.
+        """
+        if self.messages and self.messages[-1].role == "tool":
+            # Append to existing tool message (parallel tool calls)
+            self.messages[-1].add_tool_result(tool_call_id, result)
+        else:
+            # Create new tool message
+            tool_msg = Message("tool", [])
+            tool_msg.add_tool_result(tool_call_id, result)
+            self.messages.append(tool_msg)
+        return self
+
     # ── conversions -----------------------------------------------------------
     def to_openai(self) -> list[dict]:
-        return [m.oa_chat() for m in self.messages]
+        result = []
+        for m in self.messages:
+            if m.role == "tool" and len(m.tool_results) > 1:
+                # Split tool messages with multiple results into separate messages for OpenAI
+                for tool_result in m.tool_results:
+                    tool_msg = Message("tool", [tool_result])
+                    result.append(tool_msg.oa_chat())
+            else:
+                result.append(m.oa_chat())
+        return result
 
     def to_openai_responses(self) -> dict:
         # OpenAI Responses = single “input” array, role must be user/assistant
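A sketch of the splitting behaviour. The Conversation constructor is outside this diff, so the example assumes it can be built from a list of messages; only add_tool_result and to_openai appear above:

    from lm_deluge.prompt import Conversation, Message, Text

    # NOTE: constructor shape assumed; the diff only shows `self.messages`.
    conv = Conversation([Message("user", [Text("Weather in Paris and Tokyo?")])])

    # Two parallel results accumulate on one tool message...
    conv.add_tool_result("call_1", "18°C, sunny")
    conv.add_tool_result("call_2", "22°C, cloudy")

    # ...and to_openai() splits them back out, one {'role': 'tool'} dict per
    # tool_call_id, as the Chat Completions API requires.
    payload = conv.to_openai()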
@@ -227,7 +525,16 @@ class Conversation:
             ),
             None,
         )
-        other = [m.anthropic() for m in self.messages if m.role != "system"]
+        other = []
+        for m in self.messages:
+            if m.role == "system":
+                continue
+            elif m.role == "tool":
+                # Convert tool messages to user messages for Anthropic
+                user_msg = Message("user", m.parts)
+                other.append(user_msg.anthropic())
+            else:
+                other.append(m.anthropic())
         return system_msg, other
 
     def to_gemini(self) -> tuple[str | None, list[dict]]:
@@ -295,11 +602,30 @@ class Conversation:
             for p in msg.parts:
                 if isinstance(p, Text):
                     content_blocks.append({"type": "text", "text": p.text})
-                else:  # Image – redact the bytes, keep a hint
+                elif isinstance(p, Image):  # Image – redact the bytes, keep a hint
                     w, h = p.size
                     content_blocks.append(
                         {"type": "image", "tag": f"<Image ({w}×{h})>"}
                     )
+                elif isinstance(p, ToolCall):
+                    content_blocks.append(
+                        {
+                            "type": "tool_call",
+                            "id": p.id,
+                            "name": p.name,
+                            "arguments": p.arguments,
+                        }
+                    )
+                elif isinstance(p, ToolResult):
+                    content_blocks.append(
+                        {
+                            "type": "tool_result",
+                            "tool_call_id": p.tool_call_id,
+                            "result": p.result,
+                        }
+                    )
+                elif isinstance(p, Thinking):
+                    content_blocks.append({"type": "thinking", "content": p.content})
             serialized.append({"role": msg.role, "content": content_blocks})
 
         return {"messages": serialized}
@@ -311,7 +637,7 @@ class Conversation:
 
         for m in payload.get("messages", []):
             role: Role = m["role"]  # 'system' | 'user' | 'assistant'
-            parts: list[Text | Image] = []
+            parts: list[Text | Image | ToolCall | ToolResult | Thinking] = []
 
             for p in m["content"]:
                 if p["type"] == "text":
@@ -320,6 +646,16 @@ class Conversation:
                     # We only stored a placeholder tag, so keep that placeholder.
                     # You could raise instead if real image bytes are required.
                     parts.append(Image(p["tag"], detail="low"))
+                elif p["type"] == "tool_call":
+                    parts.append(
+                        ToolCall(id=p["id"], name=p["name"], arguments=p["arguments"])
+                    )
+                elif p["type"] == "tool_result":
+                    parts.append(
+                        ToolResult(tool_call_id=p["tool_call_id"], result=p["result"])
+                    )
+                elif p["type"] == "thinking":
+                    parts.append(Thinking(content=p["content"]))
                 else:
                     raise ValueError(f"Unknown part type {p['type']!r}")
 
lm_deluge/tool.py CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Literal, Callable
+from typing import Any, Literal, Callable
 from pydantic import BaseModel, Field
 
 
@@ -9,7 +9,7 @@ class ToolSpec(BaseModel):
 
     name: str
     description: str
-    parameters: Dict[str, Any]
+    parameters: dict[str, Any]
     required: list[str] = Field(default_factory=list)
     additionalProperties: bool | None = None  # only
     # if desired, can provide a callable to run the tool
@@ -20,21 +20,20 @@ class ToolSpec(BaseModel):
             raise ValueError("No run function provided")
         return self.run(**kwargs)
 
-    def _json_schema(self, include_additional_properties=False) -> Dict[str, Any]:
-        return {
+    def _json_schema(self, include_additional_properties=False) -> dict[str, Any]:
+        res = {
             "type": "object",
             "properties": self.parameters,
-            "required": self.required or [],
-            **(
-                {"additionalProperties": self.additionalProperties}
-                if self.additionalProperties is not None
-                and include_additional_properties
-                else {}
-            ),
+            # for openai all must be required
+            "required": list(self.parameters.keys()),
         }
+        if include_additional_properties:
+            res["additionalProperties"] = False
+
+        return res
 
     # ---------- dumpers ----------
-    def for_openai_responses(self) -> Dict[str, Any]:
+    def for_openai_responses(self) -> dict[str, Any]:
         return {
             "type": "function",
             "name": self.name,
@@ -42,25 +41,25 @@ class ToolSpec(BaseModel):
             "parameters": self._json_schema(include_additional_properties=True),
         }
 
-    def for_openai_completions(self, *, strict: bool = True) -> Dict[str, Any]:
+    def for_openai_completions(self, *, strict: bool = True) -> dict[str, Any]:
         return {
             "type": "function",
             "function": {
                 "name": self.name,
                 "description": self.description,
-                "parameters": self._json_schema(),
+                "parameters": self._json_schema(include_additional_properties=True),
                 "strict": strict,
             },
         }
 
-    def for_anthropic(self) -> Dict[str, Any]:
+    def for_anthropic(self) -> dict[str, Any]:
        return {
             "name": self.name,
             "description": self.description,
             "input_schema": self._json_schema(),
         }
 
-    def for_google(self) -> Dict[str, Any]:
+    def for_google(self) -> dict[str, Any]:
         """
         Shape used by google.genai docs.
         """
@@ -76,7 +75,7 @@ class ToolSpec(BaseModel):
             "openai-responses", "openai-completions", "anthropic", "google"
         ],
         **kw,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         if provider == "openai-responses":
             return self.for_openai_responses()
         if provider == "openai-completions":
@@ -86,21 +85,3 @@ class ToolSpec(BaseModel):
         if provider == "google":
             return self.for_google()
         raise ValueError(provider)
-
-
-# ---- computer tools (for non-CUA models) ----
-_BUTTONS = ["left", "right", "wheel", "back", "forward"]
-
-# --- helpers ----
-_COORD_OBJECT = {
-    "type": "object",
-    "properties": {
-        "x": {"type": "integer", "description": "X-coordinate in pixels"},
-        "y": {"type": "integer", "description": "Y-coordinate in pixels"},
-    },
-    "required": ["x", "y"],
-}
-
-
-def _coord_field(desc: str):
-    return {"type": "integer", "description": desc}
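Usage of the per-provider dumpers is unchanged; a quick sketch with the same hypothetical spec, using only methods named in this diff:

    from lm_deluge.tool import ToolSpec

    spec = ToolSpec(
        name="get_weather",
        description="Look up current weather for a city.",
        parameters={"city": {"type": "string"}},
    )

    spec.for_openai_completions()  # nested under "function", includes "strict": True
    spec.for_anthropic()           # flat shape with "input_schema"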
lm_deluge-0.0.8.dist-info/METADATA → lm_deluge-0.0.9.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.8
+Version: 0.0.9
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
lm_deluge-0.0.8.dist-info/RECORD → lm_deluge-0.0.9.dist-info/RECORD CHANGED
@@ -1,22 +1,23 @@
 lm_deluge/__init__.py,sha256=rndOr4Rcfnpttz-onWU3vVEm-MM0WDFgz6KexKPAx0k,222
 lm_deluge/cache.py,sha256=VB1kv8rM2t5XWPR60uhszFcxLDnVKOe1oA5hYjVDjIo,4375
-lm_deluge/client.py,sha256=ERH0SkNvdM1zc8HYS5dxRGxVxUb4CXpUhveG3mz-w2I,28533
+lm_deluge/client.py,sha256=rk0YAUvC5kF3OuxnmYlkT8I6NjAmEJ6TUfryQbRkmXw,28960
 lm_deluge/embed.py,sha256=m-X8UK4gV9KKD7Wv3yarAceMQaj7gR1JwzD_sB0MOQY,13183
 lm_deluge/errors.py,sha256=oHjt7YnxWbh-eXMScIzov4NvpJMo0-2r5J6Wh5DQ1tk,209
 lm_deluge/gemini_limits.py,sha256=V9mpS9JtXYz7AY6OuKyQp5TuIMRH1BVv9YrSNmGmHNA,1569
 lm_deluge/image.py,sha256=hFbRajqEVQbkirAfOxsTPkeq-27Zl-so4AWBFeUbpBI,7161
-lm_deluge/models.py,sha256=w_OqA4Jxcy8LCCcdPRsGzg8iLFv4S9fPS5b4oj82Bgs,42778
-lm_deluge/prompt.py,sha256=bhDAlfUQ_Fq6Wh-L9jOfoiMbDGyVKGkjGicnwKJWpcI,12680
+lm_deluge/models.py,sha256=qdUsHC1kz82ZwduUU_rBcvJP4j2JXCL3Q2RtbHWc1H8,44998
+lm_deluge/prompt.py,sha256=_pJYwgjL39lDzMNmae8pPIBoORm_ekSM_9qU2iGGpOc,25445
 lm_deluge/rerank.py,sha256=tW1c3gQCAqaF8Ez-r-4qxYAcdKqxnLMxwHApKOUKwk4,11289
 lm_deluge/sampling_params.py,sha256=E2kewh1vz-1Qcy5xNBCzihfGgT_GcHYMfzaWb3FLiXs,739
-lm_deluge/tool.py,sha256=RVUW3E3FW11jCM-R7pIL1GpRs1YKCOjvTkL1D5xPetk,3196
+lm_deluge/tool.py,sha256=zXletfGtpgBCXuqietZn-eaOItbIyOROskTbaSjfwEk,2701
 lm_deluge/tracker.py,sha256=Dk99scN_NeDEO0gkLO5efXiZq11Ga-k6cerUHWN7IWY,1292
 lm_deluge/api_requests/__init__.py,sha256=_aSpD6CJL9g6OpLPoChXiHjl4MH_OlGcKgfZaW8cgLM,71
-lm_deluge/api_requests/anthropic.py,sha256=URbiD-ANn_P3StFJVP2JoDWuoloZVsAUly8CGSyV2Kw,6618
-lm_deluge/api_requests/base.py,sha256=Yt5Bxd5C5mZrbAMQYDghk0KRhUChSbTEsVI8DoThZBs,14805
-lm_deluge/api_requests/common.py,sha256=EjwTnKrvgBx-HnRVt0kSJZ9RM7CM-QyhlIQkr1jxP-4,220
-lm_deluge/api_requests/mistral.py,sha256=ThlV1jBfhpAwkaqPKhdUq-lIq2OienRbhEaSK4cctvI,5370
-lm_deluge/api_requests/openai.py,sha256=YgJMUio23ks6VLv6BDBZAW6Bnfd2fpidSidaHXzyXFY,6135
+lm_deluge/api_requests/anthropic.py,sha256=MMI_w9hVbevQpcqP3NVVindpTmLb2KHqjJQpIzCi5RM,7240
+lm_deluge/api_requests/base.py,sha256=w0MEOCIccxxy2c67Y2Y-QBox9rinIxQ7MLnp8953sjQ,15954
+lm_deluge/api_requests/bedrock.py,sha256=cvB85BFvL9HKTUsP9qFUCLQzJh83IQNAcLXuW6ReZK8,10520
+lm_deluge/api_requests/common.py,sha256=U0mX_wC3Tzg2-1u9nYUCTQqYzuYJqvLrICCNW_dbbJM,287
+lm_deluge/api_requests/mistral.py,sha256=gCi4R61oh759ZX6TKrT-fnQwIQaOGcPXhWrDsjJwPOY,5388
+lm_deluge/api_requests/openai.py,sha256=BuMiM_2zJQXfnUjTT94JxJi3ZX5V-KQQueRG-R0SGuc,7361
 lm_deluge/api_requests/deprecated/bedrock.py,sha256=WrcIShCoO8JCUSlFOCHxg6KQCNTZfw3TpYTvSpYk4mA,11320
 lm_deluge/api_requests/deprecated/cohere.py,sha256=KgDScD6_bWhAzOY5BHZQKSA3kurt4KGENqC4wLsGmcU,5142
 lm_deluge/api_requests/deprecated/deepseek.py,sha256=FEApI93VAWDwuaqTooIyKMgONYqRhdUmiAPBRme-IYs,4582
@@ -30,8 +31,8 @@ lm_deluge/util/json.py,sha256=dCeG9j1D17rXmQJbKJH79X0CGof4Wlqd55TDg4D6ky8,5388
 lm_deluge/util/logprobs.py,sha256=UkBZakOxWluaLqHrjARu7xnJ0uCHVfLGHJdnYlEcutk,11768
 lm_deluge/util/validation.py,sha256=hz5dDb3ebvZrZhnaWxOxbNSVMI6nmaOODBkk0htAUhs,1575
 lm_deluge/util/xml.py,sha256=Ft4zajoYBJR3HHCt2oHwGfymGLdvp_gegVmJ-Wqk4Ck,10547
-lm_deluge-0.0.8.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
-lm_deluge-0.0.8.dist-info/METADATA,sha256=sRRWcI9rQ0BlCENlRF6EdY-eJY-p9CTAFmak8tstGOM,8076
-lm_deluge-0.0.8.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
-lm_deluge-0.0.8.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
-lm_deluge-0.0.8.dist-info/RECORD,,
+lm_deluge-0.0.9.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
+lm_deluge-0.0.9.dist-info/METADATA,sha256=hIv-9R30IJXuh6AHR0pZkktvsbSihThOuu9D9AniKIg,8076
+lm_deluge-0.0.9.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+lm_deluge-0.0.9.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
+lm_deluge-0.0.9.dist-info/RECORD,,