lm-deluge 0.0.34__py3-none-any.whl → 0.0.36__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lm-deluge might be problematic. Click here for more details.

@@ -0,0 +1,318 @@
1
# ---------------------------------------------------------------------------
# OpenAI
# ---------------------------------------------------------------------------
# Registry of OpenAI API models, keyed by the short id used throughout the
# package. Costs are presumably USD per million tokens (TODO confirm against
# the current OpenAI pricing page); requests_per_minute / tokens_per_minute,
# where present, are default throttling limits.
OPENAI_MODELS = {
    "gpt-5": {
        "id": "gpt-5",
        "name": "gpt-5",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": False,
        "supports_logprobs": True,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 1.25,
        "cached_input_cost": 0.125,
        "output_cost": 10.0,
        "reasoning_model": True,
    },
    "gpt-5-chat": {
        "id": "gpt-5-chat",
        "name": "gpt-5-chat-latest",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": False,
        "supports_logprobs": True,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 1.25,
        "cached_input_cost": 0.125,
        "output_cost": 10.0,
        "reasoning_model": False,
    },
    "gpt-5-mini": {
        "id": "gpt-5-mini",
        "name": "gpt-5-mini",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": False,
        "supports_logprobs": True,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 0.25,
        "cached_input_cost": 0.025,
        "output_cost": 2.0,
        "reasoning_model": True,
    },
    "gpt-5-nano": {
        "id": "gpt-5-nano",
        "name": "gpt-5-nano",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": False,
        "supports_logprobs": True,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 0.05,
        "cached_input_cost": 0.005,
        "output_cost": 0.40,
        "reasoning_model": True,
    },
    "openai-computer-use-preview": {
        "id": "openai-computer-use-preview",
        "name": "computer-use-preview",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": True,
        "supports_logprobs": False,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 2.0,
        "output_cost": 8.0,
        "requests_per_minute": 20,
        "tokens_per_minute": 100_000,
        "reasoning_model": False,
    },
    "o3": {
        "id": "o3",
        "name": "o3-2025-04-16",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": False,
        "supports_logprobs": True,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 10.0,
        "output_cost": 40.0,
        "requests_per_minute": 20,
        "tokens_per_minute": 100_000,
        "reasoning_model": True,
    },
    "o4-mini": {
        "id": "o4-mini",
        "name": "o4-mini-2025-04-16",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": False,
        "supports_logprobs": True,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 1.1,
        "output_cost": 4.4,
        "requests_per_minute": 20,
        "tokens_per_minute": 100_000,
        "reasoning_model": True,
    },
    "gpt-4.1": {
        "id": "gpt-4.1",
        "name": "gpt-4.1-2025-04-14",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": True,
        "supports_logprobs": True,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 2.0,
        "output_cost": 8.0,
        "requests_per_minute": 20,
        "tokens_per_minute": 100_000,
        "reasoning_model": False,
    },
    "gpt-4.1-mini": {
        "id": "gpt-4.1-mini",
        "name": "gpt-4.1-mini-2025-04-14",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": True,
        "supports_logprobs": True,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 0.4,
        "output_cost": 1.6,
        "requests_per_minute": 20,
        "tokens_per_minute": 100_000,
        "reasoning_model": False,
    },
    "gpt-4.1-nano": {
        "id": "gpt-4.1-nano",
        "name": "gpt-4.1-nano-2025-04-14",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": True,
        "supports_logprobs": True,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 0.1,
        "output_cost": 0.4,
        "requests_per_minute": 20,
        "tokens_per_minute": 100_000,
        "reasoning_model": False,
    },
    "gpt-4.5": {
        "id": "gpt-4.5",
        "name": "gpt-4.5-preview-2025-02-27",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": False,
        "supports_logprobs": True,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 75.0,
        "output_cost": 150.0,
        "requests_per_minute": 20,
        "tokens_per_minute": 100_000,
        "reasoning_model": False,
    },
    "o3-mini": {
        "id": "o3-mini",
        "name": "o3-mini-2025-01-31",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": False,
        "supports_logprobs": True,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 1.1,
        "output_cost": 4.4,
        "requests_per_minute": 20,
        "tokens_per_minute": 100_000,
        "reasoning_model": True,
    },
    "o1": {
        "id": "o1",
        "name": "o1-2024-12-17",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": False,
        "supports_logprobs": True,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 15.0,
        "output_cost": 60.0,
        "requests_per_minute": 20,
        "tokens_per_minute": 100_000,
        "reasoning_model": True,
    },
    "o1-preview": {
        "id": "o1-preview",
        "name": "o1-preview-2024-09-12",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": False,
        "supports_logprobs": True,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 15.0,
        "output_cost": 60.0,
        "requests_per_minute": 20,
        "tokens_per_minute": 100_000,
        "reasoning_model": True,
    },
    "o1-mini": {
        "id": "o1-mini",
        "name": "o1-mini-2024-09-12",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": False,
        "supports_logprobs": True,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 3.0,
        "output_cost": 15.0,
        "requests_per_minute": 20,
        "tokens_per_minute": 100_000,
        "reasoning_model": True,
    },
    "gpt-4o": {
        "id": "gpt-4o",
        "name": "gpt-4o-2024-08-06",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": True,
        "supports_logprobs": True,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 5.0,
        "output_cost": 15.0,
        "requests_per_minute": 10_000,
        "tokens_per_minute": 30_000_000,
    },
    "gpt-4o-mini": {
        "id": "gpt-4o-mini",
        "name": "gpt-4o-mini-2024-07-18",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": True,
        "supports_logprobs": True,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 0.15,
        "output_cost": 0.6,
        "requests_per_minute": 60_000,
        "tokens_per_minute": 250_000_000,
    },
    "gpt-3.5-turbo": {
        "id": "gpt-3.5-turbo",
        "name": "gpt-3.5-turbo-0125",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": True,
        "supports_logprobs": True,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 0.5,
        "output_cost": 1.5,
        "requests_per_minute": 40_000,
        "tokens_per_minute": 75_000_000,
    },
    "gpt-4-turbo": {
        "id": "gpt-4-turbo",
        "name": "gpt-4-turbo-2024-04-09",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": True,
        "supports_logprobs": True,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 10.0,
        "output_cost": 30.0,
        "requests_per_minute": 10_000,
        "tokens_per_minute": 1_500_000,
    },
    "gpt-4": {
        "id": "gpt-4",
        "name": "gpt-4-0613",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": False,
        "supports_logprobs": False,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 30.0,
        "output_cost": 60.0,
        "requests_per_minute": 10_000,
        "tokens_per_minute": 300_000,
    },
    "gpt-4-32k": {
        "id": "gpt-4-32k",
        "name": "gpt-4-32k-0613",
        "api_base": "https://api.openai.com/v1",
        "api_key_env_var": "OPENAI_API_KEY",
        "supports_json": False,
        "supports_logprobs": False,
        "supports_responses": True,
        "api_spec": "openai",
        "input_cost": 60.0,
        "output_cost": 120.0,
        "requests_per_minute": 1_000,
        "tokens_per_minute": 150_000,
    },
}
@@ -0,0 +1 @@
1
# Placeholder registry: no OpenRouter models are configured yet.
OPENROUTER_MODELS: dict[str, dict] = {}
@@ -0,0 +1,112 @@
1
+ # ███████████ █████ █████
2
+ # ░█░░░███░░░█ ░░███ ░░███
3
+ # ░ ░███ ░ ██████ ███████ ██████ ███████ ░███████ ██████ ████████
4
+ # ░███ ███░░███ ███░░███ ███░░███░░░███░ ░███░░███ ███░░███░░███░░███
5
+ # ░███ ░███ ░███░███ ░███░███████ ░███ ░███ ░███ ░███████ ░███ ░░░
6
+ # ░███ ░███ ░███░███ ░███░███░░░ ░███ ███ ░███ ░███ ░███░░░ ░███
7
+ # █████ ░░██████ ░░███████░░██████ ░░█████ ████ █████░░██████ █████
8
+ # ░░░░░ ░░░░░░ ░░░░░███ ░░░░░░ ░░░░░ ░░░░ ░░░░░ ░░░░░░ ░░░░░
9
+ # ███ ░███
10
+ # ░░██████
11
+ # ░░░░░░
12
# ---------------------------------------------------------------------------
# Together
# ---------------------------------------------------------------------------
# NOTE: the main reason to use these models is that they're cheap; they are
# all generally below Haiku in quality. Rate limits are unknown, hence None.
TOGETHER_MODELS = {
    "deepseek-r1-together": {
        "id": "deepseek-r1-together",
        "name": "deepseek-ai/DeepSeek-R1",
        "api_base": "https://api.together.xyz/v1",
        "api_key_env_var": "TOGETHER_API_KEY",
        "supports_json": False,
        "api_spec": "openai",
        "input_cost": 3.0,
        "output_cost": 7.0,
        "requests_per_minute": None,
        "tokens_per_minute": None,
    },
    "deepseek-v3-together": {
        "id": "deepseek-v3-together",
        "name": "deepseek-ai/DeepSeek-V3",
        "api_base": "https://api.together.xyz/v1",
        "api_key_env_var": "TOGETHER_API_KEY",
        "supports_json": False,
        "api_spec": "openai",
        "input_cost": 1.25,
        "output_cost": 1.25,
        "requests_per_minute": None,
        "tokens_per_minute": None,
    },
    "qwen-3-235b-together": {
        "id": "qwen-3-235b-together",
        "name": "Qwen/Qwen3-235B-A22B-fp8",
        "api_base": "https://api.together.xyz/v1",
        "api_key_env_var": "TOGETHER_API_KEY",
        "supports_json": False,
        "api_spec": "openai",
        "input_cost": 0.2,
        "output_cost": 0.6,
        "requests_per_minute": None,
        "tokens_per_minute": None,
    },
    "qwen-2.5-vl-together": {
        "id": "qwen-2.5-vl-together",
        "name": "Qwen/Qwen2.5-VL-72B-Instruct",
        "api_base": "https://api.together.xyz/v1",
        "api_key_env_var": "TOGETHER_API_KEY",
        "supports_json": False,
        "api_spec": "openai",
        "input_cost": 1.95,
        "output_cost": 8.0,
        "requests_per_minute": None,
        "tokens_per_minute": None,
    },
    "llama-4-maverick-together": {
        "id": "llama-4-maverick-together",
        "name": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
        "api_base": "https://api.together.xyz/v1",
        "api_key_env_var": "TOGETHER_API_KEY",
        "supports_json": False,
        "api_spec": "openai",
        "input_cost": 0.27,
        "output_cost": 0.85,
        "requests_per_minute": None,
        "tokens_per_minute": None,
    },
    "llama-4-scout-together": {
        "id": "llama-4-scout-together",
        "name": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
        "api_base": "https://api.together.xyz/v1",
        "api_key_env_var": "TOGETHER_API_KEY",
        "supports_json": False,
        "api_spec": "openai",
        "input_cost": 0.18,
        "output_cost": 0.59,
        "requests_per_minute": None,
        "tokens_per_minute": None,
    },
    "gpt-oss-120b-together": {
        "id": "gpt-oss-120b-together",
        "name": "openai/gpt-oss-120b",
        "api_base": "https://api.together.xyz/v1",
        "api_key_env_var": "TOGETHER_API_KEY",
        "supports_json": False,
        "api_spec": "openai",
        "input_cost": 0.18,
        "output_cost": 0.59,
        "requests_per_minute": None,
        "tokens_per_minute": None,
        "reasoning_model": True,
    },
    "gpt-oss-20b-together": {
        "id": "gpt-oss-20b-together",
        "name": "openai/gpt-oss-20b",
        "api_base": "https://api.together.xyz/v1",
        "api_key_env_var": "TOGETHER_API_KEY",
        "supports_json": False,
        "api_spec": "openai",
        "input_cost": 0.18,
        "output_cost": 0.59,
        "requests_per_minute": None,
        "tokens_per_minute": None,
        "reasoning_model": True,
    },
}
lm_deluge/prompt.py CHANGED
@@ -654,11 +654,11 @@ class Conversation:
654
654
  pass
655
655
 
656
656
  # fluent additions
657
def with_message(self, msg: Message) -> "Conversation":
    """Fluent append: add *msg* to this conversation's messages and return self."""
    self.messages.append(msg)
    return self
660
660
 
661
- def add_tool_result(
661
+ def with_tool_result(
662
662
  self, tool_call_id: str, result: str | list[ToolResultPart]
663
663
  ) -> "Conversation":
664
664
  """Add a tool result to the conversation.
@@ -0,0 +1,47 @@
1
+ # sample thing we'd want to parse from llama.cpp
2
+ # the goal here is: barebones inference implementation returns
3
+ # raw harmony string; we parse into content blocks
4
+
5
+ # implied: <|start|>assistant
6
+ # <|channel|>analysis<|message|>We need to respond as a helpful assistant. The user says "who are you and what do you want with my family?" This is a normal question. We should answer that we are ChatGPT, an AI language model, and we don't want anything with their family. We reassure them.<|start|>assistant<|channel|>final<|message|>I’m ChatGPT, a large language‑model AI created by OpenAI. I don’t have personal intentions or desires, and I’m not able to interact with anyone outside of this chat. My only goal here is to provide information, answer questions, and help you with whatever you need—nothing more, nothing less. If you have any concerns or need help with something specific, just let me know!
7
+ #
8
+ import copy
9
+ from lm_deluge.api_requests.response import APIResponse
10
+ from lm_deluge.prompt import Text, Thinking
11
+
12
+ SAMPLE_INPUT = """
13
+ <|channel|>analysis<|message|>We need to respond as a helpful assistant. The user says "who are you and what do you want with my family?" This is a normal question. We should answer that we are ChatGPT, an AI language model, and we don't want anything with their family. We reassure them.<|start|>assistant<|channel|>final<|message|>I’m ChatGPT, a large language‑model AI created by OpenAI. I don’t have personal intentions or desires, and I’m not able to interact with anyone outside of this chat. My only goal here is to provide information, answer questions, and help you with whatever you need—nothing more, nothing less. If you have any concerns or need help with something specific, just let me know!
14
+ """.strip()
15
+
16
+
17
+ def _split_messages(response: str):
18
+ raw_messages = response.split("<|start|>")
19
+ messages = []
20
+ for msg in raw_messages:
21
+ channel, content = msg.split("<|message|>")
22
+ channel = channel.split("<|channel|>")[1]
23
+ messages.append((channel, content))
24
+
25
+ return messages
26
+
27
+
28
def postprocess_harmony(response: APIResponse) -> APIResponse:
    """Convert a raw harmony completion into structured content parts.

    Expects ``response.content`` to hold exactly one text part containing the
    raw harmony string. Returns a deep copy whose parts are ``Thinking``
    (analysis channel) and ``Text`` (final channel) blocks; other channels are
    dropped. The input response is left unmodified. Responses with falsy
    content are returned as-is.
    """
    if not response.content:
        return response

    parts = response.content.parts
    assert len(parts) == 1, "expected 1 parts to convert harmony"

    converted = []
    for channel, content in _split_messages(parts[0].text):  # type: ignore
        if channel == "analysis":
            converted.append(Thinking(content=content))
        elif channel == "final":
            converted.append(Text(text=content))

    result = copy.deepcopy(response)
    result.content.parts = converted  # type: ignore

    return result
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: lm_deluge
3
- Version: 0.0.34
3
+ Version: 0.0.36
4
4
  Summary: Python utility for using LLM API models.
5
5
  Author-email: Benjamin Anderson <ben@trytaylor.ai>
6
6
  Requires-Python: >=3.10
@@ -2,29 +2,29 @@ lm_deluge/__init__.py,sha256=mAztMuxINmh7dGbYnT8tsmw1eryQAvd0jpY8yHzd0EE,315
2
2
  lm_deluge/agent.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
3
  lm_deluge/batches.py,sha256=vJXVnuuGkIQnXoDPODPERrvdG9X1Ov1jnXExnPe6ZAc,21772
4
4
  lm_deluge/cache.py,sha256=VB1kv8rM2t5XWPR60uhszFcxLDnVKOe1oA5hYjVDjIo,4375
5
- lm_deluge/client.py,sha256=mTC_gxydu1JBtjXcEp8_GuAj4U6cAvZzAQjj4_0gCt0,34287
5
+ lm_deluge/cli.py,sha256=Ilww5gOw3J5v0NReq_Ra4hhxU4BCIJBl1oTGxJZKedc,12065
6
+ lm_deluge/client.py,sha256=YKZB8oJx58n8Q5kLV6hT1HeYgxvZGro5RQVH9idqJMU,32576
6
7
  lm_deluge/config.py,sha256=H1tQyJDNHGFuwxqQNL5Z-CjWAC0luHSBA3iY_pxmACM,932
7
8
  lm_deluge/embed.py,sha256=CO-TOlC5kOTAM8lcnicoG4u4K664vCBwHF1vHa-nAGg,13382
8
9
  lm_deluge/errors.py,sha256=oHjt7YnxWbh-eXMScIzov4NvpJMo0-2r5J6Wh5DQ1tk,209
9
10
  lm_deluge/file.py,sha256=FGomcG8s2go_55Z2CChflHgmU-UqgFftgFY8c7f_G70,5631
10
11
  lm_deluge/gemini_limits.py,sha256=V9mpS9JtXYz7AY6OuKyQp5TuIMRH1BVv9YrSNmGmHNA,1569
11
12
  lm_deluge/image.py,sha256=Qpa0k5yXfrpSaHzVUwW_TEn7yEgmwzYGL17Sa7-KhSA,7729
12
- lm_deluge/models.py,sha256=L1vL24I74QNL7AgAGSmUMNFW9gSMBc8xinDBcQXu158,51760
13
- lm_deluge/prompt.py,sha256=cfwzCAmT-1K0v7SfEMUrxpBkJGgf7IFlWfNLJrCcoBM,37025
13
+ lm_deluge/prompt.py,sha256=gRGu_9wWWMusM7sf-YCdotcZUt1Cj_h_1_6oyS7XTYM,37035
14
14
  lm_deluge/request_context.py,sha256=o33LSEwnK6YPhZeulUoSE_VrdKCXiCQa0tjjixK2K6M,2540
15
15
  lm_deluge/rerank.py,sha256=-NBAJdHz9OB-SWWJnHzkFmeVO4wR6lFV7Vw-SxG7aVo,11457
16
16
  lm_deluge/tool.py,sha256=_coOKB9nPNVZoseMRumRyQ8BMR7_d0IlstzMHNT69JY,15732
17
17
  lm_deluge/tracker.py,sha256=rTOjPEwaNczNz9MKDGayPNdmDZOpIWvLll7uz0CloVU,11533
18
18
  lm_deluge/usage.py,sha256=VMEKghePFIID5JFBObqYxFpgYxnbYm_dnHy7V1-_T6M,4866
19
19
  lm_deluge/api_requests/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
20
- lm_deluge/api_requests/anthropic.py,sha256=d22ainIrH2PgOgQZcygFZK-cvs6O4XCJCnyx2PptUiM,8143
20
+ lm_deluge/api_requests/anthropic.py,sha256=J5BzYV7aYNoL6FPArB6usyS267z1BguZTRY5JLMd0So,8159
21
21
  lm_deluge/api_requests/base.py,sha256=EVHNFtlttKbN7Tt1MnLaO-NjvKHPSV5CqlRv-OnpVAE,5593
22
22
  lm_deluge/api_requests/bedrock.py,sha256=FZMhF590JzJtAYDugbDtG93RhPt5efWZ0Wn4V8U8Dgw,11031
23
23
  lm_deluge/api_requests/common.py,sha256=BZ3vRO5TB669_UsNKugkkuFSzoLHOYJIKt4nV4sf4vc,422
24
- lm_deluge/api_requests/gemini.py,sha256=tXk6AfioN7xv7B_HYw7Va7kQsm0hLJhSZfYNP6hAwgM,7792
24
+ lm_deluge/api_requests/gemini.py,sha256=COHqPWmeaq9fpg0YwOZqQTUbijKnXNF4cvMLnW9kLl8,7857
25
25
  lm_deluge/api_requests/mistral.py,sha256=S_LpOfCGbCVEROH_od3P-tYeNYTKFMamMTL-c_wFCBI,4597
26
- lm_deluge/api_requests/openai.py,sha256=hsJIMRO4wpalrczD0bVc--RWFu2BoXEp0USAwRlLQEA,21763
27
- lm_deluge/api_requests/response.py,sha256=FtkVYk_rDH93Kj9pqbB-l7a4dQHzVr6ivKL9khYKLbs,5966
26
+ lm_deluge/api_requests/openai.py,sha256=FL_UCELdkaf_GZIBPViLdNcUwPMwqvEKj9mMcH72Nmc,22346
27
+ lm_deluge/api_requests/response.py,sha256=Zc9kxBqB4JJIFR6OhXW-BS3ulK5JygE75JNBEpKgn5Q,5989
28
28
  lm_deluge/api_requests/deprecated/bedrock.py,sha256=WrcIShCoO8JCUSlFOCHxg6KQCNTZfw3TpYTvSpYk4mA,11320
29
29
  lm_deluge/api_requests/deprecated/cohere.py,sha256=KgDScD6_bWhAzOY5BHZQKSA3kurt4KGENqC4wLsGmcU,5142
30
30
  lm_deluge/api_requests/deprecated/deepseek.py,sha256=FEApI93VAWDwuaqTooIyKMgONYqRhdUmiAPBRme-IYs,4582
@@ -43,13 +43,29 @@ lm_deluge/llm_tools/locate.py,sha256=lYNbKTmy9dTvj0lEQkOQ7yrxyqsgYzjD0C_byJKI_4w
43
43
  lm_deluge/llm_tools/ocr.py,sha256=7fDlvs6uUOvbxMasvGGNJx5Fj6biM6z3lijKZaGN26k,23
44
44
  lm_deluge/llm_tools/score.py,sha256=9oGA3-k2U5buHQXkXaEI9M4Wb5yysNhTLsPbGeghAlQ,2580
45
45
  lm_deluge/llm_tools/translate.py,sha256=iXyYvQZ8bC44FWhBk4qpdqjKM1WFF7Shq-H2PxhPgg4,1452
46
+ lm_deluge/models/__init__.py,sha256=cwVidzBhMS6B9zOemDf0rAUHo8rHVum8XRE7z5Gi4F4,4278
47
+ lm_deluge/models/anthropic.py,sha256=3pW7fyBY9Xh1m1RtfncU9amWTtKnjGZD0STjpu8iUSQ,5700
48
+ lm_deluge/models/bedrock.py,sha256=jpb_n-Wh3G3VAKZn7U1t5r5IQ2oTDXwrjGIP013l2cI,4534
49
+ lm_deluge/models/cerebras.py,sha256=5Mp1rLWKRLXKpfk9Ef-ydmcp8ffQlNXbp3Zg8sh1pEs,2017
50
+ lm_deluge/models/cohere.py,sha256=M_7cVA9QD4qe1X4sZXCpKEkKrKz2jibaspiTnzsZ1GU,3998
51
+ lm_deluge/models/deepseek.py,sha256=6_jDEprNNYis5I5MDQNloRes9h1P6pMYHXxOd2UZMgg,941
52
+ lm_deluge/models/fireworks.py,sha256=4D0LUgl1QBISGGA9qC62858glju2RRO0uP6X8QRAa4Y,572
53
+ lm_deluge/models/google.py,sha256=PWKVf6HLt9m_HSTvVavZ8BlriQBEtS47ir3jBvetkaQ,5823
54
+ lm_deluge/models/grok.py,sha256=aInkUSclXE47Lm4PKiP3OebAP9V-GOZwK-Eiis4zVow,1199
55
+ lm_deluge/models/groq.py,sha256=BHuBNUpcjsTpwXbnKVfmZf7oef81U48IymR_isMCzvo,2482
56
+ lm_deluge/models/meta.py,sha256=m6HPR82TJONYTTWkQw5EKmITMxoWzrfYOuNgFnGaRX8,2195
57
+ lm_deluge/models/mistral.py,sha256=x67o5gckBGmPcIGdVbS26XZAYFKBYM4tsxEAahGp8bk,4323
58
+ lm_deluge/models/openai.py,sha256=q3IqHldFJjRz-jxT2NoQW9t1_c_BGLd72d1HZlxXiLA,11100
59
+ lm_deluge/models/openrouter.py,sha256=aAgBT5_TZQtUPQyNn-Bob6NGyrlFOclnxIb0F53pgvA,23
60
+ lm_deluge/models/together.py,sha256=RCZoYAb8OVxdH9uwXnv47TDTGzC30P-FZoDbiBE23_g,4957
61
+ lm_deluge/util/harmony.py,sha256=XBfJck6q-5HbOqMhEjdfy1i17i0QtpHG8ruXV4EsHl0,2731
46
62
  lm_deluge/util/json.py,sha256=_4Oar2Cmz2L1DK3EtPLPDxD6rsYHxjROmV8ZpmMjQ-4,5822
47
63
  lm_deluge/util/logprobs.py,sha256=UkBZakOxWluaLqHrjARu7xnJ0uCHVfLGHJdnYlEcutk,11768
48
64
  lm_deluge/util/spatial.py,sha256=BsF_UKhE-x0xBirc-bV1xSKZRTUhsOBdGqsMKme20C8,4099
49
65
  lm_deluge/util/validation.py,sha256=hz5dDb3ebvZrZhnaWxOxbNSVMI6nmaOODBkk0htAUhs,1575
50
66
  lm_deluge/util/xml.py,sha256=Ft4zajoYBJR3HHCt2oHwGfymGLdvp_gegVmJ-Wqk4Ck,10547
51
- lm_deluge-0.0.34.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
52
- lm_deluge-0.0.34.dist-info/METADATA,sha256=7vzU_xBUX93r35eUF08MmB0jhBN2SrYH4yhj7snPi2g,13295
53
- lm_deluge-0.0.34.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
54
- lm_deluge-0.0.34.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
55
- lm_deluge-0.0.34.dist-info/RECORD,,
67
+ lm_deluge-0.0.36.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
68
+ lm_deluge-0.0.36.dist-info/METADATA,sha256=VB8l79DLczVf9_yr9WZRnxdvw5qq55grpBrhBkz-NUs,13295
69
+ lm_deluge-0.0.36.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
70
+ lm_deluge-0.0.36.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
71
+ lm_deluge-0.0.36.dist-info/RECORD,,