lm-deluge 0.0.74__py3-none-any.whl → 0.0.75__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -12,6 +12,10 @@ from lm_deluge.prompt import (
12
12
  from lm_deluge.request_context import RequestContext
13
13
  from lm_deluge.tool import MCPServer, Tool
14
14
  from lm_deluge.usage import Usage
15
+ from lm_deluge.util.schema import (
16
+ prepare_output_schema,
17
+ transform_schema_for_anthropic,
18
+ )
15
19
 
16
20
  from ..models import APIModel
17
21
  from .base import APIRequestBase, APIResponse
@@ -87,10 +91,15 @@ def _build_anthropic_request(
87
91
  # Handle structured outputs (output_format)
88
92
  if context.output_schema:
89
93
  if model.supports_json:
94
+ base_schema = prepare_output_schema(context.output_schema)
95
+
96
+ # Apply Anthropic-specific transformations (move unsupported constraints to description)
97
+ transformed_schema = transform_schema_for_anthropic(base_schema)
98
+
90
99
  _add_beta(base_headers, "structured-outputs-2025-11-13")
91
100
  request_json["output_format"] = {
92
101
  "type": "json_schema",
93
- "schema": context.output_schema,
102
+ "schema": transformed_schema,
94
103
  }
95
104
  else:
96
105
  print(
@@ -197,9 +197,7 @@ async def _build_openai_bedrock_request(
197
197
  request_tools = []
198
198
  for tool in tools:
199
199
  if isinstance(tool, Tool):
200
- request_tools.append(
201
- tool.dump_for("openai-completions", strict=False)
202
- )
200
+ request_tools.append(tool.dump_for("openai-completions", strict=False))
203
201
  elif isinstance(tool, MCPServer):
204
202
  as_tools = await tool.to_tools()
205
203
  request_tools.extend(
@@ -9,6 +9,10 @@ from aiohttp import ClientResponse
9
9
  from lm_deluge.request_context import RequestContext
10
10
  from lm_deluge.tool import MCPServer, Tool
11
11
  from lm_deluge.warnings import maybe_warn
12
+ from lm_deluge.util.schema import (
13
+ prepare_output_schema,
14
+ transform_schema_for_openai,
15
+ )
12
16
 
13
17
  from ..config import SamplingParams
14
18
  from ..models import APIModel
@@ -87,11 +91,16 @@ async def _build_oa_chat_request(
87
91
  # Handle structured outputs (output_schema takes precedence over json_mode)
88
92
  if context.output_schema:
89
93
  if model.supports_json:
94
+ base_schema = prepare_output_schema(context.output_schema)
95
+
96
+ # Apply OpenAI-specific transformations (currently passthrough with copy)
97
+ transformed_schema = transform_schema_for_openai(base_schema)
98
+
90
99
  request_json["response_format"] = {
91
100
  "type": "json_schema",
92
101
  "json_schema": {
93
102
  "name": "response",
94
- "schema": context.output_schema,
103
+ "schema": transformed_schema,
95
104
  "strict": True,
96
105
  },
97
106
  }
@@ -326,11 +335,16 @@ async def _build_oa_responses_request(
326
335
  # Handle structured outputs (output_schema takes precedence over json_mode)
327
336
  if context.output_schema:
328
337
  if model.supports_json:
338
+ base_schema = prepare_output_schema(context.output_schema)
339
+
340
+ # Apply OpenAI-specific transformations (currently passthrough with copy)
341
+ transformed_schema = transform_schema_for_openai(base_schema)
342
+
329
343
  request_json["text"] = {
330
344
  "format": {
331
345
  "type": "json_schema",
332
346
  "name": "response",
333
- "schema": context.output_schema,
347
+ "schema": transformed_schema,
334
348
  "strict": True,
335
349
  }
336
350
  }
lm_deluge/client.py CHANGED
@@ -561,7 +561,7 @@ class _LLMClient(BaseModel):
561
561
  return_completions_only: Literal[True],
562
562
  show_progress: bool = ...,
563
563
  tools: list[Tool | dict | MCPServer] | None = ...,
564
- output_schema: dict | None = ...,
564
+ output_schema: type[BaseModel] | dict | None = ...,
565
565
  cache: CachePattern | None = ...,
566
566
  service_tier: Literal["auto", "default", "flex", "priority"] | None = ...,
567
567
  ) -> list[str | None]: ...
@@ -574,7 +574,7 @@ class _LLMClient(BaseModel):
574
574
  return_completions_only: Literal[False] = ...,
575
575
  show_progress: bool = ...,
576
576
  tools: list[Tool | dict | MCPServer] | None = ...,
577
- output_schema: dict | None = ...,
577
+ output_schema: type[BaseModel] | dict | None = ...,
578
578
  cache: CachePattern | None = ...,
579
579
  service_tier: Literal["auto", "default", "flex", "priority"] | None = ...,
580
580
  ) -> list[APIResponse]: ...
@@ -586,7 +586,7 @@ class _LLMClient(BaseModel):
586
586
  return_completions_only: bool = False,
587
587
  show_progress: bool = True,
588
588
  tools: list[Tool | dict | MCPServer] | None = None,
589
- output_schema: dict | None = None,
589
+ output_schema: type[BaseModel] | dict | None = None,
590
590
  cache: CachePattern | None = None,
591
591
  service_tier: Literal["auto", "default", "flex", "priority"] | None = None,
592
592
  ) -> list[APIResponse] | list[str | None] | dict[str, int]:
@@ -661,7 +661,7 @@ class _LLMClient(BaseModel):
661
661
  return_completions_only: bool = False,
662
662
  show_progress=True,
663
663
  tools: list[Tool | dict | MCPServer] | None = None,
664
- output_schema: dict | None = None,
664
+ output_schema: type[BaseModel] | dict | None = None,
665
665
  cache: CachePattern | None = None,
666
666
  ):
667
667
  return asyncio.run(
@@ -694,7 +694,7 @@ class _LLMClient(BaseModel):
694
694
  prompt: Prompt,
695
695
  *,
696
696
  tools: list[Tool | dict | MCPServer] | None = None,
697
- output_schema: dict | None = None,
697
+ output_schema: type[BaseModel] | dict | None = None,
698
698
  cache: CachePattern | None = None,
699
699
  service_tier: Literal["auto", "default", "flex", "priority"] | None = None,
700
700
  ) -> int:
@@ -731,7 +731,7 @@ class _LLMClient(BaseModel):
731
731
  prompt: Prompt,
732
732
  *,
733
733
  tools: list[Tool | dict | MCPServer] | None = None,
734
- output_schema: dict | None = None,
734
+ output_schema: type[BaseModel] | dict | None = None,
735
735
  cache: CachePattern | None = None,
736
736
  service_tier: Literal["auto", "default", "flex", "priority"] | None = None,
737
737
  ) -> APIResponse:
@@ -1,11 +1,14 @@
1
1
  from dataclasses import dataclass, field
2
2
  from functools import cached_property
3
- from typing import Any, Callable
3
+ from typing import Any, Callable, TYPE_CHECKING
4
4
 
5
5
  from .config import SamplingParams
6
6
  from .prompt import CachePattern, Conversation
7
7
  from .tracker import StatusTracker
8
8
 
9
+ if TYPE_CHECKING:
10
+ from pydantic import BaseModel
11
+
9
12
 
10
13
  @dataclass
11
14
  class RequestContext:
@@ -32,7 +35,7 @@ class RequestContext:
32
35
 
33
36
  # Optional features
34
37
  tools: list | None = None
35
- output_schema: dict | None = None
38
+ output_schema: "type[BaseModel] | dict | None" = None
36
39
  cache: CachePattern | None = None
37
40
  use_responses_api: bool = False
38
41
  background: bool = False
@@ -0,0 +1,412 @@
1
+ """Schema transformation utilities for structured outputs.
2
+
3
+ This module provides utilities for transforming Pydantic models and JSON schemas
4
+ to be compatible with provider-specific structured output requirements (OpenAI, Anthropic).
5
+
6
+ Key functions:
7
+ - to_strict_json_schema: Convert Pydantic model to strict JSON schema
8
+ - transform_schema_for_openai: Apply OpenAI-specific transformations
9
+ - transform_schema_for_anthropic: Apply Anthropic-specific transformations
10
+ """
11
+
12
+ from __future__ import annotations
13
+
14
+ import copy
15
+ import inspect
16
+ from typing import Any, TypeGuard, TYPE_CHECKING, Type
17
+
18
+ if TYPE_CHECKING:
19
+ from pydantic import BaseModel
20
+
21
+ try:
22
+ import pydantic
23
+ from pydantic import BaseModel as _BaseModel
24
+ except ImportError:
25
+ pydantic = None
26
+ _BaseModel = None # type: ignore
27
+
28
+
29
def is_pydantic_model(obj: Any) -> bool:
    """Return True when *obj* is a Pydantic model class (not an instance).

    Always evaluates to False when pydantic is not installed in this
    environment, so callers can probe safely without an import guard.
    """
    pydantic_available = pydantic is not None and _BaseModel is not None
    return (
        pydantic_available
        and inspect.isclass(obj)
        and issubclass(obj, _BaseModel)
    )
34
+
35
+
36
+ def is_dict(obj: object) -> TypeGuard[dict[str, object]]:
37
+ """Type guard for dictionaries."""
38
+ return isinstance(obj, dict)
39
+
40
+
41
def has_more_than_n_keys(obj: dict[str, object], n: int) -> bool:
    """Return True when *obj* holds strictly more than *n* keys.

    Args:
        obj: The dictionary to inspect.
        n: The key-count threshold.

    Returns:
        True if ``len(obj) > n``, otherwise False.
    """
    # dicts track their own size, so len() is O(1) -- no need to count keys
    # one by one as the previous implementation did.
    return len(obj) > n
49
+
50
+
51
def resolve_ref(*, root: dict[str, object], ref: str) -> object:
    """Resolve a local JSON Schema $ref pointer.

    Args:
        root: The root schema object
        ref: The $ref string (e.g., "#/$defs/MyType")

    Returns:
        The resolved schema object (always a dict -- every step of the
        pointer, including the final target, must resolve to a dictionary)

    Raises:
        ValueError: If the $ref format is invalid or cannot be resolved
    """
    if not ref.startswith("#/"):
        raise ValueError(f"Unexpected $ref format {ref!r}; Does not start with #/")

    path = ref[2:].split("/")
    resolved = root
    for token in path:
        # RFC 6901: within a pointer token, "~1" encodes "/" and "~0"
        # encodes "~" (order matters: "~1" first, then "~0").
        key = token.replace("~1", "/").replace("~0", "~")
        try:
            value = resolved[key]
        except KeyError:
            # Surface the documented ValueError instead of leaking a bare
            # KeyError to callers.
            raise ValueError(
                f"Could not resolve {ref!r}: missing key {key!r}"
            ) from None
        if not isinstance(value, dict):
            raise ValueError(
                f"Encountered non-dictionary entry while resolving {ref} - {resolved}"
            )
        resolved = value

    return resolved
78
+
79
+
80
def to_strict_json_schema(model: Type["BaseModel"]) -> dict[str, Any]:
    """Convert a Pydantic model class into a strict-mode JSON schema.

    Pulls the model's schema via ``model_json_schema()`` and normalizes it
    so it satisfies structured-output strict mode (closed objects, all
    properties required, expanded mixed $refs).

    Args:
        model: A Pydantic BaseModel class

    Returns:
        A JSON schema dict that conforms to strict mode requirements

    Raises:
        TypeError: If the model is not a Pydantic BaseModel
        ImportError: If pydantic is not installed
    """
    if pydantic is None or _BaseModel is None:
        raise ImportError(
            "pydantic is required for Pydantic model support. "
            "Install it with: pip install pydantic"
        )

    if not is_pydantic_model(model):
        raise TypeError(
            f"Expected a Pydantic BaseModel class, got {type(model).__name__}"
        )

    raw_schema = model.model_json_schema()
    return _ensure_strict_json_schema(raw_schema, path=(), root=raw_schema)
109
+
110
+
111
def prepare_output_schema(
    schema_obj: Type["BaseModel"] | dict[str, Any],
) -> dict[str, Any]:
    """Normalize a user-supplied output schema into strict JSON schema form.

    Args:
        schema_obj: Either a Pydantic BaseModel subclass or a JSON schema dict.

    Returns:
        A strict JSON schema suitable for provider-specific transformation.

    Notes:
        Dict inputs are deep-copied before normalization, so the caller's
        original schema object is never mutated.
    """
    if is_pydantic_model(schema_obj):
        return to_strict_json_schema(schema_obj)  # type: ignore[arg-type]

    if not is_dict(schema_obj):
        raise TypeError(
            "output_schema must be a Pydantic BaseModel subclass or a JSON schema dict"
        )

    working = copy.deepcopy(schema_obj)
    return _ensure_strict_json_schema(working, path=(), root=working)
141
+
142
+
143
def _ensure_strict_json_schema(
    json_schema: object,
    *,
    path: tuple[str, ...],
    root: dict[str, object],
) -> dict[str, Any]:
    """Recursively ensure a JSON schema conforms to strict mode requirements.

    This function:
    - Adds additionalProperties: false to all objects
    - Makes all properties required
    - Removes unsupported constraints and adds them to descriptions
    - Expands $refs that are mixed with other properties
    - Processes $defs, anyOf, allOf, etc.

    Note: the schema is mutated IN PLACE (and also returned). Callers that
    need to preserve the input must deep-copy it first (prepare_output_schema
    does this for dict inputs).

    Args:
        json_schema: The schema to transform
        path: Current path in the schema (for error messages)
        root: The root schema (for resolving $refs)

    Returns:
        The transformed schema

    Raises:
        TypeError: If json_schema (or a nested subschema) is not a dict
        ValueError: If a $ref is malformed or cannot be resolved
    """
    if not is_dict(json_schema):
        raise TypeError(f"Expected {json_schema} to be a dictionary; path={path}")

    # Process $defs recursively
    # (return value is discarded -- the recursion mutates each def in place)
    defs = json_schema.get("$defs")
    if is_dict(defs):
        for def_name, def_schema in defs.items():
            _ensure_strict_json_schema(
                def_schema, path=(*path, "$defs", def_name), root=root
            )

    # Process definitions recursively (legacy draft-07 style counterpart of $defs)
    definitions = json_schema.get("definitions")
    if is_dict(definitions):
        for definition_name, definition_schema in definitions.items():
            _ensure_strict_json_schema(
                definition_schema,
                path=(*path, "definitions", definition_name),
                root=root,
            )

    typ = json_schema.get("type")

    # Object types - add additionalProperties: false and make all fields required
    # (only when the author didn't set additionalProperties explicitly)
    if typ == "object" and "additionalProperties" not in json_schema:
        json_schema["additionalProperties"] = False

    properties = json_schema.get("properties")
    if is_dict(properties):
        # Make all properties required
        # (overwrites any existing "required" list -- strict mode demands
        # every declared property be present)
        json_schema["required"] = list(properties.keys())

        # Process each property recursively
        json_schema["properties"] = {
            key: _ensure_strict_json_schema(
                prop_schema, path=(*path, "properties", key), root=root
            )
            for key, prop_schema in properties.items()
        }

    # Arrays - process items schema
    items = json_schema.get("items")
    if is_dict(items):
        json_schema["items"] = _ensure_strict_json_schema(
            items, path=(*path, "items"), root=root
        )

    # Unions - process each variant
    any_of = json_schema.get("anyOf")
    if isinstance(any_of, list):
        json_schema["anyOf"] = [
            _ensure_strict_json_schema(
                variant, path=(*path, "anyOf", str(i)), root=root
            )
            for i, variant in enumerate(any_of)
        ]

    # Intersections - process each entry
    all_of = json_schema.get("allOf")
    if isinstance(all_of, list):
        if len(all_of) == 1:
            # Flatten single-element allOf
            # (merge the lone entry's keys into this schema and drop the wrapper;
            # update() lets the entry's keys win over any duplicates here)
            json_schema.update(
                _ensure_strict_json_schema(
                    all_of[0], path=(*path, "allOf", "0"), root=root
                )
            )
            json_schema.pop("allOf")
        else:
            json_schema["allOf"] = [
                _ensure_strict_json_schema(
                    entry, path=(*path, "allOf", str(i)), root=root
                )
                for i, entry in enumerate(all_of)
            ]

    # Remove None defaults (redundant with nullable)
    if "default" in json_schema and json_schema["default"] is None:
        json_schema.pop("default")

    # Expand $refs that are mixed with other properties
    # (a bare {"$ref": ...} with no siblings is left alone; only refs that
    # carry extra keys must be inlined, since strict mode forbids the mix)
    ref = json_schema.get("$ref")
    if ref and has_more_than_n_keys(json_schema, 1):
        if not isinstance(ref, str):
            raise ValueError(f"Received non-string $ref - {ref}")

        resolved = resolve_ref(root=root, ref=ref)
        if not is_dict(resolved):
            raise ValueError(
                f"Expected `$ref: {ref}` to resolve to a dictionary but got {resolved}"
            )

        # Properties from json_schema take priority over $ref
        # ({**resolved, **json_schema} re-applies local keys last so they win)
        json_schema.update({**resolved, **json_schema})
        json_schema.pop("$ref")

        # Re-process the expanded schema
        # (the inlined target may itself contain objects/refs to normalize)
        return _ensure_strict_json_schema(json_schema, path=path, root=root)

    return json_schema
266
+
267
+
268
+ def _move_constraints_to_description(
269
+ json_schema: dict[str, Any],
270
+ constraint_keys: list[str],
271
+ ) -> dict[str, Any]:
272
+ """Move unsupported constraints to the description field.
273
+
274
+ This helps the model follow constraints even when they can't be enforced
275
+ by the grammar.
276
+
277
+ Args:
278
+ json_schema: The schema to modify
279
+ constraint_keys: List of constraint keys to move to description
280
+
281
+ Returns:
282
+ The modified schema
283
+ """
284
+ constraints_found = {}
285
+
286
+ for key in constraint_keys:
287
+ if key in json_schema:
288
+ constraints_found[key] = json_schema.pop(key)
289
+
290
+ if constraints_found:
291
+ description = json_schema.get("description", "")
292
+ constraint_str = ", ".join(
293
+ f"{key}: {value}" for key, value in constraints_found.items()
294
+ )
295
+
296
+ if description:
297
+ json_schema["description"] = f"{description}\n\n{{{constraint_str}}}"
298
+ else:
299
+ json_schema["description"] = f"{{{constraint_str}}}"
300
+
301
+ return json_schema
302
+
303
+
304
def transform_schema_for_openai(schema: dict[str, Any]) -> dict[str, Any]:
    """Produce an independent copy of *schema* for an OpenAI request.

    OpenAI Structured Outputs accept the constraint keywords we emit
    (string lengths, numeric bounds, etc.), so no rewriting is performed;
    deep-copying merely shields the caller's schema from later mutation.
    """
    schema_for_request = copy.deepcopy(schema)
    return schema_for_request
313
+
314
+
315
def _transform_schema_recursive_anthropic(
    json_schema: dict[str, Any],
    root: dict[str, Any],
) -> dict[str, Any]:
    """Recursively strip constraints Anthropic cannot enforce (in place)."""
    if not is_dict(json_schema):
        return json_schema

    # Constraint keywords Anthropic's grammar can't express, per JSON type.
    numeric_keys = [
        "minimum",
        "maximum",
        "exclusiveMinimum",
        "exclusiveMaximum",
        "multipleOf",
    ]
    unsupported_by_type = {
        "string": ["minLength", "maxLength", "pattern"],
        "number": numeric_keys,
        "integer": numeric_keys,
        "array": ["minItems", "maxItems"],
    }

    # Walk shared definitions first ($defs and the legacy "definitions" key).
    for container_key in ("$defs", "definitions"):
        container = json_schema.get(container_key)
        if is_dict(container):
            for nested in container.values():
                if is_dict(nested):
                    _transform_schema_recursive_anthropic(nested, root)

    # Move this node's unsupported constraints into its description.
    typ = json_schema.get("type")
    keys_to_move = unsupported_by_type.get(typ) if isinstance(typ, str) else None
    if keys_to_move:
        _move_constraints_to_description(json_schema, keys_to_move)

    # Then recurse into every nested schema location.
    props = json_schema.get("properties")
    if is_dict(props):
        for prop_schema in props.values():
            if is_dict(prop_schema):
                _transform_schema_recursive_anthropic(prop_schema, root)

    items = json_schema.get("items")
    if is_dict(items):
        _transform_schema_recursive_anthropic(items, root)

    for union_key in ("anyOf", "allOf"):
        variants = json_schema.get(union_key)
        if isinstance(variants, list):
            for variant in variants:
                if is_dict(variant):
                    _transform_schema_recursive_anthropic(variant, root)

    return json_schema
383
+
384
+
385
def transform_schema_for_anthropic(schema: dict[str, Any]) -> dict[str, Any]:
    """Copy *schema* and rewrite it for Anthropic structured outputs.

    Unsupported constraint keywords are moved into description text; the
    caller's original schema is never mutated.
    """
    working = copy.deepcopy(schema)
    return _transform_schema_recursive_anthropic(working, working)
390
+
391
+
392
def get_json_schema(obj: Type["BaseModel"] | dict[str, Any]) -> dict[str, Any]:
    """Return the raw JSON schema for a Pydantic model class or a dict.

    Pydantic models are asked for their ``model_json_schema()``; dicts are
    assumed to already be JSON schemas and returned as-is (no copy is made).

    Args:
        obj: Either a Pydantic BaseModel class or a dict

    Returns:
        The JSON schema dict

    Raises:
        TypeError: For any other input type.
    """
    # The two accepted kinds are disjoint (a model class is never a dict),
    # so check order does not matter.
    if is_dict(obj):
        return obj  # type: ignore
    if is_pydantic_model(obj):
        # is_pydantic_model guarantees model_json_schema is available
        return obj.model_json_schema()  # type: ignore
    raise TypeError(
        f"Expected Pydantic BaseModel or dict, got {type(obj).__name__}"
    )
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: lm_deluge
3
- Version: 0.0.74
3
+ Version: 0.0.75
4
4
  Summary: Python utility for using LLM API models.
5
5
  Author-email: Benjamin Anderson <ben@trytaylor.ai>
6
6
  Requires-Python: >=3.10
@@ -2,7 +2,7 @@ lm_deluge/__init__.py,sha256=zF5lAitfgJ8A28IXJ5BE9OUCqGOqSnGOWn3ZIlizNyY,822
2
2
  lm_deluge/batches.py,sha256=Km6QM5_7BlF2qEyo4WPlhkaZkpzrLqf50AaveHXQOoY,25127
3
3
  lm_deluge/cache.py,sha256=xO2AIYvP3tUpTMKQjwQQYfGRJSRi6e7sMlRhLjsS-u4,4873
4
4
  lm_deluge/cli.py,sha256=Ilww5gOw3J5v0NReq_Ra4hhxU4BCIJBl1oTGxJZKedc,12065
5
- lm_deluge/client.py,sha256=zR99e3apeH8A7kHnfaOnPoaa1jr3F-1Yzw6ZJL4zgjc,41874
5
+ lm_deluge/client.py,sha256=pDar-uHG3XeXazhuWkZgalbyKHHalj51-ZhVJ6iHU0c,41982
6
6
  lm_deluge/config.py,sha256=d7lS6i5-J23nM-UPjJ_gFcsmfgI8aPVwqw_IGGb1x3I,975
7
7
  lm_deluge/embed.py,sha256=CO-TOlC5kOTAM8lcnicoG4u4K664vCBwHF1vHa-nAGg,13382
8
8
  lm_deluge/errors.py,sha256=oHjt7YnxWbh-eXMScIzov4NvpJMo0-2r5J6Wh5DQ1tk,209
@@ -10,21 +10,21 @@ lm_deluge/file.py,sha256=PTmlJQ-IaYcYUFun9V0bJ1NPVP84edJrR0hvCMWFylY,19697
10
10
  lm_deluge/image.py,sha256=5AMXmn2x47yXeYNfMSMAOWcnlrOxxOel-4L8QCJwU70,8928
11
11
  lm_deluge/mock_openai.py,sha256=-u4kxSzwoxDt_2fLh5LaiqETnu0Jg_VDL7TWAAYHGNw,21762
12
12
  lm_deluge/prompt.py,sha256=_6xkMuEKJEizRNbjQ5gbZesoE_vtwQe2wJHdm-E6vP0,64002
13
- lm_deluge/request_context.py,sha256=Eek8jebtURTFlVjVH8oQEq7Ory50rqeB1K85cUAMT_8,2663
13
+ lm_deluge/request_context.py,sha256=eM_cCXZsrVb5FF3VQl6u1dZeZrWv00wW42Cr_Fjs5oA,2752
14
14
  lm_deluge/rerank.py,sha256=-NBAJdHz9OB-SWWJnHzkFmeVO4wR6lFV7Vw-SxG7aVo,11457
15
15
  lm_deluge/tool.py,sha256=ipgNy4OpfH3CA9OPQq5zfn1xO8H08GMvDynB8ZPQ5mA,30617
16
16
  lm_deluge/tracker.py,sha256=aeS9GUJpgOSQRVXAnGDvlMO8qYpSxpTNLYj2hrMg0m8,14757
17
17
  lm_deluge/usage.py,sha256=xz9tAw2hqaJvv9aAVhnQ6N1Arn7fS8Shb28VwCW26wI,5136
18
18
  lm_deluge/warnings.py,sha256=xXXYXEfaaSVr__16BKOEEWLdfZi1L-2ylzTrXTRyO18,1748
19
19
  lm_deluge/api_requests/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
20
- lm_deluge/api_requests/anthropic.py,sha256=-jj2mdAjW28l-GhvcBeSfkgCkOPAx1cjr03KFUOBBag,10062
20
+ lm_deluge/api_requests/anthropic.py,sha256=OvkciXTHyrG1cFyC1vv6nYyCFTqtMgt1r15Q-pbHiUQ,10411
21
21
  lm_deluge/api_requests/base.py,sha256=mXEM85mcU_5LD-ugELpCl28tv-tpHKcaxerTIVLQZVo,10436
22
- lm_deluge/api_requests/bedrock.py,sha256=flC777E3sZZZ4qUWleO1vF0b3NVXuR8I7ayV4isosKc,15635
22
+ lm_deluge/api_requests/bedrock.py,sha256=mY1xTvgfCLyqLlfFFmu_baKgkVq1Df1_MJXeN_G1jWQ,15597
23
23
  lm_deluge/api_requests/chat_reasoning.py,sha256=sJvstvKFqsSBUjYcwxzGt2_FH4cEp3Z6gKcBPyPjGwk,236
24
24
  lm_deluge/api_requests/common.py,sha256=BZ3vRO5TB669_UsNKugkkuFSzoLHOYJIKt4nV4sf4vc,422
25
25
  lm_deluge/api_requests/gemini.py,sha256=4uD7fQl0yWyAvYkPNi3oO1InBnvYfo5_QR6k-va-2GI,7838
26
26
  lm_deluge/api_requests/mistral.py,sha256=8JZP2CDf1XZfaPcTk0WS4q-VfYYj58ptpoH8LD3MQG4,4528
27
- lm_deluge/api_requests/openai.py,sha256=bS8Kvv3BJgeYoEltJgVmEk7sm5OZL6OCjDnCw3nvIgU,28283
27
+ lm_deluge/api_requests/openai.py,sha256=E0oakhcb2T5Swfn6ATMjRZKuLyRrx4Zj5SREo1JILfc,28841
28
28
  lm_deluge/api_requests/response.py,sha256=vG194gAH5p7ulpNy4qy5Pryfb1p3ZV21-YGoj__ru3E,7436
29
29
  lm_deluge/api_requests/deprecated/bedrock.py,sha256=WrcIShCoO8JCUSlFOCHxg6KQCNTZfw3TpYTvSpYk4mA,11320
30
30
  lm_deluge/api_requests/deprecated/cohere.py,sha256=KgDScD6_bWhAzOY5BHZQKSA3kurt4KGENqC4wLsGmcU,5142
@@ -66,11 +66,12 @@ lm_deluge/presets/meta.py,sha256=QrreLAVgYS6VIC_NQth1vgGAYuxY38jFQQZSe6ot7C8,364
66
66
  lm_deluge/util/harmony.py,sha256=XBfJck6q-5HbOqMhEjdfy1i17i0QtpHG8ruXV4EsHl0,2731
67
67
  lm_deluge/util/json.py,sha256=_4Oar2Cmz2L1DK3EtPLPDxD6rsYHxjROmV8ZpmMjQ-4,5822
68
68
  lm_deluge/util/logprobs.py,sha256=UkBZakOxWluaLqHrjARu7xnJ0uCHVfLGHJdnYlEcutk,11768
69
+ lm_deluge/util/schema.py,sha256=q6uwhA4s1lM2dHT1Kwc46E7OY1VecMOtTEI0PTFn6tA,13206
69
70
  lm_deluge/util/spatial.py,sha256=BsF_UKhE-x0xBirc-bV1xSKZRTUhsOBdGqsMKme20C8,4099
70
71
  lm_deluge/util/validation.py,sha256=hz5dDb3ebvZrZhnaWxOxbNSVMI6nmaOODBkk0htAUhs,1575
71
72
  lm_deluge/util/xml.py,sha256=Ft4zajoYBJR3HHCt2oHwGfymGLdvp_gegVmJ-Wqk4Ck,10547
72
- lm_deluge-0.0.74.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
73
- lm_deluge-0.0.74.dist-info/METADATA,sha256=6eIB7IwAxqRTxfOeXCl9JDmyCM9KSCuuS2EfdhLfTm8,13514
74
- lm_deluge-0.0.74.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
75
- lm_deluge-0.0.74.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
76
- lm_deluge-0.0.74.dist-info/RECORD,,
73
+ lm_deluge-0.0.75.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
74
+ lm_deluge-0.0.75.dist-info/METADATA,sha256=rMPfj2uVgeUOix65rco3_i7qowOM3S51ZR-u-Sc6rec,13514
75
+ lm_deluge-0.0.75.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
76
+ lm_deluge-0.0.75.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
77
+ lm_deluge-0.0.75.dist-info/RECORD,,