lm-deluge 0.0.55__tar.gz → 0.0.57__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lm-deluge might be problematic.

Files changed (80)
  1. {lm_deluge-0.0.55/src/lm_deluge.egg-info → lm_deluge-0.0.57}/PKG-INFO +1 -1
  2. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/pyproject.toml +1 -1
  3. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/__init__.py +2 -1
  4. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/client.py +12 -12
  5. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/prompt.py +6 -7
  6. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/tool.py +338 -18
  7. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/tracker.py +68 -1
  8. {lm_deluge-0.0.55 → lm_deluge-0.0.57/src/lm_deluge.egg-info}/PKG-INFO +1 -1
  9. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge.egg-info/SOURCES.txt +0 -2
  10. lm_deluge-0.0.55/src/lm_deluge/agent.py +0 -0
  11. lm_deluge-0.0.55/src/lm_deluge/gemini_limits.py +0 -65
  12. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/LICENSE +0 -0
  13. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/README.md +0 -0
  14. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/setup.cfg +0 -0
  15. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/api_requests/__init__.py +0 -0
  16. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/api_requests/anthropic.py +0 -0
  17. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/api_requests/base.py +0 -0
  18. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/api_requests/bedrock.py +0 -0
  19. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/api_requests/common.py +0 -0
  20. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
  21. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
  22. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
  23. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
  24. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
  25. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/api_requests/gemini.py +0 -0
  26. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/api_requests/mistral.py +0 -0
  27. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/api_requests/openai.py +0 -0
  28. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/api_requests/response.py +0 -0
  29. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/batches.py +0 -0
  30. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
  31. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
  32. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
  33. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
  34. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/built_in_tools/base.py +0 -0
  35. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/built_in_tools/openai.py +0 -0
  36. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/cache.py +0 -0
  37. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/cli.py +0 -0
  38. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/config.py +0 -0
  39. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/embed.py +0 -0
  40. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/errors.py +0 -0
  41. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/file.py +0 -0
  42. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/image.py +0 -0
  43. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/llm_tools/__init__.py +0 -0
  44. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/llm_tools/classify.py +0 -0
  45. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/llm_tools/extract.py +0 -0
  46. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/llm_tools/locate.py +0 -0
  47. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/llm_tools/ocr.py +0 -0
  48. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/llm_tools/score.py +0 -0
  49. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/llm_tools/translate.py +0 -0
  50. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/models/__init__.py +0 -0
  51. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/models/anthropic.py +0 -0
  52. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/models/bedrock.py +0 -0
  53. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/models/cerebras.py +0 -0
  54. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/models/cohere.py +0 -0
  55. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/models/deepseek.py +0 -0
  56. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/models/fireworks.py +0 -0
  57. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/models/google.py +0 -0
  58. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/models/grok.py +0 -0
  59. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/models/groq.py +0 -0
  60. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/models/meta.py +0 -0
  61. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/models/mistral.py +0 -0
  62. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/models/openai.py +0 -0
  63. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/models/openrouter.py +0 -0
  64. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/models/together.py +0 -0
  65. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/presets/cerebras.py +0 -0
  66. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/presets/meta.py +0 -0
  67. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/request_context.py +0 -0
  68. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/rerank.py +0 -0
  69. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/usage.py +0 -0
  70. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/util/harmony.py +0 -0
  71. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/util/json.py +0 -0
  72. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/util/logprobs.py +0 -0
  73. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/util/spatial.py +0 -0
  74. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/util/validation.py +0 -0
  75. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge/util/xml.py +0 -0
  76. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
  77. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge.egg-info/requires.txt +0 -0
  78. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/src/lm_deluge.egg-info/top_level.txt +0 -0
  79. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/tests/test_builtin_tools.py +0 -0
  80. {lm_deluge-0.0.55 → lm_deluge-0.0.57}/tests/test_native_mcp_server.py +0 -0

PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.55
+Version: 0.0.57
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10

pyproject.toml

@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]

 [project]
 name = "lm_deluge"
-version = "0.0.55"
+version = "0.0.57"
 authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
 description = "Python utility for using LLM API models."
 readme = "README.md"

src/lm_deluge/__init__.py

@@ -1,7 +1,7 @@
 from .client import APIResponse, LLMClient, SamplingParams
 from .file import File
 from .prompt import Conversation, Message
-from .tool import Tool
+from .tool import Tool, ToolParams

 # dotenv.load_dotenv() - don't do this, fucks with other packages

@@ -12,5 +12,6 @@ __all__ = [
     "Conversation",
     "Message",
     "Tool",
+    "ToolParams",
     "File",
 ]
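
The net effect of the __init__.py changes: ToolParams is now part of the public API alongside Tool. A minimal import sketch (assuming the package is installed; Tool.from_params comes from the tool.py changes below):

    from lm_deluge import Tool, ToolParams

    params = ToolParams({"city": str})
    tool = Tool.from_params("get_weather", params, description="Look up current weather")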

src/lm_deluge/client.py

@@ -295,6 +295,7 @@ class _LLMClient(BaseModel):
         # Handle successful response
         if not response.is_error:
             context.status_tracker.task_succeeded(context.task_id)
+            context.status_tracker.track_usage(response)
             # Cache successful responses immediately
             if self.cache and response.completion:
                 # print(f"DEBUG: Caching successful response")
@@ -333,6 +334,8 @@ class _LLMClient(BaseModel):

         # No retries left or no retry queue - final failure
         context.status_tracker.task_failed(context.task_id)
+        # Track usage even for failed requests if they made an API call
+        context.status_tracker.track_usage(response)
         context.maybe_callback(response, context.status_tracker)

         # Print final error message
@@ -615,23 +618,20 @@ class _LLMClient(BaseModel):
                 mcp_tools = await tool.to_tools()
                 expanded_tools.extend(mcp_tools)

-        last_response: APIResponse | None = None
+        response: APIResponse | None = None

         for _ in range(max_rounds):
-            responses = await self.process_prompts_async(
-                [conversation],
+            response = await self.start(
+                conversation,
                 tools=tools,  # type: ignore
-                return_completions_only=False,
-                show_progress=show_progress,
             )

-            last_response = responses[0]
-            if last_response is None or last_response.content is None:
+            if response is None or response.content is None:
                 break

-            conversation = conversation.with_message(last_response.content)
+            conversation = conversation.with_message(response.content)

-            tool_calls = last_response.content.tool_calls
+            tool_calls = response.content.tool_calls
             if not tool_calls:
                 break
@@ -654,12 +654,12 @@ class _LLMClient(BaseModel):
             if not isinstance(result, (str, dict, list)):
                 result = str(result)

-            conversation.add_tool_result(call.id, result)  # type: ignore
+            conversation.with_tool_result(call.id, result)  # type: ignore

-        if last_response is None:
+        if response is None:
             raise RuntimeError("model did not return a response")

-        return conversation, last_response
+        return conversation, response

     def run_agent_loop_sync(
         self,
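
Taken together, the client.py hunks swap the one-element batch call (process_prompts_async) for a single-request self.start() per agent-loop round, rename last_response to response, feed usage into the tracker on both success and failure, and switch tool-result appending to the renamed with_tool_result. A rough usage sketch of the loop — run_agent_loop, Conversation.user, and the LLMClient constructor are inferred from the surrounding code and the package's usual patterns, not confirmed by this diff:

    import asyncio
    from lm_deluge import Conversation, LLMClient

    async def main():
        client = LLMClient("gpt-4o-mini")          # model name illustrative
        conv = Conversation.user("What is 2 + 2?")  # constructor assumed
        # per the hunk above, the loop returns (conversation, final response)
        conv, resp = await client.run_agent_loop(conv, tools=[], max_rounds=5)
        print(resp.completion)

    asyncio.run(main())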

src/lm_deluge/prompt.py

@@ -144,8 +144,8 @@ class ToolResult:
     def oa_chat(
         self,
     ) -> dict:  # OpenAI Chat Completions - tool results are separate messages
-        print("serializing toolresult with oa_chat...")
-        print("typeof self.result:", type(self.result))
+        # print("serializing toolresult with oa_chat...")
+        # print("typeof self.result:", type(self.result))
         if isinstance(self.result, str):
             return {
                 "role": "tool",
@@ -174,8 +174,7 @@ class ToolResult:
             raise ValueError("result type not supported")

     def oa_resp(self) -> dict:  # OpenAI Responses
-        print("serializing toolresult with oa_chat...")
-        print("typeof self.result:", type(self.result))
+        # print("typeof self.result:", type(self.result))
         # if normal (not built-in just return the regular output
         if not self.built_in:
             result = (
@@ -466,7 +465,7 @@ class Message:
         self.parts.append(ToolCall(id=id, name=name, arguments=arguments))
         return self

-    def add_tool_result(
+    def with_tool_result(
         self, tool_call_id: str, result: str | list[ToolResultPart]
     ) -> "Message":
         """Append a tool result block and return self for chaining."""
@@ -1189,11 +1188,11 @@ class Conversation:
         """
         if self.messages and self.messages[-1].role == "tool":
             # Append to existing tool message (parallel tool calls)
-            self.messages[-1].add_tool_result(tool_call_id, result)
+            self.messages[-1].with_tool_result(tool_call_id, result)
         else:
             # Create new tool message
             tool_msg = Message("tool", [])
-            tool_msg.add_tool_result(tool_call_id, result)
+            tool_msg.with_tool_result(tool_call_id, result)
             self.messages.append(tool_msg)
         return self

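
The prompt.py changes silence debug prints and rename Message.add_tool_result to with_tool_result to match the builder-style API ("return self for chaining" per its docstring); Conversation's helper delegates to the renamed method. A small sketch of the renamed call:

    from lm_deluge.prompt import Message

    # with_tool_result returns self, so calls chain
    msg = Message("tool", []).with_tool_result("call_123", "4")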

src/lm_deluge/tool.py

@@ -1,7 +1,17 @@
 import asyncio
 import inspect
 from concurrent.futures import ThreadPoolExecutor
-from typing import Any, Callable, Coroutine, Literal, TypedDict, get_type_hints
+from typing import (
+    Any,
+    Callable,
+    Coroutine,
+    Literal,
+    Type,
+    TypedDict,
+    get_args,
+    get_origin,
+    get_type_hints,
+)

 from fastmcp import Client  # pip install fastmcp >= 2.0
 from mcp.types import Tool as MCPTool
@@ -11,6 +21,196 @@ from lm_deluge.image import Image
 from lm_deluge.prompt import Text, ToolResultPart


+def _python_type_to_json_schema_enhanced(python_type: Any) -> dict[str, Any]:
+    """
+    Convert Python type annotations to JSON Schema.
+    Handles: primitives, Optional, Literal, list[T], dict[str, T], Union.
+    """
+    # Get origin and args for generic types
+    origin = get_origin(python_type)
+    args = get_args(python_type)
+
+    # Handle Optional[T] or T | None
+    if origin is type(None) or python_type is type(None):
+        return {"type": "null"}
+
+    # Handle Union types (including Optional)
+    if origin is Literal:
+        # Literal["a", "b"] -> enum
+        return {"type": "string", "enum": list(args)}
+
+    # Handle list[T]
+    if origin is list:
+        if args:
+            items_schema = _python_type_to_json_schema_enhanced(args[0])
+            return {"type": "array", "items": items_schema}
+        return {"type": "array"}
+
+    # Handle dict[str, T]
+    if origin is dict:
+        if len(args) >= 2:
+            # For dict[str, T], we can set additionalProperties
+            value_schema = _python_type_to_json_schema_enhanced(args[1])
+            return {"type": "object", "additionalProperties": value_schema}
+        return {"type": "object"}
+
+    # Handle basic types
+    if python_type is int:
+        return {"type": "integer"}
+    elif python_type is float:
+        return {"type": "number"}
+    elif python_type is str:
+        return {"type": "string"}
+    elif python_type is bool:
+        return {"type": "boolean"}
+    elif python_type is list:
+        return {"type": "array"}
+    elif python_type is dict:
+        return {"type": "object"}
+    else:
+        # Default to string for unknown types
+        return {"type": "string"}
+
+
+class ToolParams:
+    """
+    Helper class for constructing tool parameters more easily.
+
+    Usage:
+        # Simple constructor with Python types
+        params = ToolParams({"city": str, "age": int})
+
+        # With extras (description, enum, etc)
+        params = ToolParams({
+            "operation": (str, {"enum": ["add", "sub"], "description": "Math operation"}),
+            "value": (int, {"description": "The value"})
+        })
+
+        # From Pydantic model
+        params = ToolParams.from_pydantic(MyModel)
+
+        # From TypedDict
+        params = ToolParams.from_typed_dict(MyTypedDict)
+
+        # From existing JSON Schema
+        params = ToolParams.from_json_schema(schema_dict, required=["field1"])
+    """
+
+    def __init__(self, spec: dict[str, Any]):
+        """
+        Create ToolParams from a dict mapping parameter names to types or (type, extras) tuples.
+
+        Args:
+            spec: Dict where values can be:
+                - A Python type (str, int, list[str], etc.)
+                - A tuple of (type, extras_dict) for additional JSON Schema properties
+                - An already-formed JSON Schema dict (passed through as-is)
+        """
+        self.parameters: dict[str, Any] = {}
+        self.required: list[str] = []
+
+        for param_name, param_spec in spec.items():
+            # If it's a tuple, extract (type, extras)
+            if isinstance(param_spec, tuple):
+                param_type, extras = param_spec
+                schema = _python_type_to_json_schema_enhanced(param_type)
+                schema.update(extras)
+                self.parameters[param_name] = schema
+                # Mark as required unless explicitly marked as optional
+                if extras.get("optional") is not True:
+                    self.required.append(param_name)
+            # If it's already a dict with "type" key, use as-is
+            elif isinstance(param_spec, dict) and "type" in param_spec:
+                self.parameters[param_name] = param_spec
+                # Assume required unless marked optional
+                if param_spec.get("optional") is not True:
+                    self.required.append(param_name)
+            # Otherwise treat as a Python type
+            else:
+                self.parameters[param_name] = _python_type_to_json_schema_enhanced(
+                    param_spec
+                )
+                self.required.append(param_name)
+
+    @classmethod
+    def from_pydantic(cls, model: Type[BaseModel]) -> "ToolParams":
+        """
+        Create ToolParams from a Pydantic model.
+
+        Args:
+            model: A Pydantic BaseModel class
+        """
+        # Get the JSON schema from Pydantic
+        schema = model.model_json_schema()
+        properties = schema.get("properties", {})
+        required = schema.get("required", [])
+
+        return cls.from_json_schema(properties, required)
+
+    @classmethod
+    def from_typed_dict(cls, typed_dict: Type) -> "ToolParams":
+        """
+        Create ToolParams from a TypedDict.
+
+        Args:
+            typed_dict: A TypedDict class
+        """
+        hints = get_type_hints(typed_dict)
+
+        # TypedDict doesn't have a built-in way to mark optional fields,
+        # but we can check for Optional in the type hints
+        params = {}
+        required = []
+
+        for field_name, field_type in hints.items():
+            # Check if it's Optional (Union with None)
+            origin = get_origin(field_type)
+            # args = get_args(field_type)
+
+            is_optional = False
+            actual_type = field_type
+
+            # Check for Union types (including Optional[T] which is Union[T, None])
+            if origin is type(None):
+                is_optional = True
+                actual_type = type(None)
+
+            # For now, treat all TypedDict fields as required unless they're explicitly Optional
+            schema = _python_type_to_json_schema_enhanced(actual_type)
+            params[field_name] = schema
+
+            if not is_optional:
+                required.append(field_name)
+
+        instance = cls.__new__(cls)
+        instance.parameters = params
+        instance.required = required
+        return instance
+
+    @classmethod
+    def from_json_schema(
+        cls, properties: dict[str, Any], required: list[str] | None = None
+    ) -> "ToolParams":
+        """
+        Create ToolParams from an existing JSON Schema properties dict.
+
+        Args:
+            properties: The "properties" section of a JSON Schema
+            required: List of required field names
+        """
+        instance = cls.__new__(cls)
+        instance.parameters = properties
+        instance.required = required or []
+        return instance
+
+    def to_dict(self) -> dict[str, Any]:
+        """
+        Convert to a dict with 'parameters' and 'required' keys.
+        Useful for unpacking into Tool constructor.
+        """
+        return {"parameters": self.parameters, "required": self.required}
+
+
 async def _load_all_mcp_tools(client: Client) -> list["Tool"]:
     metas: list[MCPTool] = await client.list_tools()

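
Putting the docstring's usage patterns together — a sketch of what ToolParams produces, with the expected schemas in comments (derived by tracing _python_type_to_json_schema_enhanced above, not from the package's tests):

    from lm_deluge.tool import ToolParams

    params = ToolParams({
        "city": str,          # {"type": "string"}, required
        "tags": list[str],    # {"type": "array", "items": {"type": "string"}}, required
        "operation": (str, {"enum": ["add", "sub"], "description": "Math operation"}),
    })
    print(params.parameters["tags"])  # {'type': 'array', 'items': {'type': 'string'}}
    print(params.required)            # ['city', 'tags', 'operation']
    print(params.to_dict())           # {'parameters': {...}, 'required': [...]}

Note that the (type, extras) form merges extras straight into the generated schema via schema.update(extras), so any key passes through, including the non-standard "optional", which doubles as the required/optional flag.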
@@ -79,6 +279,24 @@ class Tool(BaseModel):
         )
         return v

+    @field_validator("parameters", mode="before")
+    @classmethod
+    def validate_parameters(cls, v: Any) -> dict[str, Any] | None:
+        """Accept ToolParams objects and convert to dict for backwards compatibility."""
+        if isinstance(v, ToolParams):
+            return v.parameters
+        return v
+
+    def model_post_init(self, __context: Any) -> None:
+        """
+        After validation, if parameters came from ToolParams, also update required list.
+        This is called by Pydantic after __init__.
+        """
+        # This is a bit tricky - we need to capture the required list from ToolParams
+        # Since Pydantic has already converted it in the validator, we can't access it here
+        # Instead, we'll handle this differently in the convenience constructors
+        pass
+
     def _is_async(self) -> bool:
         return inspect.iscoroutinefunction(self.run)

@@ -143,7 +361,7 @@
             param_type = type_hints.get(param_name, str)

             # Convert Python types to JSON Schema types
-            json_type = cls._python_type_to_json_schema(param_type)
+            json_type = _python_type_to_json_schema_enhanced(param_type)

             parameters[param_name] = json_type

@@ -209,6 +427,119 @@
                 return t
         raise ValueError(f"Tool '{tool_name}' not found on that server")

+    @classmethod
+    def from_params(
+        cls,
+        name: str,
+        params: ToolParams,
+        *,
+        description: str | None = None,
+        run: Callable | None = None,
+        **kwargs,
+    ) -> "Tool":
+        """
+        Create a Tool from a ToolParams object.
+
+        Args:
+            name: Tool name
+            params: ToolParams object defining the parameter schema
+            description: Optional description
+            run: Optional callable to execute the tool
+            **kwargs: Additional Tool arguments
+
+        Example:
+            params = ToolParams({"city": str, "age": int})
+            tool = Tool.from_params("get_user", params, run=my_function)
+        """
+        return cls(
+            name=name,
+            description=description,
+            parameters=params.parameters,
+            required=params.required,
+            run=run,
+            **kwargs,
+        )
+
+    @classmethod
+    def from_pydantic(
+        cls,
+        name: str,
+        model: Type[BaseModel],
+        *,
+        description: str | None = None,
+        run: Callable | None = None,
+        **kwargs,
+    ) -> "Tool":
+        """
+        Create a Tool from a Pydantic model.
+
+        Args:
+            name: Tool name
+            model: Pydantic BaseModel class
+            description: Optional description (defaults to model docstring)
+            run: Optional callable to execute the tool
+            **kwargs: Additional Tool arguments
+
+        Example:
+            class UserQuery(BaseModel):
+                city: str
+                age: int
+
+            tool = Tool.from_pydantic("get_user", UserQuery, run=my_function)
+        """
+        params = ToolParams.from_pydantic(model)
+
+        # Use model docstring as default description if not provided
+        if description is None and model.__doc__:
+            description = model.__doc__.strip()
+
+        return cls(
+            name=name,
+            description=description,
+            parameters=params.parameters,
+            required=params.required,
+            run=run,
+            **kwargs,
+        )
+
+    @classmethod
+    def from_typed_dict(
+        cls,
+        name: str,
+        typed_dict: Type,
+        *,
+        description: str | None = None,
+        run: Callable | None = None,
+        **kwargs,
+    ) -> "Tool":
+        """
+        Create a Tool from a TypedDict.
+
+        Args:
+            name: Tool name
+            typed_dict: TypedDict class
+            description: Optional description
+            run: Optional callable to execute the tool
+            **kwargs: Additional Tool arguments
+
+        Example:
+            class UserQuery(TypedDict):
+                city: str
+                age: int
+
+            tool = Tool.from_typed_dict("get_user", UserQuery, run=my_function)
+        """
+        params = ToolParams.from_typed_dict(typed_dict)
+
+        return cls(
+            name=name,
+            description=description,
+            parameters=params.parameters,
+            required=params.required,
+            run=run,
+            **kwargs,
+        )
+
     @staticmethod
     def _tool_from_meta(meta: dict[str, Any], runner) -> "Tool":
         props = meta["inputSchema"].get("properties", {})
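
Following the docstring examples, the three convenience constructors can be exercised like so (a sketch; my_function is a stand-in callable):

    from typing import TypedDict
    from pydantic import BaseModel
    from lm_deluge.tool import Tool, ToolParams

    class UserQuery(BaseModel):
        """Look up a user by city and age."""
        city: str
        age: int

    class UserQueryTD(TypedDict):
        city: str
        age: int

    def my_function(city: str, age: int) -> str:
        return f"{city}, {age}"

    # the model docstring becomes the description when none is passed
    t1 = Tool.from_pydantic("get_user", UserQuery, run=my_function)
    t2 = Tool.from_typed_dict("get_user_td", UserQueryTD, run=my_function)
    t3 = Tool.from_params("get_user_p", ToolParams({"city": str, "age": int}), run=my_function)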
@@ -225,22 +556,11 @@

     @staticmethod
     def _python_type_to_json_schema(python_type) -> dict[str, Any]:
-        """Convert Python type to JSON Schema type definition."""
-        if python_type is int:
-            return {"type": "integer"}
-        elif python_type is float:
-            return {"type": "number"}
-        elif python_type is str:
-            return {"type": "string"}
-        elif python_type is bool:
-            return {"type": "boolean"}
-        elif python_type is list:
-            return {"type": "array"}
-        elif python_type is dict:
-            return {"type": "object"}
-        else:
-            # Default to string for unknown types
-            return {"type": "string"}
+        """
+        Convert Python type to JSON Schema type definition.
+        Now delegates to enhanced version for better type support.
+        """
+        return _python_type_to_json_schema_enhanced(python_type)

     def _json_schema(
         self, include_additional_properties=False, remove_defaults=False
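
The old static method's behavior is preserved for primitives; what the delegation buys is the generics handling. Tracing the branches of _python_type_to_json_schema_enhanced (importing the private helper purely for illustration):

    from typing import Literal
    from lm_deluge.tool import _python_type_to_json_schema_enhanced as to_schema

    assert to_schema(int) == {"type": "integer"}
    assert to_schema(list[int]) == {"type": "array", "items": {"type": "integer"}}
    assert to_schema(dict[str, bool]) == {"type": "object", "additionalProperties": {"type": "boolean"}}
    assert to_schema(Literal["a", "b"]) == {"type": "string", "enum": ["a", "b"]}
    assert to_schema(bytes) == {"type": "string"}  # unknown types fall back to string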

src/lm_deluge/tracker.py

@@ -33,6 +33,13 @@ class StatusTracker:
     total_requests: int = 0
     retry_queue: asyncio.Queue = field(default_factory=asyncio.Queue)

+    # Cumulative usage tracking
+    total_cost: float = 0.0
+    total_input_tokens: int = 0  # non-cached input tokens
+    total_cache_read_tokens: int = 0
+    total_cache_write_tokens: int = 0
+    total_output_tokens: int = 0
+
     # Progress bar configuration
     use_progress_bar: bool = True
     progress_bar_total: int | None = None
@@ -131,6 +138,25 @@
         self.num_tasks_in_progress -= 1
         self.num_tasks_failed += 1

+    def track_usage(self, response):
+        """Accumulate usage statistics from a completed request.
+
+        Args:
+            response: APIResponse object containing usage and cost information
+        """
+        if response.cost:
+            self.total_cost += response.cost
+
+        if response.usage:
+            self.total_output_tokens += response.usage.output_tokens
+            self.total_input_tokens += response.usage.input_tokens
+
+            if response.usage.cache_read_tokens:
+                self.total_cache_read_tokens += response.usage.cache_read_tokens
+
+            if response.usage.cache_write_tokens:
+                self.total_cache_write_tokens += response.usage.cache_write_tokens
+
     def log_final_status(self):
         # Close progress bar before printing final status
         self.close_progress_bar()
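
With client.py calling track_usage on both successful and failed requests, the tracker ends a run holding cumulative totals. A hedged sketch of reading them back — it touches only the attributes defined in these hunks and assumes you hold a StatusTracker reference from a finished run:

    def summarize(tracker) -> str:
        return (
            f"${tracker.total_cost:.4f} • "
            f"{tracker.total_input_tokens:,} in / {tracker.total_output_tokens:,} out • "
            f"cache {tracker.total_cache_read_tokens:,}r / {tracker.total_cache_write_tokens:,}w"
        )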
@@ -144,6 +170,26 @@
                 f"{self.num_rate_limit_errors} rate limit errors received. Consider running at a lower rate."
             )

+        # Display cumulative usage stats if available
+        if (
+            self.total_cost > 0
+            or self.total_input_tokens > 0
+            or self.total_output_tokens > 0
+        ):
+            usage_parts = []
+            if self.total_cost > 0:
+                usage_parts.append(f"💰 Cost: ${self.total_cost:.4f}")
+            if self.total_input_tokens > 0 or self.total_output_tokens > 0:
+                usage_parts.append(
+                    f"🔡 Tokens: {self.total_input_tokens:,} in / {self.total_output_tokens:,} out"
+                )
+            if self.total_cache_read_tokens > 0:
+                usage_parts.append(f"Cache: {self.total_cache_read_tokens:,} read")
+            if self.total_cache_write_tokens > 0:
+                usage_parts.append(f"{self.total_cache_write_tokens:,} write")
+
+            print(" ", " • ".join(usage_parts))
+
     @property
     def pbar(self) -> tqdm | None:
         """Backward compatibility property to access progress bar."""
@@ -229,7 +275,28 @@
                     f" [gold3]Capacity:[/gold3] {tokens_info} • {reqs_info}"
                 )

-                display = Group(self._rich_progress, in_progress, capacity_text)
+                # Format usage stats
+                usage_parts = []
+                if self.total_cost > 0:
+                    usage_parts.append(f"${self.total_cost:.4f}")
+                if self.total_input_tokens > 0 or self.total_output_tokens > 0:
+                    input_k = self.total_input_tokens / 1000
+                    output_k = self.total_output_tokens / 1000
+                    usage_parts.append(f"{input_k:.1f}k in • {output_k:.1f}k out")
+                if self.total_cache_read_tokens > 0:
+                    cache_k = self.total_cache_read_tokens / 1000
+                    usage_parts.append(f"{cache_k:.1f}k cached")
+
+                usage_text = ""
+                if usage_parts:
+                    usage_text = f" [gold3]Usage:[/gold3] {' • '.join(usage_parts)}"
+
+                if usage_text:
+                    display = Group(
+                        self._rich_progress, in_progress, capacity_text, usage_text
+                    )
+                else:
+                    display = Group(self._rich_progress, in_progress, capacity_text)
                 live.update(display)

                 await asyncio.sleep(0.1)

src/lm_deluge.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.55
+Version: 0.0.57
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10

src/lm_deluge.egg-info/SOURCES.txt

@@ -2,7 +2,6 @@ LICENSE
 README.md
 pyproject.toml
 src/lm_deluge/__init__.py
-src/lm_deluge/agent.py
 src/lm_deluge/batches.py
 src/lm_deluge/cache.py
 src/lm_deluge/cli.py
@@ -11,7 +10,6 @@ src/lm_deluge/config.py
 src/lm_deluge/embed.py
 src/lm_deluge/errors.py
 src/lm_deluge/file.py
-src/lm_deluge/gemini_limits.py
 src/lm_deluge/image.py
 src/lm_deluge/prompt.py
 src/lm_deluge/request_context.py

src/lm_deluge/agent.py (removed; the file was empty, so no content diff is shown)

src/lm_deluge/gemini_limits.py (removed)

@@ -1,65 +0,0 @@
-gemini_flash_limits = {
-    "asia-east1": 2000,
-    "asia-east2": 200,
-    "asia-northeast1": 200,
-    "asia-northeast3": 200,
-    "asia-south1": 200,
-    "asia-southeast1": 3_000,
-    "australia-southeast1": 200,
-    "europe-central2": 200,
-    "europe-north1": 200,
-    "europe-southwest1": 200,
-    "europe-west1": 10_000,
-    "europe-west2": 200,
-    "europe-west3": 200,
-    "europe-west4": 200,
-    "europe-west6": 200,
-    "europe-west8": 200,
-    "europe-west9": 200,
-    # 'me-central1': 200,
-    "me-central2": 200,
-    "me-west1": 200,
-    "northamerica-northeast1": 200,
-    "southamerica-east1": 200,
-    "us-central1": 5_000,
-    "us-east1": 3_000,
-    "us-east4": 200,
-    # 'us-east5': 200,
-    "us-south1": 3_000,
-    "us-west1": 5_000,
-    "us-west4": 200,
-}
-
-# total: 7_520
-gemini_1_5_pro_limits = {
-    "asia-east1": 500,
-    "asia-east2": 500,
-    "asia-northeast1": 500,
-    # "asia-northeast2": 500,
-    "asia-northeast3": 500,
-    "asia-south1": 500,
-    "asia-southeast1": 500,
-    "australia-southeast1": 60,
-    "europe-central2": 500,
-    "europe-north1": 60,
-    "europe-southwest1": 60,
-    "europe-west1": 500,
-    "europe-west2": 60,
-    "europe-west3": 60,
-    "europe-west4": 60,
-    "europe-west6": 60,
-    "europe-west8": 60,
-    "europe-west9": 60,
-    "me-central1": 60,
-    "me-central2": 60,
-    "me-west1": 60,
-    "northamerica-northeast1": 60,
-    "southamerica-east1": 500,
-    "us-central1": 500,
-    "us-east1": 500,
-    "us-east4": 60,
-    # "us-east5": 60,
-    "us-south1": 60,
-    "us-west1": 500,
-    "us-west4": 60,
-}