lm-deluge 0.0.9__py3-none-any.whl → 0.0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

lm_deluge/client.py CHANGED
@@ -9,7 +9,7 @@ from typing import Sequence, overload, Literal, Any
 from tqdm.auto import tqdm
 
 from lm_deluge.prompt import Conversation
-from lm_deluge.tool import ToolSpec
+from lm_deluge.tool import Tool
 
 from .tracker import StatusTracker
 from .sampling_params import SamplingParams
@@ -231,7 +231,7 @@ class LLMClient:
         show_progress: bool = ...,
         dry_run: Literal[True],
         verbose: bool = ...,
-        tools: list[ToolSpec] | None = ...,
+        tools: list[Tool] | None = ...,
     ) -> dict[str, int]: ...
 
     @overload
@@ -243,7 +243,7 @@ class LLMClient:
         show_progress: bool = ...,
         dry_run: bool = ...,
         verbose: bool = ...,
-        tools: list[ToolSpec] | None = ...,
+        tools: list[Tool] | None = ...,
     ) -> list[str | None]: ...
 
     @overload
@@ -255,7 +255,7 @@ class LLMClient:
         show_progress: bool = ...,
         dry_run: bool = ...,
         verbose: bool = ...,
-        tools: list[ToolSpec] | None = ...,
+        tools: list[Tool] | None = ...,
     ) -> list[APIResponse | None]: ...
 
     async def process_prompts_async(
@@ -266,7 +266,7 @@ class LLMClient:
         show_progress: bool = True,
         dry_run: bool = False,
         verbose: bool = False,
-        tools: list[ToolSpec] | None = None,
+        tools: list[Tool] | None = None,
     ) -> list[APIResponse | None] | list[str | None] | dict[str, int]:
         # if prompts are not Conversations, convert them.
         # can only handle strings for now
@@ -372,7 +372,7 @@ class LLMClient:
         show_progress=True,
         dry_run: bool = False,
         verbose: bool = False,
-        tools: list[ToolSpec] | None = None,
+        tools: list[Tool] | None = None,
     ):
         return asyncio.run(
             self.process_prompts_async(
@@ -569,7 +569,7 @@ async def process_api_prompts_async(
     progress_bar: tqdm | None = None,
     use_qps: bool = False,
     verbose: bool = False,
-    tools: list[ToolSpec] | None = None,
+    tools: list[Tool] | None = None,
 ):
     """Processes API requests in parallel, throttling to stay under rate limits."""
     # change ids to integer list
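The client-side change is purely a rename: every `tools` parameter now takes `list[Tool]` instead of `list[ToolSpec]`. A minimal sketch of a caller after the rename; the prompt and tool are placeholders, and `LLMClient` construction (unchanged in this release and not shown in the hunks) is assumed to work with defaults:

```python
import asyncio
from lm_deluge.client import LLMClient
from lm_deluge.tool import Tool

def get_weather(city: str) -> str:
    """Return a fake weather report for a city."""
    return f"It is sunny in {city}."

async def main():
    client = LLMClient()  # construction unchanged from 0.0.9; defaults assumed here
    return await client.process_prompts_async(
        ["What's the weather in Paris?"],
        tools=[Tool.from_function(get_weather)],  # was list[ToolSpec] in 0.0.9
    )

asyncio.run(main())
```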
lm_deluge/models.py CHANGED
@@ -664,7 +664,7 @@ registry = {
         "output_cost": 15.0,
         "requests_per_minute": 4_000,
         "tokens_per_minute": 400_000,
-        "reasoning_model": True,
+        "reasoning_model": False,
     },
     "claude-3.6-sonnet-bedrock": {
         "id": "claude-3.6-sonnet-bedrock",
@@ -677,10 +677,10 @@ registry = {
         "output_cost": 15.0,
         "requests_per_minute": 4_000,
         "tokens_per_minute": 400_000,
-        "reasoning_model": True,
+        "reasoning_model": False,
     },
-    "claude-sonnet-3.7-bedrock": {
-        "id": "claude-sonnet-3.7-bedrock",
+    "claude-3.7-sonnet-bedrock": {
+        "id": "claude-3.7-sonnet-bedrock",
         "name": "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
         "regions": ["us-east-1", "us-west-2", "eu-west-1"],
         "api_base": "",
@@ -692,8 +692,8 @@ registry = {
         "tokens_per_minute": 400_000,
         "reasoning_model": True,
     },
-    "claude-sonnet-4-bedrock": {
-        "id": "claude-sonnet-4-bedrock",
+    "claude-4-sonnet-bedrock": {
+        "id": "claude-4-sonnet-bedrock",
         "name": "us.anthropic.claude-sonnet-4-20250514-v1:0",
         "regions": ["us-east-1", "us-west-2", "us-east-2"],
         "api_base": "",
@@ -703,10 +703,10 @@ registry = {
         "output_cost": 15.0,
         "requests_per_minute": 4_000,
         "tokens_per_minute": 400_000,
-        "reasoning_model": False,
+        "reasoning_model": True,
     },
-    "claude-opus-4-bedrock": {
-        "id": "claude-opus-4-bedrock",
+    "claude-4-opus-bedrock": {
+        "id": "claude-4-opus-bedrock",
         "name": "us.anthropic.claude-opus-4-20250514-v1:0",
         "regions": ["us-east-1", "us-west-2", "us-east-2"],
         "api_base": "",
lm_deluge/tool.py CHANGED
@@ -1,31 +1,214 @@
-from typing import Any, Literal, Callable
+from typing import Any, Literal, Callable, Coroutine, get_type_hints
+import inspect
+import asyncio
+
+from fastmcp import Client  # pip install fastmcp >= 2.0
+from mcp.types import Tool as MCPTool
 from pydantic import BaseModel, Field
 
 
-class ToolSpec(BaseModel):
+async def _load_all_mcp_tools(client: Client) -> list["Tool"]:
+    metas: list[MCPTool] = await client.list_tools()
+
+    def make_runner(name: str):
+        async def _async_call(**kw):
+            async with client:
+                # maybe should be call_tool_mcp if we don't want to raise on error
+                return await client.call_tool(name, kw)
+
+        return _async_call
+
+    tools: list[Tool] = []
+    for m in metas:
+        tools.append(
+            Tool(
+                name=m.name,
+                description=m.description,
+                parameters=m.inputSchema.get("properties", {}),
+                required=m.inputSchema.get("required", []),
+                additionalProperties=m.inputSchema.get("additionalProperties"),
+                run=make_runner(m.name),
+            )
+        )
+    return tools
+
+
+class Tool(BaseModel):
     """
     Provider‑agnostic tool definition with no extra nesting.
     """
 
     name: str
-    description: str
+    description: str | None
     parameters: dict[str, Any]
     required: list[str] = Field(default_factory=list)
     additionalProperties: bool | None = None  # only
     # if desired, can provide a callable to run the tool
     run: Callable | None = None
 
+    def _is_async(self) -> bool:
+        return inspect.iscoroutinefunction(self.run)
+
     def call(self, **kwargs):
         if self.run is None:
             raise ValueError("No run function provided")
-        return self.run(**kwargs)
+
+        if self._is_async():
+            coro: Coroutine = self.run(**kwargs)  # type: ignore[arg-type]
+            try:
+                loop = asyncio.get_running_loop()
+            except RuntimeError:
+                # no loop → safe to block
+                return asyncio.run(coro)
+            else:
+                # already inside a loop → schedule
+                return loop.create_task(coro)
+        else:
+            # plain function
+            return self.run(**kwargs)
+
+    async def acall(self, **kwargs):
+        if self.run is None:
+            raise ValueError("No run function provided")
+
+        if self._is_async():
+            return await self.run(**kwargs)  # type: ignore[func-returns-value]
+        else:
+            loop = asyncio.get_running_loop()
+            assert self.run is not None, "can't run None"
+            return await loop.run_in_executor(None, lambda: self.run(**kwargs))  # type: ignore
+
+    @classmethod
+    def from_function(cls, func: Callable) -> "Tool":
+        """Create a Tool from a function using introspection."""
+        # Get function name
+        name = func.__name__
+
+        # Get docstring for description
+        description = func.__doc__ or f"Call the {name} function"
+        description = description.strip()
+
+        # Get function signature and type hints
+        sig = inspect.signature(func)
+        type_hints = get_type_hints(func)
+
+        # Build parameters and required list
+        parameters = {}
+        required = []
+
+        for param_name, param in sig.parameters.items():
+            # Skip *args and **kwargs
+            if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD):
+                continue
+
+            # Get type hint
+            param_type = type_hints.get(param_name, str)
+
+            # Convert Python types to JSON Schema types
+            json_type = cls._python_type_to_json_schema(param_type)
+
+            parameters[param_name] = json_type
+
+            # Add to required if no default value
+            if param.default is param.empty:
+                required.append(param_name)
+
+        return cls(
+            name=name,
+            description=description,
+            parameters=parameters,
+            required=required,
+            run=func,
+        )
+
+    @classmethod
+    async def from_mcp_config(
+        cls,
+        config: dict[str, Any],
+        *,
+        timeout: float | None = None,
+    ) -> list["Tool"]:
+        """
+        config: full Claude-Desktop-style dict *or* just its "mcpServers" block.
+        Returns a flat list of Tools from all configured servers.
+        """
+        # allow caller to pass either the whole desktop file or just the sub-dict
+        servers_block = config.get("mcpServers", config)
+
+        # FastMCP understands the whole config dict directly
+        client = Client({"mcpServers": servers_block}, timeout=timeout)
+        async with client:
+            all_tools = await _load_all_mcp_tools(client)
+
+        # tool names arrive prefixed by FastMCP (serverkey_toolname)
+        return all_tools
+
+    @classmethod
+    async def from_mcp(
+        cls,
+        server_name: str,
+        *,
+        tool_name: str | None = None,
+        timeout: float | None = None,
+        **server_spec,  # url="…" OR command="…" args=[…]
+    ) -> Any:  # Tool | list[Tool]
+        """
+        Thin wrapper for one server. Example uses:
+
+            await Tool.from_mcp("weather", url="https://weather.example.com/mcp")
+            await Tool.from_mcp("assistant", command="python", args=["./assistant.py"], tool_name="answer_question")
+        """
+        # ensure at least one of command or url is defined
+        if not (server_spec.get("url") or server_spec.get("command")):
+            raise ValueError("must provide url or command")
+        # build a one-server desktop-style dict
+        cfg = {server_name: server_spec}
+        tools = await cls.from_mcp_config(cfg, timeout=timeout)
+        if tool_name is None:
+            return tools
+        for t in tools:
+            if t.name.endswith(tool_name):  # prefixed by FastMCP
+                return t
+        raise ValueError(f"Tool '{tool_name}' not found on that server")
+
+    @staticmethod
+    def _tool_from_meta(meta: dict[str, Any], runner) -> "Tool":
+        props = meta["inputSchema"].get("properties", {})
+        req = meta["inputSchema"].get("required", [])
+        addl = meta["inputSchema"].get("additionalProperties")
+        return Tool(
+            name=meta["name"],
+            description=meta.get("description", ""),
+            parameters=props,
+            required=req,
+            additionalProperties=addl,
+            run=runner,
+        )
+
+    @staticmethod
+    def _python_type_to_json_schema(python_type: type) -> dict[str, Any]:
+        """Convert Python type to JSON Schema type definition."""
+        if python_type is int:
+            return {"type": "integer"}
+        elif python_type is float:
+            return {"type": "number"}
+        elif python_type is str:
+            return {"type": "string"}
+        elif python_type is bool:
+            return {"type": "boolean"}
+        elif python_type is list:
+            return {"type": "array"}
+        elif python_type is dict:
+            return {"type": "object"}
+        else:
+            # Default to string for unknown types
+            return {"type": "string"}
 
     def _json_schema(self, include_additional_properties=False) -> dict[str, Any]:
         res = {
             "type": "object",
             "properties": self.parameters,
-            # for openai all must be required
-            "required": list(self.parameters.keys()),
+            "required": self.required,  # Use the tool's actual required list
         }
         if include_additional_properties:
             res["additionalProperties"] = False
@@ -42,12 +225,22 @@ class ToolSpec(BaseModel):
         }
 
     def for_openai_completions(self, *, strict: bool = True) -> dict[str, Any]:
+        if strict:
+            # For strict mode, all parameters must be required and additionalProperties must be false
+            schema = self._json_schema(include_additional_properties=True)
+            schema["required"] = list(
+                self.parameters.keys()
+            )  # All parameters required in strict mode
+        else:
+            # For non-strict mode, use the original required list
+            schema = self._json_schema(include_additional_properties=True)
+
         return {
             "type": "function",
             "function": {
                 "name": self.name,
                 "description": self.description,
-                "parameters": self._json_schema(include_additional_properties=True),
+                "parameters": schema,
                 "strict": strict,
             },
         }
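The new `Tool.from_function` classmethod builds the JSON Schema by introspection: type hints map to JSON Schema types, and parameters without defaults become required. A short sketch against a hypothetical function, following the logic in the diff above:

```python
from lm_deluge.tool import Tool

def add(a: int, b: int, round_result: bool = False) -> float:
    """Add two numbers."""
    total = a + b
    return round(total) if round_result else total

tool = Tool.from_function(add)
assert tool.name == "add"
assert tool.description == "Add two numbers."
assert tool.parameters == {
    "a": {"type": "integer"},
    "b": {"type": "integer"},
    "round_result": {"type": "boolean"},
}
assert tool.required == ["a", "b"]  # round_result has a default, so it's optional
assert tool.call(a=2, b=3) == 5     # plain function, dispatched synchronously
```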
{lm_deluge-0.0.9.dist-info → lm_deluge-0.0.10.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.9
+Version: 0.0.10
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
@@ -21,6 +21,7 @@ Requires-Dist: bs4
 Requires-Dist: lxml
 Requires-Dist: pdf2image
 Requires-Dist: pillow
+Requires-Dist: fastmcp>=2.4
 Requires-Dist: fasttext-wheel
 Requires-Dist: fasttext-langdetect
 Dynamic: license-file
@@ -32,6 +33,8 @@ Dynamic: license-file
 - **Unified client** – Send prompts to all relevant models with a single client.
 - **Massive concurrency with throttling** – Set `max_tokens_per_minute` and `max_requests_per_minute` and let it fly. The client will process as many requests as possible while respecting rate limits and retrying failures.
 - **Spray across models/providers** – Configure a client with multiple models from any provider(s), and sampling weights. The client samples a model for each request.
+- **Tool use** – Unified API for defining tools for all providers, and for creating tools automatically from Python functions.
+- **MCP support** – Instantiate a `Tool` from a local or remote MCP server so that any LLM can use it, whether or not that provider natively supports MCP.
 - **Caching** – Save completions in a local or distributed cache to avoid repeated LLM calls to process the same input.
 - **Convenient message constructor** – No more looking up how to build an Anthropic messages list with images. Our `Conversation` and `Message` classes work great with our client or with the `openai` and `anthropic` packages.
 - **Sync and async APIs** – Use the client from sync or async code.
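The new MCP support bullet corresponds to the `Tool.from_mcp_config` / `Tool.from_mcp` classmethods in `lm_deluge/tool.py` above. A hedged sketch with a hypothetical local server config; per the comments in the diffed code, FastMCP prefixes each tool name with its server key:

```python
import asyncio
from lm_deluge.tool import Tool

config = {
    "mcpServers": {
        # hypothetical server; any Claude-Desktop-style entry should work
        "weather": {"command": "python", "args": ["./weather_server.py"]},
    }
}

async def main():
    tools = await Tool.from_mcp_config(config)
    print([t.name for t in tools])  # names arrive prefixed, e.g. "weather_get_forecast"
    # each Tool wraps an async runner, so prefer acall from async code:
    # result = await tools[0].acall(city="Paris")

asyncio.run(main())
```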
{lm_deluge-0.0.9.dist-info → lm_deluge-0.0.10.dist-info}/RECORD RENAMED
@@ -1,15 +1,15 @@
 lm_deluge/__init__.py,sha256=rndOr4Rcfnpttz-onWU3vVEm-MM0WDFgz6KexKPAx0k,222
 lm_deluge/cache.py,sha256=VB1kv8rM2t5XWPR60uhszFcxLDnVKOe1oA5hYjVDjIo,4375
-lm_deluge/client.py,sha256=rk0YAUvC5kF3OuxnmYlkT8I6NjAmEJ6TUfryQbRkmXw,28960
+lm_deluge/client.py,sha256=lGD4rqT7qHkTKddjRvKK_1bh7s8GNIzXzQ52GCZhfCg,28932
 lm_deluge/embed.py,sha256=m-X8UK4gV9KKD7Wv3yarAceMQaj7gR1JwzD_sB0MOQY,13183
 lm_deluge/errors.py,sha256=oHjt7YnxWbh-eXMScIzov4NvpJMo0-2r5J6Wh5DQ1tk,209
 lm_deluge/gemini_limits.py,sha256=V9mpS9JtXYz7AY6OuKyQp5TuIMRH1BVv9YrSNmGmHNA,1569
 lm_deluge/image.py,sha256=hFbRajqEVQbkirAfOxsTPkeq-27Zl-so4AWBFeUbpBI,7161
-lm_deluge/models.py,sha256=qdUsHC1kz82ZwduUU_rBcvJP4j2JXCL3Q2RtbHWc1H8,44998
+lm_deluge/models.py,sha256=6c_UZ3KlygpHpF0nq1_MRLtgOBdB1Q6FffLgm4ye_t0,44999
 lm_deluge/prompt.py,sha256=_pJYwgjL39lDzMNmae8pPIBoORm_ekSM_9qU2iGGpOc,25445
 lm_deluge/rerank.py,sha256=tW1c3gQCAqaF8Ez-r-4qxYAcdKqxnLMxwHApKOUKwk4,11289
 lm_deluge/sampling_params.py,sha256=E2kewh1vz-1Qcy5xNBCzihfGgT_GcHYMfzaWb3FLiXs,739
-lm_deluge/tool.py,sha256=zXletfGtpgBCXuqietZn-eaOItbIyOROskTbaSjfwEk,2701
+lm_deluge/tool.py,sha256=3hlOTdm-RJMGHOU2tI_quJa2UNIrXPT8hxGb3mnheAg,9462
 lm_deluge/tracker.py,sha256=Dk99scN_NeDEO0gkLO5efXiZq11Ga-k6cerUHWN7IWY,1292
 lm_deluge/api_requests/__init__.py,sha256=_aSpD6CJL9g6OpLPoChXiHjl4MH_OlGcKgfZaW8cgLM,71
 lm_deluge/api_requests/anthropic.py,sha256=MMI_w9hVbevQpcqP3NVVindpTmLb2KHqjJQpIzCi5RM,7240
@@ -31,8 +31,8 @@ lm_deluge/util/json.py,sha256=dCeG9j1D17rXmQJbKJH79X0CGof4Wlqd55TDg4D6ky8,5388
 lm_deluge/util/logprobs.py,sha256=UkBZakOxWluaLqHrjARu7xnJ0uCHVfLGHJdnYlEcutk,11768
 lm_deluge/util/validation.py,sha256=hz5dDb3ebvZrZhnaWxOxbNSVMI6nmaOODBkk0htAUhs,1575
 lm_deluge/util/xml.py,sha256=Ft4zajoYBJR3HHCt2oHwGfymGLdvp_gegVmJ-Wqk4Ck,10547
-lm_deluge-0.0.9.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
-lm_deluge-0.0.9.dist-info/METADATA,sha256=hIv-9R30IJXuh6AHR0pZkktvsbSihThOuu9D9AniKIg,8076
-lm_deluge-0.0.9.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
-lm_deluge-0.0.9.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
-lm_deluge-0.0.9.dist-info/RECORD,,
+lm_deluge-0.0.10.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
+lm_deluge-0.0.10.dist-info/METADATA,sha256=hn8Arn1L8N9PDaPJzZtnDB9WMVZsm1Ur7suIq3jYvZs,8387
+lm_deluge-0.0.10.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+lm_deluge-0.0.10.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
+lm_deluge-0.0.10.dist-info/RECORD,,