tooluniverse 1.0.3__py3-none-any.whl → 1.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- tooluniverse/__init__.py +17 -5
- tooluniverse/agentic_tool.py +268 -330
- tooluniverse/compose_scripts/output_summarizer.py +21 -15
- tooluniverse/data/agentic_tools.json +2 -2
- tooluniverse/data/odphp_tools.json +354 -0
- tooluniverse/data/output_summarization_tools.json +2 -2
- tooluniverse/default_config.py +1 -0
- tooluniverse/llm_clients.py +570 -0
- tooluniverse/mcp_tool_registry.py +3 -3
- tooluniverse/odphp_tool.py +226 -0
- tooluniverse/output_hook.py +92 -3
- tooluniverse/remote/boltz/boltz_mcp_server.py +2 -2
- tooluniverse/remote/uspto_downloader/uspto_downloader_mcp_server.py +2 -2
- tooluniverse/smcp.py +204 -112
- tooluniverse/smcp_server.py +23 -20
- tooluniverse/test/list_azure_openai_models.py +210 -0
- tooluniverse/test/test_agentic_tool_azure_models.py +91 -0
- tooluniverse/test/test_api_key_validation_min.py +64 -0
- tooluniverse/test/test_claude_sdk.py +86 -0
- tooluniverse/test/test_global_fallback.py +288 -0
- tooluniverse/test/test_hooks_direct.py +219 -0
- tooluniverse/test/test_odphp_tool.py +166 -0
- tooluniverse/test/test_openrouter_client.py +288 -0
- tooluniverse/test/test_stdio_hooks.py +285 -0
- tooluniverse/test/test_tool_finder.py +1 -1
- {tooluniverse-1.0.3.dist-info → tooluniverse-1.0.5.dist-info}/METADATA +101 -74
- {tooluniverse-1.0.3.dist-info → tooluniverse-1.0.5.dist-info}/RECORD +31 -19
- tooluniverse-1.0.5.dist-info/licenses/LICENSE +201 -0
- tooluniverse-1.0.3.dist-info/licenses/LICENSE +0 -21
- {tooluniverse-1.0.3.dist-info → tooluniverse-1.0.5.dist-info}/WHEEL +0 -0
- {tooluniverse-1.0.3.dist-info → tooluniverse-1.0.5.dist-info}/entry_points.txt +0 -0
- {tooluniverse-1.0.3.dist-info → tooluniverse-1.0.5.dist-info}/top_level.txt +0 -0
--- /dev/null
+++ b/tooluniverse/llm_clients.py
@@ -0,0 +1,570 @@
+from __future__ import annotations
+from typing import Any, Dict, List, Optional
+import os
+import time
+import json as _json
+
+
+class BaseLLMClient:
+    def test_api(self) -> None:
+        raise NotImplementedError
+
+    def infer(
+        self,
+        messages: List[Dict[str, str]],
+        temperature: Optional[float],
+        max_tokens: Optional[int],
+        return_json: bool,
+        custom_format: Any = None,
+        max_retries: int = 5,
+        retry_delay: int = 5,
+    ) -> Optional[str]:
+        raise NotImplementedError
+
+
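The hunk above defines the shared interface that the concrete clients below implement. For orientation, a caller might drive any of them roughly like this; the helper function and argument values are an illustrative sketch, not code shipped in this release:

from typing import Optional

from tooluniverse.llm_clients import BaseLLMClient

def ask(client: BaseLLMClient, question: str) -> Optional[str]:
    # Illustrative helper: verify connectivity, then run one completion.
    client.test_api()  # raises if the backend is misconfigured or unreachable
    return client.infer(
        messages=[{"role": "user", "content": question}],
        temperature=0.2,
        max_tokens=256,
        return_json=False,
    )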
+class AzureOpenAIClient(BaseLLMClient):
+    # Built-in defaults for model families (can be overridden by env)
+    DEFAULT_MODEL_LIMITS: Dict[str, Dict[str, int]] = {
+        # GPT-4.1 series
+        "gpt-4.1": {"max_output": 32768, "context_window": 1_047_576},
+        "gpt-4.1-mini": {"max_output": 32768, "context_window": 1_047_576},
+        "gpt-4.1-nano": {"max_output": 32768, "context_window": 1_047_576},
+        # GPT-4o series
+        "gpt-4o-1120": {"max_output": 16384, "context_window": 128_000},
+        "gpt-4o-0806": {"max_output": 16384, "context_window": 128_000},
+        "gpt-4o-mini-0718": {"max_output": 16384, "context_window": 128_000},
+        "gpt-4o": {"max_output": 16384, "context_window": 128_000},  # general prefix
+        # O-series
+        "o4-mini-0416": {"max_output": 100_000, "context_window": 200_000},
+        "o3-mini-0131": {"max_output": 100_000, "context_window": 200_000},
+        "o4-mini": {"max_output": 100_000, "context_window": 200_000},
+        "o3-mini": {"max_output": 100_000, "context_window": 200_000},
+        # Embeddings (for completeness)
+        "embedding-ada": {"max_output": 8192, "context_window": 8192},
+        "text-embedding-3-small": {"max_output": 8192, "context_window": 8192},
+        "text-embedding-3-large": {"max_output": 8192, "context_window": 8192},
+    }
+
+    def __init__(self, model_id: str, api_version: Optional[str], logger):
+        try:
+            from openai import AzureOpenAI as _AzureOpenAI  # type: ignore
+            import openai as _openai  # type: ignore
+        except Exception as e:  # pragma: no cover
+            raise RuntimeError("openai AzureOpenAI client is not available") from e
+        self._AzureOpenAI = _AzureOpenAI
+        self._openai = _openai
+
+        self.model_name = model_id
+        self.logger = logger
+
+        resolved_version = api_version or self._resolve_api_version(model_id)
+        self.logger.debug(
+            f"Resolved Azure API version for {model_id}: {resolved_version}"
+        )
+
+        api_key = os.getenv("AZURE_OPENAI_API_KEY")
+        if not api_key:
+            raise ValueError("AZURE_OPENAI_API_KEY not set")
+        endpoint = os.getenv("AZURE_OPENAI_ENDPOINT", "https://azure-ai.hms.edu")
+        self.client = self._AzureOpenAI(
+            azure_endpoint=endpoint, api_key=api_key, api_version=resolved_version
+        )
+        self.api_version = resolved_version
+
+        # Load env overrides for model limits (JSON dict of {prefix: {max_output, context_window}})
+        env_limits_raw = os.getenv("AZURE_DEFAULT_MODEL_LIMITS")
+        self._default_limits: Dict[str, Dict[str, int]] = (
+            self.DEFAULT_MODEL_LIMITS.copy()
+        )
+        if env_limits_raw:
+            try:
+                env_limits = _json.loads(env_limits_raw)
+                # shallow merge by keys
+                for k, v in env_limits.items():
+                    if isinstance(v, dict):
+                        base = self._default_limits.get(k, {}).copy()
+                        base.update(
+                            {
+                                kk: int(vv)
+                                for kk, vv in v.items()
+                                if isinstance(vv, (int, float, str))
+                            }
+                        )
+                        self._default_limits[k] = base
+            except Exception:
+                # ignore bad env format
+                pass
+
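The constructor above reads its connection settings and optional per-model limit overrides from environment variables; note that the shipped endpoint fallback is HMS-specific, so most deployments will want to set their own. An illustrative configuration (the endpoint, key placeholder, and limit values are examples, not shipped defaults):

import os

os.environ["AZURE_OPENAI_API_KEY"] = "<your-azure-openai-key>"                  # required
os.environ["AZURE_OPENAI_ENDPOINT"] = "https://my-resource.openai.azure.com"    # optional override
# Optional: merge/override DEFAULT_MODEL_LIMITS with a JSON dict keyed by model prefix.
os.environ["AZURE_DEFAULT_MODEL_LIMITS"] = '{"gpt-4o": {"max_output": 8192, "context_window": 128000}}'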
+    # --------- helpers (Azure specific) ---------
+    def _resolve_api_version(self, model_id: str) -> str:
+        mapping_raw = os.getenv("AZURE_OPENAI_API_VERSION_BY_MODEL")
+        mapping: Dict[str, str] = {}
+        if mapping_raw:
+            try:
+                mapping = _json.loads(mapping_raw)
+            except Exception:
+                mapping = {}
+        if model_id in mapping:
+            return mapping[model_id]
+        for k, v in mapping.items():
+            try:
+                if model_id.startswith(k):
+                    return v
+            except Exception:
+                continue
+        try:
+            if model_id.startswith("o3-mini") or model_id.startswith("o4-mini"):
+                return "2024-12-01-preview"
+        except Exception:
+            pass
+        return os.getenv("AZURE_OPENAI_API_VERSION", "2024-12-01-preview")
+
+    def _resolve_default_max_tokens(self, model_id: str) -> Optional[int]:
+        # Highest priority: explicit env per-model tokens mapping
+        mapping_raw = os.getenv("AZURE_MAX_TOKENS_BY_MODEL")
+        mapping: Dict[str, Any] = {}
+        if mapping_raw:
+            try:
+                mapping = _json.loads(mapping_raw)
+            except Exception:
+                mapping = {}
+        if model_id in mapping:
+            try:
+                return int(mapping[model_id])
+            except Exception:
+                pass
+        for k, v in mapping.items():
+            try:
+                if model_id.startswith(k):
+                    return int(v)
+            except Exception:
+                continue
+        # Next: built-in/default-limits map (with env merged)
+        if model_id in self._default_limits:
+            return int(self._default_limits[model_id].get("max_output", 0)) or None
+        for k, v in self._default_limits.items():
+            try:
+                if model_id.startswith(k):
+                    return int(v.get("max_output", 0)) or None
+            except Exception:
+                continue
+        return None
+
+    def _normalize_temperature(
+        self, model_id: str, temperature: Optional[float]
+    ) -> Optional[float]:
+        if isinstance(model_id, str) and (
+            model_id.startswith("o3-mini") or model_id.startswith("o4-mini")
+        ):
+            if temperature is not None:
+                self.logger.warning(
+                    f"Model {model_id} does not support 'temperature'; ignoring provided value."
+                )
+            return None
+        return temperature
+
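These helpers consult two further JSON environment mappings, keyed either by exact deployment name or by prefix (exact match is tried before prefix match). A hedged example of the expected shapes, with illustrative values:

import os

# Pin an API version for a specific deployment or model-name prefix.
os.environ["AZURE_OPENAI_API_VERSION_BY_MODEL"] = '{"o4-mini": "2024-12-01-preview"}'
# Cap the default completion tokens per deployment or prefix.
os.environ["AZURE_MAX_TOKENS_BY_MODEL"] = '{"gpt-4.1": 16384}'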
+    # --------- public API ---------
+    def test_api(self) -> None:
+        test_messages = [{"role": "user", "content": "ping"}]
+        token_attempts = [1, 4, 16, 32]
+        last_error: Optional[Exception] = None
+        for tok in token_attempts:
+            try:
+                try:
+                    self.client.chat.completions.create(
+                        model=self.model_name,
+                        messages=test_messages,
+                        max_tokens=tok,
+                        temperature=0,
+                    )
+                    return
+                except self._openai.BadRequestError:  # type: ignore[attr-defined]
+                    self.client.chat.completions.create(
+                        model=self.model_name,
+                        messages=test_messages,
+                        max_completion_tokens=tok,
+                    )
+                    return
+            except Exception as e:  # noqa: BLE001
+                last_error = e
+                msg = str(e).lower()
+                if (
+                    "max_tokens" in msg
+                    or "model output limit" in msg
+                    or "finish the message" in msg
+                ) and tok != token_attempts[-1]:
+                    continue
+                break
+        if last_error:
+            raise ValueError(f"ChatGPT API test failed: {last_error}")
+        raise ValueError("ChatGPT API test failed: unknown error")
+
+    def infer(
+        self,
+        messages: List[Dict[str, str]],
+        temperature: Optional[float],
+        max_tokens: Optional[int],
+        return_json: bool,
+        custom_format: Any = None,
+        max_retries: int = 5,
+        retry_delay: int = 5,
+    ) -> Optional[str]:
+        retries = 0
+        call_fn = (
+            self.client.chat.completions.parse
+            if custom_format is not None
+            else self.client.chat.completions.create
+        )
+        response_format = (
+            custom_format
+            if custom_format is not None
+            else ({"type": "json_object"} if return_json else None)
+        )
+        eff_temp = self._normalize_temperature(self.model_name, temperature)
+        eff_max = (
+            max_tokens
+            if max_tokens is not None
+            else self._resolve_default_max_tokens(self.model_name)
+        )
+        while retries < max_retries:
+            try:
+                kwargs: Dict[str, Any] = {
+                    "model": self.model_name,
+                    "messages": messages,
+                }
+                if response_format is not None:
+                    kwargs["response_format"] = response_format
+                if eff_temp is not None:
+                    kwargs["temperature"] = eff_temp
+                try:
+                    if eff_max is not None:
+                        resp = call_fn(max_tokens=eff_max, **kwargs)
+                    else:
+                        resp = call_fn(**kwargs)
+                except self._openai.BadRequestError as be:  # type: ignore[attr-defined]
+                    if eff_max is not None:
+                        resp = call_fn(max_completion_tokens=eff_max, **kwargs)
+                    else:
+                        be_msg = str(be).lower()
+                        fallback_limits = [
+                            8192,
+                            4096,
+                            2048,
+                            1024,
+                            512,
+                            256,
+                            128,
+                            64,
+                            32,
+                        ]
+                        if any(
+                            k in be_msg
+                            for k in [
+                                "max_tokens",
+                                "output limit",
+                                "finish the message",
+                                "max_completion_tokens",
+                            ]
+                        ):
+                            last_exc: Optional[Exception] = be
+                            for lim in fallback_limits:
+                                try:
+                                    try:
+                                        resp = call_fn(
+                                            max_completion_tokens=lim, **kwargs
+                                        )
+                                        last_exc = None
+                                        break
+                                    except Exception as inner_e:  # noqa: BLE001
+                                        last_exc = inner_e
+                                        resp = call_fn(max_tokens=lim, **kwargs)
+                                        last_exc = None
+                                        break
+                                except Exception as inner2:  # noqa: BLE001
+                                    last_exc = inner2
+                                    continue
+                            if last_exc is not None:
+                                raise last_exc
+                        else:
+                            raise be
+                if custom_format is not None:
+                    return resp.choices[0].message.parsed.model_dump()
+                return resp.choices[0].message.content
+            except self._openai.RateLimitError:  # type: ignore[attr-defined]
+                self.logger.warning(
+                    f"Rate limit exceeded. Retrying in {retry_delay} seconds..."
+                )
+                retries += 1
+                time.sleep(retry_delay * retries)
+            except Exception as e:  # noqa: BLE001
+                self.logger.error(f"An error occurred: {e}")
+                import traceback
+
+                traceback.print_exc()
+                break
+        self.logger.error("Max retries exceeded. Unable to complete the request.")
+        return None
+
+
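A minimal usage sketch for the Azure client, assuming the environment variables above are set; the deployment name, logger, and prompt are illustrative:

import logging

from tooluniverse.llm_clients import AzureOpenAIClient

logger = logging.getLogger("tooluniverse")
azure = AzureOpenAIClient(model_id="gpt-4o-1120", api_version=None, logger=logger)
answer = azure.infer(
    messages=[{"role": "user", "content": "One sentence on aspirin's mechanism."}],
    temperature=0.3,
    max_tokens=None,   # None falls back to _resolve_default_max_tokens()
    return_json=False,
)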
+class GeminiClient(BaseLLMClient):
+    def __init__(self, model_name: str, logger):
+        try:
+            import google.generativeai as genai  # type: ignore
+        except Exception as e:  # pragma: no cover
+            raise RuntimeError("google.generativeai not available") from e
+        api_key = os.getenv("GEMINI_API_KEY")
+        if not api_key:
+            raise ValueError("GEMINI_API_KEY not found")
+        self._genai = genai
+        self._genai.configure(api_key=api_key)
+        self.model_name = model_name
+        self.logger = logger
+
+    def _build_model(self):
+        return self._genai.GenerativeModel(self.model_name)
+
+    def test_api(self) -> None:
+        model = self._build_model()
+        model.generate_content(
+            "ping",
+            generation_config={
+                "max_output_tokens": 8,
+                "temperature": 0,
+            },
+        )
+
+    def infer(
+        self,
+        messages: List[Dict[str, str]],
+        temperature: Optional[float],
+        max_tokens: Optional[int],
+        return_json: bool,
+        custom_format: Any = None,
+        max_retries: int = 5,
+        retry_delay: int = 5,
+    ) -> Optional[str]:
+        if return_json:
+            raise ValueError("Gemini JSON mode not supported here")
+        contents = ""
+        for m in messages:
+            if m["role"] in ("user", "system"):
+                contents += f"{m['content']}\n"
+        retries = 0
+        while retries < max_retries:
+            try:
+                gen_cfg: Dict[str, Any] = {
+                    "temperature": (temperature if temperature is not None else 0)
+                }
+                if max_tokens is not None:
+                    gen_cfg["max_output_tokens"] = max_tokens
+                model = self._build_model()
+                resp = model.generate_content(contents, generation_config=gen_cfg)
+                return getattr(resp, "text", None) or getattr(resp, "candidates", [{}])[
+                    0
+                ].get("content")
+            except Exception as e:  # noqa: BLE001
+                self.logger.error(f"Gemini error: {e}")
+                retries += 1
+                time.sleep(retry_delay * retries)
+        return None
+
+
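A comparable sketch for GeminiClient, assuming GEMINI_API_KEY is set; the model name is an example, and note that return_json=True raises because this client flattens user and system messages into one prompt string:

import logging

from tooluniverse.llm_clients import GeminiClient

gemini = GeminiClient(model_name="gemini-1.5-flash", logger=logging.getLogger("tooluniverse"))
text = gemini.infer(
    messages=[{"role": "user", "content": "ping"}],
    temperature=0.0,
    max_tokens=64,
    return_json=False,
)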
+class OpenRouterClient(BaseLLMClient):
+    """
+    OpenRouter client using OpenAI SDK with custom base URL.
+    Supports models from OpenAI, Anthropic, Google, Qwen, and many other providers.
+    """
+
+    # Default model limits based on latest OpenRouter offerings
+    DEFAULT_MODEL_LIMITS: Dict[str, Dict[str, int]] = {
+        "openai/gpt-5": {"max_output": 128_000, "context_window": 400_000},
+        "openai/gpt-5-codex": {"max_output": 128_000, "context_window": 400_000},
+        "google/gemini-2.5-flash": {"max_output": 65_536, "context_window": 1_000_000},
+        "google/gemini-2.5-pro": {"max_output": 65_536, "context_window": 1_000_000},
+        "anthropic/claude-sonnet-4.5": {"max_output": 16_384, "context_window": 1_000_000},
+    }
+
+    def __init__(self, model_id: str, logger):
+        try:
+            from openai import OpenAI as _OpenAI  # type: ignore
+            import openai as _openai  # type: ignore
+        except Exception as e:  # pragma: no cover
+            raise RuntimeError("openai client is not available") from e
+
+        self._OpenAI = _OpenAI
+        self._openai = _openai
+        self.model_name = model_id
+        self.logger = logger
+
+        api_key = os.getenv("OPENROUTER_API_KEY")
+        if not api_key:
+            raise ValueError("OPENROUTER_API_KEY not set")
+
+        # Optional headers for OpenRouter
+        default_headers = {}
+        if site_url := os.getenv("OPENROUTER_SITE_URL"):
+            default_headers["HTTP-Referer"] = site_url
+        if site_name := os.getenv("OPENROUTER_SITE_NAME"):
+            default_headers["X-Title"] = site_name
+
+        self.client = self._OpenAI(
+            base_url="https://openrouter.ai/api/v1",
+            api_key=api_key,
+            default_headers=default_headers if default_headers else None,
+        )
+
+        # Load env overrides for model limits
+        env_limits_raw = os.getenv("OPENROUTER_DEFAULT_MODEL_LIMITS")
+        self._default_limits: Dict[str, Dict[str, int]] = (
+            self.DEFAULT_MODEL_LIMITS.copy()
+        )
+        if env_limits_raw:
+            try:
+                env_limits = _json.loads(env_limits_raw)
+                for k, v in env_limits.items():
+                    if isinstance(v, dict):
+                        base = self._default_limits.get(k, {}).copy()
+                        base.update(
+                            {
+                                kk: int(vv)
+                                for kk, vv in v.items()
+                                if isinstance(vv, (int, float, str))
+                            }
+                        )
+                        self._default_limits[k] = base
+            except Exception:
+                pass
+
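OpenRouter configuration is likewise environment-driven; only OPENROUTER_API_KEY is required, and the remaining variables shown are optional overrides read by the constructor above (all values here are placeholders):

import os

os.environ["OPENROUTER_API_KEY"] = "<your-openrouter-key>"
os.environ["OPENROUTER_SITE_URL"] = "https://example.org"    # sent as the HTTP-Referer header
os.environ["OPENROUTER_SITE_NAME"] = "My ToolUniverse app"   # sent as the X-Title header
os.environ["OPENROUTER_DEFAULT_MODEL_LIMITS"] = '{"openai/gpt-5": {"max_output": 64000}}'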
+    def _resolve_default_max_tokens(self, model_id: str) -> Optional[int]:
+        """Resolve default max tokens for a model."""
+        # Highest priority: explicit env per-model tokens mapping
+        mapping_raw = os.getenv("OPENROUTER_MAX_TOKENS_BY_MODEL")
+        mapping: Dict[str, Any] = {}
+        if mapping_raw:
+            try:
+                mapping = _json.loads(mapping_raw)
+            except Exception:
+                mapping = {}
+
+        if model_id in mapping:
+            try:
+                return int(mapping[model_id])
+            except Exception:
+                pass
+
+        # Check for prefix match
+        for k, v in mapping.items():
+            try:
+                if model_id.startswith(k):
+                    return int(v)
+            except Exception:
+                continue
+
+        # Next: built-in/default-limits map
+        if model_id in self._default_limits:
+            return int(self._default_limits[model_id].get("max_output", 0)) or None
+
+        # Check for prefix match in default limits
+        for k, v in self._default_limits.items():
+            try:
+                if model_id.startswith(k):
+                    return int(v.get("max_output", 0)) or None
+            except Exception:
+                continue
+
+        return None
+
+    def test_api(self) -> None:
+        """Test API connectivity with minimal token usage."""
+        test_messages = [{"role": "user", "content": "ping"}]
+        token_attempts = [1, 4, 16, 32]
+        last_error: Optional[Exception] = None
+
+        for tok in token_attempts:
+            try:
+                self.client.chat.completions.create(
+                    model=self.model_name,
+                    messages=test_messages,
+                    max_tokens=tok,
+                    temperature=0,
+                )
+                return
+            except Exception as e:  # noqa: BLE001
+                last_error = e
+                msg = str(e).lower()
+                if (
+                    "max_tokens" in msg
+                    or "model output limit" in msg
+                    or "finish the message" in msg
+                ) and tok != token_attempts[-1]:
+                    continue
+                break
+
+        if last_error:
+            raise ValueError(f"OpenRouter API test failed: {last_error}")
+        raise ValueError("OpenRouter API test failed: unknown error")
+
+    def infer(
+        self,
+        messages: List[Dict[str, str]],
+        temperature: Optional[float],
+        max_tokens: Optional[int],
+        return_json: bool,
+        custom_format: Any = None,
+        max_retries: int = 5,
+        retry_delay: int = 5,
+    ) -> Optional[str]:
+        """Execute inference using OpenRouter."""
+        retries = 0
+        call_fn = (
+            self.client.chat.completions.parse
+            if custom_format is not None
+            else self.client.chat.completions.create
+        )
+
+        response_format = (
+            custom_format
+            if custom_format is not None
+            else ({"type": "json_object"} if return_json else None)
+        )
+
+        eff_max = (
+            max_tokens
+            if max_tokens is not None
+            else self._resolve_default_max_tokens(self.model_name)
+        )
+
+        while retries < max_retries:
+            try:
+                kwargs: Dict[str, Any] = {
+                    "model": self.model_name,
+                    "messages": messages,
+                }
+
+                if response_format is not None:
+                    kwargs["response_format"] = response_format
+                if temperature is not None:
+                    kwargs["temperature"] = temperature
+                if eff_max is not None:
+                    kwargs["max_tokens"] = eff_max
+
+                resp = call_fn(**kwargs)
+
+                if custom_format is not None:
+                    return resp.choices[0].message.parsed.model_dump()
+                return resp.choices[0].message.content
+
+            except self._openai.RateLimitError:  # type: ignore[attr-defined]
+                self.logger.warning(
+                    f"Rate limit exceeded. Retrying in {retry_delay} seconds..."
+                )
+                retries += 1
+                time.sleep(retry_delay * retries)
+            except Exception as e:  # noqa: BLE001
+                self.logger.error(f"OpenRouter error: {e}")
+                import traceback
+                traceback.print_exc()
+                break
+
+        self.logger.error("Max retries exceeded. Unable to complete the request.")
+        return None
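And a minimal usage sketch for OpenRouterClient; the model id and prompt are illustrative:

import logging

from tooluniverse.llm_clients import OpenRouterClient

router = OpenRouterClient(model_id="openai/gpt-5", logger=logging.getLogger("tooluniverse"))
reply = router.infer(
    messages=[{"role": "user", "content": "ping"}],
    temperature=None,   # only forwarded when not None
    max_tokens=None,    # resolved from DEFAULT_MODEL_LIMITS or env overrides
    return_json=False,
)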
@@ -327,13 +327,12 @@ def _start_server_for_port(port: int, **kwargs):
 
     print(f"🚀 Starting MCP server on port {port} with {len(tools)} tools...")
 
-    # Create SMCP server
+    # Create SMCP server for compatibility
     server = _get_smcp()(
         name=config["server_name"],
         auto_expose_tools=False,  # We'll add tools manually
         search_enabled=True,
         max_workers=config.get("max_workers", 5),
-        stateless_http=True,  # Enable stateless mode for MCPAutoLoaderTool compatibility
         **kwargs,
     )
 
@@ -347,8 +346,9 @@ def _start_server_for_port(port: int, **kwargs):
     # Start server in background thread
     def run_server():
         try:
+            # Enable stateless mode for MCPAutoLoaderTool compatibility
            server.run_simple(
-                transport=config["transport"], host=config["host"], port=port
+                transport=config["transport"], host=config["host"], port=port, stateless_http=True
            )
        except Exception as e:
            print(f"❌ Error running MCP server on port {port}: {e}")
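The net effect of these two hunks is that stateless HTTP mode is no longer passed to the SMCP constructor but to run_simple(). A rough sketch of the resulting startup sequence, with placeholder configuration values rather than the config dict used in the real code:

server = _get_smcp()(
    name="tooluniverse-mcp",   # placeholder server name
    auto_expose_tools=False,
    search_enabled=True,
    max_workers=5,
)
server.run_simple(
    transport="http",          # placeholder; taken from config["transport"] in the package
    host="127.0.0.1",
    port=8000,
    stateless_http=True,       # moved here from the SMCP constructor
)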