mojentic 1.0.1__py3-none-any.whl → 1.1.1__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- _examples/audit_openai_capabilities.py +561 -0
- mojentic/llm/gateways/openai_model_registry.py +141 -40
- mojentic/llm/gateways/openai_model_registry_spec.py +173 -6
- mojentic/llm/gateways/openai_temperature_handling_spec.py +22 -20
- {mojentic-1.0.1.dist-info → mojentic-1.1.1.dist-info}/METADATA +11 -9
- {mojentic-1.0.1.dist-info → mojentic-1.1.1.dist-info}/RECORD +9 -8
- {mojentic-1.0.1.dist-info → mojentic-1.1.1.dist-info}/WHEEL +0 -0
- {mojentic-1.0.1.dist-info → mojentic-1.1.1.dist-info}/licenses/LICENSE.md +0 -0
- {mojentic-1.0.1.dist-info → mojentic-1.1.1.dist-info}/top_level.txt +0 -0
_examples/audit_openai_capabilities.py
@@ -0,0 +1,561 @@
+"""
+Audit script that probes OpenAI models for their actual capabilities
+and compares against our hardcoded model registry.
+
+Usage:
+    OPENAI_API_KEY=sk-... python src/_examples/audit_openai_capabilities.py
+    OPENAI_API_KEY=sk-... python src/_examples/audit_openai_capabilities.py --cheap
+
+The --cheap flag skips expensive model families and infers capabilities
+from their -mini variants instead.
+"""
+
+import base64
+import json
+import os
+import sys
+import time
+from datetime import datetime, timezone
+from typing import Optional
+
+from openai import OpenAI, BadRequestError, APIError, RateLimitError
+
+from mojentic.llm.gateways.openai_model_registry import (
+    OpenAIModelRegistry, ModelType
+)
+
+# Models that use different API endpoints (not chat-compatible)
+SKIP_PREFIXES = [
+    "tts-", "whisper-", "dall-e-", "text-moderation-",
+    "davinci-", "babbage-", "canary-",
+    "codex-", "computer-",
+]
+SKIP_CONTAINS = [
+    "-realtime-", "-transcribe", "-tts",
+]
+
+# Expensive model families to skip in --cheap mode
+EXPENSIVE_FAMILIES = [
+    "o1-pro", "o3-pro", "o3-deep-research", "o4-mini-deep-research",
+    "gpt-5-codex",
+]
+
+# 1x1 white PNG for vision testing
+TINY_PNG_B64 = (
+    "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR4"
+    "nGP4z8BQDwAEgAF/pooBPQAAAABJRU5ErkJggg=="
+)
+
+MINIMAL_TOOL = {
+    "type": "function",
+    "function": {
+        "name": "get_weather",
+        "description": "Get weather for a city",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "city": {"type": "string", "description": "City name"}
+            },
+            "required": ["city"]
+        }
+    }
+}
+
+
+def should_skip_model(model_id: str) -> bool:
+    """Check if a model should be skipped (non-chat endpoint)."""
+    model_lower = model_id.lower()
+    for prefix in SKIP_PREFIXES:
+        if model_lower.startswith(prefix):
+            return True
+    for pattern in SKIP_CONTAINS:
+        if pattern in model_lower:
+            return True
+    return False
+
+
+def is_chat_model_candidate(model_id: str) -> bool:
+    """Check if a model is a candidate for chat API probing."""
+    model_lower = model_id.lower()
+    chat_patterns = [
+        "gpt-3.5", "gpt-4", "gpt-5",
+        "o1", "o3", "o4",
+        "chatgpt",
+    ]
+    return any(p in model_lower for p in chat_patterns)
+
+
+def is_embedding_model(model_id: str) -> bool:
+    """Check if a model is an embedding model."""
+    return "embedding" in model_id.lower()
+
+
+def is_expensive(model_id: str) -> bool:
+    """Check if a model is in an expensive family."""
+    model_lower = model_id.lower()
+    return any(family in model_lower for family in EXPENSIVE_FAMILIES)
+
+
+def rate_limited_call(func, *args, **kwargs):
+    """Call a function with rate limit handling and backoff."""
+    max_retries = 3
+    delay = 1.0
+    for attempt in range(max_retries):
+        try:
+            return func(*args, **kwargs)
+        except RateLimitError:
+            if attempt < max_retries - 1:
+                print(f"    Rate limited, waiting {delay}s...")
+                time.sleep(delay)
+                delay *= 2
+            else:
+                raise
+
+
+def probe_basic_chat(client: OpenAI, model_id: str) -> dict:
+    """Test basic chat completion and determine token parameter name."""
+    result = {"works": False, "uses_max_tokens": None, "error": None}
+
+    # Try with max_tokens first (standard chat models)
+    try:
+        response = rate_limited_call(
+            client.chat.completions.create,
+            model=model_id,
+            messages=[{"role": "user", "content": "Say hi"}],
+            max_tokens=10,
+        )
+        result["works"] = True
+        result["uses_max_tokens"] = True
+        return result
+    except BadRequestError as e:
+        error_msg = str(e).lower()
+        if "max_completion_tokens" in error_msg:
+            # Reasoning model - retry with max_completion_tokens
+            try:
+                response = rate_limited_call(
+                    client.chat.completions.create,
+                    model=model_id,
+                    messages=[{"role": "user", "content": "Say hi"}],
+                    max_completion_tokens=10,
+                )
+                result["works"] = True
+                result["uses_max_tokens"] = False
+                return result
+            except (BadRequestError, APIError) as e2:
+                result["error"] = str(e2)
+                return result
+        else:
+            result["error"] = str(e)
+            return result
+    except APIError as e:
+        result["error"] = str(e)
+        return result
+
+
+def probe_tool_calling(client: OpenAI, model_id: str, uses_max_tokens: bool) -> dict:
+    """Test if a model supports tool calling."""
+    result = {"supports_tools": False, "error": None}
+
+    token_kwargs = {}
+    if uses_max_tokens:
+        token_kwargs["max_tokens"] = 10
+    else:
+        token_kwargs["max_completion_tokens"] = 100
+
+    try:
+        response = rate_limited_call(
+            client.chat.completions.create,
+            model=model_id,
+            messages=[{"role": "user", "content": "What is the weather in London?"}],
+            tools=[MINIMAL_TOOL],
+            **token_kwargs,
+        )
+        result["supports_tools"] = True
+        return result
+    except BadRequestError as e:
+        error_msg = str(e).lower()
+        if "tool" in error_msg or "function" in error_msg:
+            result["supports_tools"] = False
+        else:
+            result["error"] = str(e)
+            result["supports_tools"] = False
+        return result
+    except APIError as e:
+        result["error"] = str(e)
+        return result
+
+
+def probe_streaming(client: OpenAI, model_id: str, uses_max_tokens: bool) -> dict:
+    """Test if a model supports streaming."""
+    result = {"supports_streaming": False, "error": None}
+
+    token_kwargs = {}
+    if uses_max_tokens:
+        token_kwargs["max_tokens"] = 10
+    else:
+        token_kwargs["max_completion_tokens"] = 50
+
+    try:
+        stream = rate_limited_call(
+            client.chat.completions.create,
+            model=model_id,
+            messages=[{"role": "user", "content": "Say hi"}],
+            stream=True,
+            **token_kwargs,
+        )
+        # Consume the stream to verify it works
+        for chunk in stream:
+            pass
+        result["supports_streaming"] = True
+        return result
+    except BadRequestError as e:
+        error_msg = str(e).lower()
+        if "stream" in error_msg:
+            result["supports_streaming"] = False
+        else:
+            result["error"] = str(e)
+            result["supports_streaming"] = False
+        return result
+    except APIError as e:
+        result["error"] = str(e)
+        return result
+
+
+def probe_vision(client: OpenAI, model_id: str, uses_max_tokens: bool) -> dict:
+    """Test if a model supports vision (image input)."""
+    result = {"supports_vision": False, "error": None}
+
+    token_kwargs = {}
+    if uses_max_tokens:
+        token_kwargs["max_tokens"] = 10
+    else:
+        token_kwargs["max_completion_tokens"] = 50
+
+    try:
+        response = rate_limited_call(
+            client.chat.completions.create,
+            model=model_id,
+            messages=[{
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": "Describe this image in one word."},
+                    {
+                        "type": "image_url",
+                        "image_url": {
+                            "url": f"data:image/png;base64,{TINY_PNG_B64}",
+                            "detail": "low"
+                        }
+                    }
+                ]
+            }],
+            **token_kwargs,
+        )
+        result["supports_vision"] = True
+        return result
+    except BadRequestError as e:
+        error_msg = str(e).lower()
+        if "image" in error_msg or "vision" in error_msg or "content" in error_msg:
+            result["supports_vision"] = False
+        else:
+            result["error"] = str(e)
+            result["supports_vision"] = False
+        return result
+    except APIError as e:
+        result["error"] = str(e)
+        return result
+
+
+def probe_temperature(client: OpenAI, model_id: str, uses_max_tokens: bool) -> dict:
+    """Test which temperature values a model supports."""
+    result = {"supported_temperatures": None, "error": None}
+    test_temps = [0.0, 0.5, 1.0]
+    supported = []
+
+    token_kwargs = {}
+    if uses_max_tokens:
+        token_kwargs["max_tokens"] = 5
+    else:
+        token_kwargs["max_completion_tokens"] = 20
+
+    for temp in test_temps:
+        try:
+            response = rate_limited_call(
+                client.chat.completions.create,
+                model=model_id,
+                messages=[{"role": "user", "content": "Say ok"}],
+                temperature=temp,
+                **token_kwargs,
+            )
+            supported.append(temp)
+        except BadRequestError as e:
+            error_msg = str(e).lower()
+            if "temperature" in error_msg:
+                pass  # This temperature not supported
+            else:
+                result["error"] = str(e)
+                break
+        except APIError as e:
+            result["error"] = str(e)
+            break
+        time.sleep(0.3)
+
+    if len(supported) == len(test_temps):
+        result["supported_temperatures"] = None  # All supported
+    elif len(supported) == 0:
+        result["supported_temperatures"] = []  # None supported
+    else:
+        result["supported_temperatures"] = supported
+
+    return result
+
+
+def probe_embedding(client: OpenAI, model_id: str) -> dict:
+    """Test if a model works as an embedding model."""
+    result = {"is_embedding": False, "error": None}
+    try:
+        response = rate_limited_call(
+            client.embeddings.create,
+            model=model_id,
+            input="test",
+        )
+        result["is_embedding"] = True
+        return result
+    except (BadRequestError, APIError) as e:
+        result["error"] = str(e)
+        return result
+
+
+def probe_model(client: OpenAI, model_id: str, cheap_mode: bool = False) -> Optional[dict]:
+    """Run all capability probes against a single model."""
+    if should_skip_model(model_id):
+        return None
+
+    if cheap_mode and is_expensive(model_id):
+        print(f"  Skipping {model_id} (expensive, --cheap mode)")
+        return None
+
+    # Handle embedding models separately
+    if is_embedding_model(model_id):
+        print(f"  Probing {model_id} (embedding)...")
+        embed_result = probe_embedding(client, model_id)
+        return {
+            "model_type": "embedding" if embed_result["is_embedding"] else "unknown",
+            "supports_tools": False,
+            "supports_streaming": False,
+            "supports_vision": False,
+            "supported_temperatures": None,
+            "uses_max_tokens": None,
+            "errors": [embed_result["error"]] if embed_result["error"] else []
+        }
+
+    if not is_chat_model_candidate(model_id):
+        return None
+
+    print(f"  Probing {model_id}...")
+
+    # Test 1: Basic chat
+    basic = probe_basic_chat(client, model_id)
+    if not basic["works"]:
+        print(f"    Basic chat failed: {basic['error']}")
+        return {
+            "model_type": "unknown",
+            "supports_tools": False,
+            "supports_streaming": False,
+            "supports_vision": False,
+            "supported_temperatures": None,
+            "uses_max_tokens": None,
+            "errors": [basic["error"]]
+        }
+
+    uses_max_tokens = basic["uses_max_tokens"]
+    model_type = "chat" if uses_max_tokens else "reasoning"
+    time.sleep(0.5)
+
+    # Test 2: Tool calling
+    tools_result = probe_tool_calling(client, model_id, uses_max_tokens)
+    time.sleep(0.5)
+
+    # Test 3: Streaming
+    stream_result = probe_streaming(client, model_id, uses_max_tokens)
+    time.sleep(0.5)
+
+    # Test 4: Vision
+    vision_result = probe_vision(client, model_id, uses_max_tokens)
+    time.sleep(0.5)
+
+    # Test 5: Temperature
+    temp_result = probe_temperature(client, model_id, uses_max_tokens)
+
+    errors = [r["error"] for r in [tools_result, stream_result, vision_result, temp_result]
+              if r.get("error")]
+
+    return {
+        "model_type": model_type,
+        "supports_tools": tools_result["supports_tools"],
+        "supports_streaming": stream_result["supports_streaming"],
+        "supports_vision": vision_result["supports_vision"],
+        "supported_temperatures": temp_result["supported_temperatures"],
+        "uses_max_tokens": uses_max_tokens,
+        "errors": errors if errors else []
+    }
+
+
+def compare_with_registry(probed_models: dict, registry: OpenAIModelRegistry) -> dict:
+    """Compare probed results with current registry."""
+    registered_models = set(registry.get_registered_models())
+    probed_model_names = set(probed_models.keys())
+
+    # Find new models (in API but not registry)
+    new_models = sorted(probed_model_names - registered_models)
+
+    # Find removed models (in registry but not in any API model)
+    removed_models = sorted(registered_models - probed_model_names)
+
+    # Find capability changes for models in both sets
+    capability_changes = {}
+    for model_name in sorted(probed_model_names & registered_models):
+        probed = probed_models[model_name]
+        registered_caps = registry.get_model_capabilities(model_name)
+
+        changes = {}
+
+        # Compare model type
+        reg_type = registered_caps.model_type.value
+        if probed["model_type"] != reg_type and probed["model_type"] != "unknown":
+            changes["model_type"] = {"was": reg_type, "now": probed["model_type"]}
+
+        # Compare tools support
+        if probed["supports_tools"] != registered_caps.supports_tools:
+            changes["supports_tools"] = {
+                "was": registered_caps.supports_tools,
+                "now": probed["supports_tools"]
+            }
+
+        # Compare streaming support
+        if probed["supports_streaming"] != registered_caps.supports_streaming:
+            changes["supports_streaming"] = {
+                "was": registered_caps.supports_streaming,
+                "now": probed["supports_streaming"]
+            }
+
+        # Compare vision support
+        if probed["supports_vision"] != registered_caps.supports_vision:
+            changes["supports_vision"] = {
+                "was": registered_caps.supports_vision,
+                "now": probed["supports_vision"]
+            }
+
+        # Compare temperature support
+        reg_temps = registered_caps.supported_temperatures
+        probed_temps = probed["supported_temperatures"]
+        if reg_temps != probed_temps:
+            changes["supported_temperatures"] = {
+                "was": reg_temps,
+                "now": probed_temps
+            }
+
+        if changes:
+            capability_changes[model_name] = changes
+
+    return {
+        "new_models": new_models,
+        "removed_models": removed_models,
+        "capability_changes": capability_changes
+    }
+
+
+def main():
+    cheap_mode = "--cheap" in sys.argv
+
+    api_key = os.getenv("OPENAI_API_KEY")
+    if not api_key:
+        print("ERROR: OPENAI_API_KEY environment variable not set")
+        sys.exit(1)
+
+    client = OpenAI(api_key=api_key)
+
+    print("Fetching available OpenAI models...")
+    all_models = sorted([m.id for m in client.models.list()])
+    print(f"Found {len(all_models)} models total")
+
+    # Separate into categories
+    models_to_probe = []
+    models_skipped = []
+
+    for model_id in all_models:
+        if should_skip_model(model_id):
+            models_skipped.append(model_id)
+        elif is_chat_model_candidate(model_id) or is_embedding_model(model_id):
+            models_to_probe.append(model_id)
+        else:
+            models_skipped.append(model_id)
+
+    print(f"\nWill probe {len(models_to_probe)} models, skipping {len(models_skipped)}")
+    if cheap_mode:
+        print("Running in --cheap mode (skipping expensive model families)")
+
+    # Probe each model
+    probed_results = {}
+    for model_id in models_to_probe:
+        result = probe_model(client, model_id, cheap_mode)
+        if result is not None:
+            probed_results[model_id] = result
+        time.sleep(0.5)  # Rate limit between models
+
+    # Compare with registry
+    print("\nComparing with current registry...")
+    registry = OpenAIModelRegistry()
+    comparison = compare_with_registry(probed_results, registry)
+
+    # Build report
+    report = {
+        "audit_date": datetime.now(timezone.utc).isoformat(),
+        "cheap_mode": cheap_mode,
+        "api_models_available": all_models,
+        "models_skipped": models_skipped,
+        "models_probed": probed_results,
+        "comparison": comparison
+    }
+
+    # Write report
+    report_path = os.path.join(
+        os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
+        "openai_model_audit_report.json"
+    )
+    with open(report_path, "w") as f:
+        json.dump(report, f, indent=2, default=str)
+
+    print(f"\nReport written to: {report_path}")
+
+    # Print summary
+    print("\n=== AUDIT SUMMARY ===")
+    print(f"Models in API: {len(all_models)}")
+    print(f"Models probed: {len(probed_results)}")
+    print(f"Models skipped: {len(models_skipped)}")
+
+    if comparison["new_models"]:
+        print(f"\nNew models (not in registry): {len(comparison['new_models'])}")
+        for m in comparison["new_models"]:
+            caps = probed_results.get(m, {})
+            print(f"  + {m} (type={caps.get('model_type', '?')}, "
+                  f"tools={caps.get('supports_tools', '?')}, "
+                  f"stream={caps.get('supports_streaming', '?')})")
+
+    if comparison["removed_models"]:
+        print(f"\nRemoved models (in registry, not in API): {len(comparison['removed_models'])}")
+        for m in comparison["removed_models"]:
+            print(f"  - {m}")
+
+    if comparison["capability_changes"]:
+        print(f"\nCapability changes: {len(comparison['capability_changes'])}")
+        for model, changes in comparison["capability_changes"].items():
+            print(f"  ~ {model}:")
+            for field, diff in changes.items():
+                print(f"    {field}: {diff['was']} -> {diff['now']}")
+
+    if not comparison["new_models"] and not comparison["removed_models"] and not comparison["capability_changes"]:
+        print("\nNo discrepancies found - registry is up to date!")
+
+
+if __name__ == "__main__":
+    main()
mojentic/llm/gateways/openai_model_registry.py
@@ -35,6 +35,9 @@ class ModelCapabilities:
     max_context_tokens: Optional[int] = None
     max_output_tokens: Optional[int] = None
     supported_temperatures: Optional[List[float]] = None  # None means all temperatures supported
+    supports_chat_api: bool = True
+    supports_completions_api: bool = False
+    supports_responses_api: bool = False

     def get_token_limit_param(self) -> str:
         """Get the correct parameter name for token limits based on model type."""
@@ -67,30 +70,34 @@ class OpenAIModelRegistry:
     def _initialize_default_models(self):
         """Initialize the registry with known OpenAI models and their capabilities."""

-        # Reasoning Models (o1, o3, o4, gpt-5 series) - Updated
+        # Reasoning Models (o1, o3, o4, gpt-5 series) - Updated 2026-02-04
         reasoning_models = [
-            "o1", "o1-2024-12-17",
+            "o1", "o1-2024-12-17",
             "o1-pro", "o1-pro-2025-03-19",
             "o3", "o3-2025-04-16", "o3-deep-research", "o3-deep-research-2025-06-26",
             "o3-mini", "o3-mini-2025-01-31", "o3-pro", "o3-pro-2025-06-10",
             "o4-mini", "o4-mini-2025-04-16", "o4-mini-deep-research",
             "o4-mini-deep-research-2025-06-26",
-            "gpt-5", "gpt-5-2025-08-07", "gpt-5-
-            "gpt-5-mini", "gpt-5-mini-2025-08-07", "gpt-5-nano", "gpt-5-nano-2025-08-07"
+            "gpt-5", "gpt-5-2025-08-07", "gpt-5-codex",
+            "gpt-5-mini", "gpt-5-mini-2025-08-07", "gpt-5-nano", "gpt-5-nano-2025-08-07",
+            "gpt-5-pro", "gpt-5-pro-2025-10-06",
+            "gpt-5.1", "gpt-5.1-2025-11-13", "gpt-5.1-chat-latest",
+            "gpt-5.2", "gpt-5.2-2025-12-11", "gpt-5.2-chat-latest"
         ]

         for model in reasoning_models:
             # Deep research models and GPT-5 might have different capabilities
             is_deep_research = "deep-research" in model
             is_gpt5 = "gpt-5" in model
-
-
-
+            is_gpt5_1 = "gpt-5.1" in model
+            is_gpt5_2 = "gpt-5.2" in model
+            is_gpt5_pro = "gpt-5-pro" in model and not is_gpt5_2
             is_mini_or_nano = ("mini" in model or "nano" in model)
+            is_chat_latest = "chat-latest" in model

-            #
-            supports_tools =
-            supports_streaming =
+            # ALL reasoning models now support tools and streaming (except gpt-5-pro which is responses-only)
+            supports_tools = not is_gpt5_pro
+            supports_streaming = not is_gpt5_pro

             # Set context and output tokens based on model tier
             if is_gpt5:
@@ -104,15 +111,20 @@ class OpenAIModelRegistry:
                 output_tokens = 32768

             # Temperature restrictions based on model series
-
-
-
-
-
-
-            else:
-                # Other reasoning models support all temperatures
+            # All reasoning models now support only temperature=1.0 (o3 now supports it too)
+            if is_gpt5_1 and not is_chat_latest:
+                # gpt-5.1 and gpt-5.1-2025-11-13 support all temperatures
+                supported_temps = None
+            elif is_gpt5_2 and not is_chat_latest:
+                # gpt-5.2 and gpt-5.2-2025-12-11 support all temperatures
                 supported_temps = None
+            else:
+                # All other reasoning models (including o1, o3, o4, gpt-5, and *-chat-latest) support only temp=1.0
+                supported_temps = [1.0]
+
+            # API endpoint support flags
+            is_responses_only = "pro" in model or "deep-research" in model or model == "gpt-5-codex"
+            is_both_endpoint = model in ("gpt-5.1", "gpt-5.1-2025-11-13")

             self._models[model] = ModelCapabilities(
                 model_type=ModelType.REASONING,
@@ -121,11 +133,14 @@ class OpenAIModelRegistry:
                 supports_vision=False,  # Vision support would need to be confirmed for GPT-5
                 max_context_tokens=context_tokens,
                 max_output_tokens=output_tokens,
-                supported_temperatures=supported_temps
+                supported_temperatures=supported_temps,
+                supports_chat_api=not is_responses_only,
+                supports_completions_api=is_both_endpoint,
+                supports_responses_api=is_responses_only
             )

-        # Chat Models (GPT-4 and GPT-4.1 series) - Updated
-        # Note: GPT-5 series moved to reasoning models
+        # Chat Models (GPT-4 and GPT-4.1 series) - Updated 2026-02-04
+        # Note: Most GPT-5 series moved to reasoning models; gpt-5-chat-latest is chat
         gpt4_and_newer_models = [
             "chatgpt-4o-latest",
             "gpt-4", "gpt-4-0125-preview", "gpt-4-0613", "gpt-4-1106-preview",
@@ -133,7 +148,7 @@ class OpenAIModelRegistry:
             "gpt-4.1", "gpt-4.1-2025-04-14", "gpt-4.1-mini", "gpt-4.1-mini-2025-04-14",
             "gpt-4.1-nano", "gpt-4.1-nano-2025-04-14",
             "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20",
-            "gpt-4o-audio-preview",
+            "gpt-4o-audio-preview",
             "gpt-4o-audio-preview-2024-12-17", "gpt-4o-audio-preview-2025-06-03",
             "gpt-4o-mini", "gpt-4o-mini-2024-07-18",
             "gpt-4o-mini-audio-preview", "gpt-4o-mini-audio-preview-2024-12-17",
@@ -143,18 +158,37 @@ class OpenAIModelRegistry:
             "gpt-4o-realtime-preview", "gpt-4o-realtime-preview-2024-10-01",
             "gpt-4o-realtime-preview-2024-12-17", "gpt-4o-realtime-preview-2025-06-03",
             "gpt-4o-search-preview", "gpt-4o-search-preview-2025-03-11",
-            "gpt-4o-transcribe"
+            "gpt-4o-transcribe",
+            "gpt-5-chat-latest",
+            "gpt-5-search-api", "gpt-5-search-api-2025-10-14"
         ]

         for model in gpt4_and_newer_models:
             # Determine capabilities based on model features
-
+            is_chatgpt_latest = model == "chatgpt-4o-latest"
             is_mini_or_nano = ("mini" in model or "nano" in model)
-            is_audio = "audio" in model or "realtime" in model or "transcribe" in model
+            is_audio = "audio" in model or "realtime" in model or "transcribe" in model or "tts" in model
+            is_search = "search" in model
             is_gpt41 = "gpt-4.1" in model
+            is_gpt5_chat = model == "gpt-5-chat-latest"
+            is_gpt5_search = "gpt-5-search-api" in model
+            is_nano_base = model == "gpt-4.1-nano"
+            # Note: Keep vision=True for gpt-4o models (audit probe limitation with 1x1 PNG)
+            # Exclude chatgpt-4o-latest, audio/search/transcribe/tts/realtime variants
+            vision_support = ("gpt-4o" in model and not is_chatgpt_latest and not is_audio and not is_search)
+
+            # Tool support: disabled for audio models, search models, chatgpt-4o-latest, and gpt-4.1-nano
+            supports_tools = not (is_audio or is_search or is_chatgpt_latest or is_nano_base)
+            # Streaming: disabled for audio models only
+            supports_streaming = not is_audio
+            # Temperature restrictions for search models
+            supported_temps = [] if is_search else None

             # Set context and output tokens based on model tier
-            if
+            if is_gpt5_chat or is_gpt5_search:
+                context_tokens = 300000
+                output_tokens = 50000
+            elif is_gpt41:
                 context_tokens = 200000 if not is_mini_or_nano else 128000
                 output_tokens = 32768 if not is_mini_or_nano else 16384
             elif "gpt-4o" in model:
@@ -164,33 +198,42 @@ class OpenAIModelRegistry:
                 context_tokens = 32000
                 output_tokens = 8192

+            # API endpoint support flags
+            is_both_endpoint = model in ("gpt-4.1-nano", "gpt-4.1-nano-2025-04-14",
+                                         "gpt-4o-mini", "gpt-4o-mini-2024-07-18")
+
             self._models[model] = ModelCapabilities(
                 model_type=ModelType.CHAT,
-                supports_tools=
-                supports_streaming=
+                supports_tools=supports_tools,
+                supports_streaming=supports_streaming,
                 supports_vision=vision_support,
                 max_context_tokens=context_tokens,
-                max_output_tokens=output_tokens
+                max_output_tokens=output_tokens,
+                supported_temperatures=supported_temps,
+                supports_completions_api=is_both_endpoint
             )

-        # Chat Models (GPT-3.5 series) - Updated
+        # Chat Models (GPT-3.5 series) - Updated 2026-02-04
         gpt35_models = [
             "gpt-3.5-turbo", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-1106",
             "gpt-3.5-turbo-16k", "gpt-3.5-turbo-instruct", "gpt-3.5-turbo-instruct-0914"
         ]

         for model in gpt35_models:
+            is_instruct = "instruct" in model
             context_tokens = 16385 if "16k" not in model else 16385
             self._models[model] = ModelCapabilities(
                 model_type=ModelType.CHAT,
-                supports_tools=
-                supports_streaming=
+                supports_tools=not is_instruct,  # Instruct models don't support tools
+                supports_streaming=not is_instruct,  # Instruct models don't support streaming
                 supports_vision=False,
                 max_context_tokens=context_tokens,
-                max_output_tokens=4096
+                max_output_tokens=4096,
+                supports_chat_api=not is_instruct,
+                supports_completions_api=is_instruct
             )

-        # Embedding Models - Updated
+        # Embedding Models - Updated 2026-02-04
         embedding_models = [
             "text-embedding-3-large", "text-embedding-3-small", "text-embedding-ada-002"
         ]
@@ -200,15 +243,61 @@ class OpenAIModelRegistry:
                 model_type=ModelType.EMBEDDING,
                 supports_tools=False,
                 supports_streaming=False,
-                supports_vision=False
+                supports_vision=False,
+                supports_chat_api=False
             )

-        #
+        # Legacy & Codex Models - Updated 2026-02-05
+        self._models["babbage-002"] = ModelCapabilities(
+            model_type=ModelType.CHAT,
+            supports_tools=False,
+            supports_streaming=False,
+            supports_vision=False,
+            supports_chat_api=False,
+            supports_completions_api=True
+        )
+
+        self._models["davinci-002"] = ModelCapabilities(
+            model_type=ModelType.CHAT,
+            supports_tools=False,
+            supports_streaming=False,
+            supports_vision=False,
+            supports_chat_api=False,
+            supports_completions_api=True
+        )
+
+        self._models["gpt-5.1-codex-mini"] = ModelCapabilities(
+            model_type=ModelType.REASONING,
+            supports_tools=False,
+            supports_streaming=False,
+            supports_vision=False,
+            max_context_tokens=128000,
+            max_output_tokens=32768,
+            supported_temperatures=[1.0],
+            supports_chat_api=False,
+            supports_completions_api=True
+        )
+
+        self._models["codex-mini-latest"] = ModelCapabilities(
+            model_type=ModelType.REASONING,
+            supports_tools=False,
+            supports_streaming=False,
+            supports_vision=False,
+            max_context_tokens=128000,
+            max_output_tokens=32768,
+            supported_temperatures=[1.0],
+            supports_chat_api=False,
+            supports_responses_api=True
+        )
+
+        # Pattern mappings for unknown models - Updated 2026-02-04
         self._pattern_mappings = {
             "o1": ModelType.REASONING,
             "o3": ModelType.REASONING,
             "o4": ModelType.REASONING,
             "gpt-5": ModelType.REASONING,  # GPT-5 is a reasoning model
+            "gpt-5.1": ModelType.REASONING,
+            "gpt-5.2": ModelType.REASONING,
             "gpt-4": ModelType.CHAT,
             "gpt-4.1": ModelType.CHAT,
             "gpt-3.5": ModelType.CHAT,
@@ -262,28 +351,40 @@ class OpenAIModelRegistry:
                 model_type=ModelType.REASONING,
                 supports_tools=False,
                 supports_streaming=False,
-                supports_vision=False
+                supports_vision=False,
+                supports_chat_api=True,
+                supports_completions_api=False,
+                supports_responses_api=False
             )
         elif model_type == ModelType.CHAT:
             return ModelCapabilities(
                 model_type=ModelType.CHAT,
                 supports_tools=True,
                 supports_streaming=True,
-                supports_vision=False
+                supports_vision=False,
+                supports_chat_api=True,
+                supports_completions_api=False,
+                supports_responses_api=False
             )
         elif model_type == ModelType.EMBEDDING:
             return ModelCapabilities(
                 model_type=ModelType.EMBEDDING,
                 supports_tools=False,
                 supports_streaming=False,
-                supports_vision=False
+                supports_vision=False,
+                supports_chat_api=False,
+                supports_completions_api=False,
+                supports_responses_api=False
             )
         else:  # MODERATION
             return ModelCapabilities(
                 model_type=ModelType.MODERATION,
                 supports_tools=False,
                 supports_streaming=False,
-                supports_vision=False
+                supports_vision=False,
+                supports_chat_api=False,
+                supports_completions_api=False,
+                supports_responses_api=False
             )

     def is_reasoning_model(self, model_name: str) -> bool:
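The three new flags (`supports_chat_api`, `supports_completions_api`, `supports_responses_api`) describe which OpenAI endpoint families a model accepts. A hypothetical dispatch a caller might build on them — the endpoint paths are OpenAI's public REST routes; the registry itself only exposes the booleans:

```python
from mojentic.llm.gateways.openai_model_registry import OpenAIModelRegistry

registry = OpenAIModelRegistry()
caps = registry.get_model_capabilities("gpt-3.5-turbo-instruct")

# Illustrative routing only; not part of the registry's API
if caps.supports_chat_api:
    endpoint = "/v1/chat/completions"
elif caps.supports_completions_api:
    endpoint = "/v1/completions"  # e.g. gpt-3.5-turbo-instruct, babbage-002
elif caps.supports_responses_api:
    endpoint = "/v1/responses"  # e.g. gpt-5-pro, codex-mini-latest
else:
    endpoint = None  # embedding/moderation models use their own routes
```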
mojentic/llm/gateways/openai_model_registry_spec.py
@@ -22,9 +22,10 @@ class DescribeOpenAIModelRegistry:
         registry = OpenAIModelRegistry()
         registered_models = registry.get_registered_models()

-        # Check that we have reasoning models
+        # Check that we have reasoning models (o1-mini removed in 2026-02-04 audit)
         assert "o1" in registered_models
-        assert "
+        assert "o3" in registered_models
+        assert "gpt-5" in registered_models

         # Check that we have chat models
         assert "gpt-4o" in registered_models
@@ -43,15 +44,17 @@ class DescribeOpenAIModelRegistry:
         """
         registry = OpenAIModelRegistry()

-        # Test known reasoning models
-        assert registry.is_reasoning_model("o1
-        assert registry.is_reasoning_model("o1-mini") is True
+        # Test known reasoning models (o1-mini removed, o1-preview uses pattern matching)
+        assert registry.is_reasoning_model("o1") is True
         assert registry.is_reasoning_model("o3-mini") is True
+        assert registry.is_reasoning_model("gpt-5") is True
+        assert registry.is_reasoning_model("gpt-5.1") is True

         # Test chat models
         assert registry.is_reasoning_model("gpt-4o") is False
         assert registry.is_reasoning_model("gpt-4o-mini") is False
         assert registry.is_reasoning_model("gpt-3.5-turbo") is False
+        assert registry.is_reasoning_model("gpt-5-chat-latest") is False

     def should_use_pattern_matching_for_unknown_models(self):
         """
@@ -80,7 +83,7 @@ class DescribeOpenAIModelRegistry:
         registry = OpenAIModelRegistry()

         # Reasoning models should use max_completion_tokens
-        o1_capabilities = registry.get_model_capabilities("o1
+        o1_capabilities = registry.get_model_capabilities("o1")
         assert o1_capabilities.get_token_limit_param() == "max_completion_tokens"

         # Chat models should use max_tokens
@@ -178,3 +181,167 @@ class DescribeOpenAIModelRegistry:
         assert ModelType.CHAT.value == "chat"
         assert ModelType.EMBEDDING.value == "embedding"
         assert ModelType.MODERATION.value == "moderation"
+
+    def should_support_tools_and_streaming_for_all_reasoning_models(self):
+        """
+        Given reasoning models as of 2026-02-04 audit
+        When checking their capabilities
+        Then all should support tools and streaming except gpt-5-pro
+        """
+        registry = OpenAIModelRegistry()
+
+        # All o1/o3/o4 models now support tools and streaming
+        o1_caps = registry.get_model_capabilities("o1")
+        assert o1_caps.supports_tools is True
+        assert o1_caps.supports_streaming is True
+
+        o3_caps = registry.get_model_capabilities("o3")
+        assert o3_caps.supports_tools is True
+        assert o3_caps.supports_streaming is True
+
+        o3_mini_caps = registry.get_model_capabilities("o3-mini")
+        assert o3_mini_caps.supports_tools is True
+        assert o3_mini_caps.supports_streaming is True
+
+        # GPT-5 family (except gpt-5-pro) supports tools and streaming
+        gpt5_caps = registry.get_model_capabilities("gpt-5")
+        assert gpt5_caps.supports_tools is True
+        assert gpt5_caps.supports_streaming is True
+
+        # gpt-5-pro is responses-only, no tools/streaming
+        gpt5_pro_caps = registry.get_model_capabilities("gpt-5-pro")
+        assert gpt5_pro_caps.supports_tools is False
+        assert gpt5_pro_caps.supports_streaming is False
+
+    def should_support_temperature_1_0_only_for_most_reasoning_models(self):
+        """
+        Given reasoning models as of 2026-02-04 audit
+        When checking temperature support
+        Then most should support only temperature=1.0, except gpt-5.1/5.2 base models
+        """
+        registry = OpenAIModelRegistry()
+
+        # o1/o3/o4 series support only temperature=1.0
+        o1_caps = registry.get_model_capabilities("o1")
+        assert o1_caps.supported_temperatures == [1.0]
+        assert o1_caps.supports_temperature(1.0) is True
+        assert o1_caps.supports_temperature(0.7) is False
+
+        o3_caps = registry.get_model_capabilities("o3")
+        assert o3_caps.supported_temperatures == [1.0]
+
+        # gpt-5.1 and gpt-5.1-2025-11-13 support all temperatures
+        gpt5_1_caps = registry.get_model_capabilities("gpt-5.1")
+        assert gpt5_1_caps.supported_temperatures is None
+        assert gpt5_1_caps.supports_temperature(0.7) is True
+        assert gpt5_1_caps.supports_temperature(1.0) is True
+
+        # gpt-5.1-chat-latest supports only temperature=1.0
+        gpt5_1_chat_caps = registry.get_model_capabilities("gpt-5.1-chat-latest")
+        assert gpt5_1_chat_caps.supported_temperatures == [1.0]
+
+        # gpt-5.2 and gpt-5.2-2025-12-11 support all temperatures
+        gpt5_2_caps = registry.get_model_capabilities("gpt-5.2")
+        assert gpt5_2_caps.supported_temperatures is None
+
+        # gpt-5.2-chat-latest supports only temperature=1.0
+        gpt5_2_chat_caps = registry.get_model_capabilities("gpt-5.2-chat-latest")
+        assert gpt5_2_chat_caps.supported_temperatures == [1.0]
+
+    def should_disable_tools_for_specific_chat_models(self):
+        """
+        Given chat models as of 2026-02-04 audit
+        When checking tool support
+        Then specific models should have tools disabled
+        """
+        registry = OpenAIModelRegistry()
+
+        # chatgpt-4o-latest does not support tools
+        chatgpt_caps = registry.get_model_capabilities("chatgpt-4o-latest")
+        assert chatgpt_caps.supports_tools is False
+        assert chatgpt_caps.supports_vision is False
+
+        # gpt-4.1-nano does not support tools
+        nano_caps = registry.get_model_capabilities("gpt-4.1-nano")
+        assert nano_caps.supports_tools is False
+
+        # Search models do not support tools and have no temperature support
+        search_caps = registry.get_model_capabilities("gpt-4o-search-preview")
+        assert search_caps.supports_tools is False
+        assert search_caps.supported_temperatures == []
+        assert search_caps.supports_temperature(1.0) is False
+
+        gpt5_search_caps = registry.get_model_capabilities("gpt-5-search-api")
+        assert gpt5_search_caps.supports_tools is False
+        assert gpt5_search_caps.supported_temperatures == []
+
+        # Audio models do not support tools or streaming
+        audio_caps = registry.get_model_capabilities("gpt-4o-audio-preview")
+        assert audio_caps.supports_tools is False
+        assert audio_caps.supports_streaming is False
+
+
+class DescribeAPIEndpointSupport:
+
+    def should_flag_chat_only_model(self):
+        registry = OpenAIModelRegistry()
+        caps = registry.get_model_capabilities("gpt-4")
+        assert caps.supports_chat_api is True
+        assert caps.supports_completions_api is False
+        assert caps.supports_responses_api is False
+
+    def should_flag_both_endpoint_model(self):
+        registry = OpenAIModelRegistry()
+        caps = registry.get_model_capabilities("gpt-4o-mini")
+        assert caps.supports_chat_api is True
+        assert caps.supports_completions_api is True
+        assert caps.supports_responses_api is False
+
+    def should_flag_completions_only_model(self):
+        registry = OpenAIModelRegistry()
+        caps = registry.get_model_capabilities("gpt-3.5-turbo-instruct")
+        assert caps.supports_chat_api is False
+        assert caps.supports_completions_api is True
+        assert caps.supports_responses_api is False
+
+    def should_flag_responses_only_model(self):
+        registry = OpenAIModelRegistry()
+        caps = registry.get_model_capabilities("gpt-5-pro")
+        assert caps.supports_chat_api is False
+        assert caps.supports_completions_api is False
+        assert caps.supports_responses_api is True
+
+    def should_flag_legacy_completions_model(self):
+        registry = OpenAIModelRegistry()
+        caps = registry.get_model_capabilities("babbage-002")
+        assert caps.supports_chat_api is False
+        assert caps.supports_completions_api is True
+        assert caps.supports_responses_api is False
+
+    def should_flag_embedding_model_with_no_endpoints(self):
+        registry = OpenAIModelRegistry()
+        caps = registry.get_model_capabilities("text-embedding-3-large")
+        assert caps.supports_chat_api is False
+        assert caps.supports_completions_api is False
+        assert caps.supports_responses_api is False
+
+    def should_flag_codex_mini_latest_as_responses_only(self):
+        registry = OpenAIModelRegistry()
+        caps = registry.get_model_capabilities("codex-mini-latest")
+        assert caps.supports_chat_api is False
+        assert caps.supports_completions_api is False
+        assert caps.supports_responses_api is True
+
+    def should_flag_gpt51_as_both_chat_and_completions(self):
+        registry = OpenAIModelRegistry()
+        caps = registry.get_model_capabilities("gpt-5.1")
+        assert caps.supports_chat_api is True
+        assert caps.supports_completions_api is True
+        assert caps.supports_responses_api is False
+
+    def should_include_endpoint_flags_in_default_capabilities(self):
+        registry = OpenAIModelRegistry()
+        caps = registry.get_model_capabilities("completely-unknown-model-xyz")
+        assert caps.supports_chat_api is True
+        assert caps.supports_completions_api is False
+        assert caps.supports_responses_api is False
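The specs above also pin down the three-valued semantics of `supported_temperatures`. Restated compactly, with model names as asserted in the specs:

```python
from mojentic.llm.gateways.openai_model_registry import OpenAIModelRegistry

registry = OpenAIModelRegistry()

# None -> no restriction: any temperature is accepted
assert registry.get_model_capabilities("gpt-5.1").supported_temperatures is None

# [1.0] -> exactly one accepted value
assert registry.get_model_capabilities("o1").supports_temperature(1.0)
assert not registry.get_model_capabilities("o1").supports_temperature(0.7)

# [] -> the temperature parameter is not accepted at all (search models)
assert not registry.get_model_capabilities("gpt-4o-search-preview").supports_temperature(1.0)
```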
mojentic/llm/gateways/openai_temperature_handling_spec.py
@@ -88,16 +88,16 @@ class DescribeOpenAIGatewayTemperatureHandling:
         call_args = mock_openai_client.chat.completions.create.call_args
         assert call_args[1]['temperature'] == 0.1

-    def
+    def should_automatically_adjust_unsupported_temperature_for_o1(self, openai_gateway, mock_openai_client):
         """
-        Given an o1
+        Given an o1 model that only supports temperature=1.0
         When calling complete with temperature=0.1 (unsupported)
         Then it should automatically adjust to temperature=1.0
         """
         messages = [LLMMessage(role=MessageRole.User, content="Test message")]

         openai_gateway.complete(
-            model="o1
+            model="o1",
             messages=messages,
             temperature=0.1
         )
@@ -124,11 +124,11 @@ class DescribeOpenAIGatewayTemperatureHandling:
         call_args = mock_openai_client.chat.completions.create.call_args
         assert call_args[1]['temperature'] == 1.0

-    def
+    def should_automatically_adjust_unsupported_temperature_for_o3_mini(self, openai_gateway, mock_openai_client):
         """
-        Given an o3-mini model that
-        When calling complete with temperature=0.1
-        Then it should
+        Given an o3-mini model that only supports temperature=1.0
+        When calling complete with temperature=0.1 (unsupported)
+        Then it should automatically adjust to temperature=1.0
         """
         messages = [LLMMessage(role=MessageRole.User, content="Test message")]

@@ -138,9 +138,9 @@ class DescribeOpenAIGatewayTemperatureHandling:
             temperature=0.1
         )

-        # Verify the API was called
+        # Verify the API was called with temperature=1.0, not 0.1
         call_args = mock_openai_client.chat.completions.create.call_args
-        assert 'temperature'
+        assert call_args[1]['temperature'] == 1.0


 class DescribeModelCapabilitiesTemperatureRestrictions:
@@ -178,14 +178,14 @@ class DescribeModelCapabilitiesTemperatureRestrictions:
     def should_identify_all_gpt5_variants_temperature_restrictions(self):
         """
         Given the model registry
-        When checking
-        Then they should
+        When checking GPT-5 reasoning variant models
+        Then they should have temperature restrictions to 1.0 only
+        Note: gpt-5-chat-latest is a CHAT model and supports all temperatures
         """
         registry = get_model_registry()
-
+        gpt5_reasoning_models = [
             "gpt-5",
             "gpt-5-2025-08-07",
-            "gpt-5-chat-latest",
             "gpt-5-codex",
             "gpt-5-mini",
             "gpt-5-mini-2025-08-07",
@@ -193,7 +193,7 @@ class DescribeModelCapabilitiesTemperatureRestrictions:
             "gpt-5-nano-2025-08-07"
         ]

-        for model in
+        for model in gpt5_reasoning_models:
             capabilities = registry.get_model_capabilities(model)
             assert capabilities.supports_temperature(1.0) is True
             assert capabilities.supports_temperature(0.1) is False
@@ -204,9 +204,10 @@ class DescribeModelCapabilitiesTemperatureRestrictions:
         Given the model registry
         When checking o1 series models
         Then they should have temperature restrictions to 1.0 only
+        Note: o1-mini removed from API as of 2026-02-04 audit
         """
         registry = get_model_registry()
-        o1_models = ["o1", "o1-
+        o1_models = ["o1", "o1-pro", "o1-2024-12-17", "o1-pro-2025-03-19"]

         for model in o1_models:
             capabilities = registry.get_model_capabilities(model)
@@ -214,20 +215,21 @@ class DescribeModelCapabilitiesTemperatureRestrictions:
             assert capabilities.supports_temperature(0.1) is False
             assert capabilities.supported_temperatures == [1.0]

-    def
+    def should_identify_o3_series_temperature_restrictions(self):
         """
         Given the model registry
         When checking o3 series models
-        Then they should
+        Then they should now support temperature=1.0 only (as of 2026-02-04 audit)
         """
         registry = get_model_registry()
-        o3_models = ["o3", "o3-mini", "o3-pro", "o3-deep-research"
+        o3_models = ["o3", "o3-mini", "o3-pro", "o3-deep-research", "o3-2025-04-16",
+                     "o3-mini-2025-01-31", "o3-pro-2025-06-10", "o3-deep-research-2025-06-26"]

         for model in o3_models:
             capabilities = registry.get_model_capabilities(model)
-            assert capabilities.supports_temperature(1.0) is
+            assert capabilities.supports_temperature(1.0) is True
             assert capabilities.supports_temperature(0.1) is False
-            assert capabilities.supported_temperatures == []
+            assert capabilities.supported_temperatures == [1.0]

     def should_identify_o4_series_temperature_restrictions(self):
         """
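Taken together, these specs describe a gateway that silently coerces an unsupported temperature rather than failing. A sketch of that adjustment rule, under the assumption that the first supported value is the fallback — the gateway's actual implementation may differ:

```python
from typing import Optional

def choose_temperature(requested: float, caps) -> Optional[float]:
    """Sketch of the adjustment behavior the specs above assert."""
    if caps.supported_temperatures is None:
        return requested  # no restriction
    if requested in caps.supported_temperatures:
        return requested
    if caps.supported_temperatures:
        return caps.supported_temperatures[0]  # e.g. o1: 0.1 -> 1.0
    return None  # e.g. search models: omit the parameter entirely
```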
{mojentic-1.0.1.dist-info → mojentic-1.1.1.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mojentic
-Version: 1.
+Version: 1.1.1
 Summary: Mojentic is an agentic framework that aims to provide a simple and flexible way to assemble teams of agents to solve complex problems.
 Author-email: Stacey Vetzal <stacey@vetzal.com>
 Project-URL: Homepage, https://github.com/svetzal/mojentic
@@ -13,15 +13,17 @@ Description-Content-Type: text/markdown
 License-File: LICENSE.md
 Requires-Dist: pydantic>=2.12.5
 Requires-Dist: structlog>=25.5.0
-Requires-Dist: numpy>=2.
+Requires-Dist: numpy>=2.4.2
 Requires-Dist: ollama>=0.6.1
-Requires-Dist: openai>=2.
-Requires-Dist: anthropic>=0.
+Requires-Dist: openai>=2.16.0
+Requires-Dist: anthropic>=0.77.1
 Requires-Dist: tiktoken>=0.12.0
 Requires-Dist: parsedatetime>=2.6
 Requires-Dist: pytz>=2025.2
 Requires-Dist: serpapi>=0.1.5
 Requires-Dist: colorama>=0.4.6
+Requires-Dist: filelock>=3.20.1
+Requires-Dist: urllib3>=2.6.0
 Provides-Extra: dev
 Requires-Dist: pytest>=9.0.2; extra == "dev"
 Requires-Dist: pytest-asyncio>=1.3.0; extra == "dev"
@@ -29,14 +31,14 @@ Requires-Dist: pytest-spec>=5.2.0; extra == "dev"
 Requires-Dist: pytest-cov>=7.0.0; extra == "dev"
 Requires-Dist: pytest-mock>=3.15.0; extra == "dev"
 Requires-Dist: flake8>=7.3.0; extra == "dev"
-Requires-Dist: bandit>=1.9.
+Requires-Dist: bandit>=1.9.3; extra == "dev"
 Requires-Dist: pip-audit>=2.10.0; extra == "dev"
 Requires-Dist: mkdocs>=1.6.1; extra == "dev"
-Requires-Dist: mkdocs-material>=9.7.
+Requires-Dist: mkdocs-material>=9.7.1; extra == "dev"
 Requires-Dist: mkdocs-llmstxt>=0.4.0; extra == "dev"
-Requires-Dist: mkdocstrings[python]>=0.
-Requires-Dist: griffe-fieldz>=0.
-Requires-Dist: pymdown-extensions>=10.
+Requires-Dist: mkdocstrings[python]>=1.0.2; extra == "dev"
+Requires-Dist: griffe-fieldz>=0.4.0; extra == "dev"
+Requires-Dist: pymdown-extensions>=10.20.1; extra == "dev"
 Dynamic: license-file

 # Mojentic
{mojentic-1.0.1.dist-info → mojentic-1.1.1.dist-info}/RECORD
@@ -1,6 +1,7 @@
 _examples/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 _examples/async_dispatcher_example.py,sha256=Iy01lFd3NTKdgeFc6wMRGFD9FTNH7LEd9nGMtzrbXVo,8592
 _examples/async_llm_example.py,sha256=WPb6ML_xXCSuRMbYWs-w2X28tzj9EosFiQv31gAAPBo,8627
+_examples/audit_openai_capabilities.py,sha256=RqTveROc1EZRi-2ilO79Qn7qRzTwpfeIr6VjycZ70zc,18492
 _examples/broker_as_tool.py,sha256=ixcz8wuhdTZS-0OR9L1CeBMsrKz6SBBAaDhwAoD60e4,3195
 _examples/broker_examples.py,sha256=eheXbzoslI9x_sPEt399hkxpDRI_VkscHL-br1LJqD4,2388
 _examples/broker_image_examples.py,sha256=NGuZQ0x28winv0Sxw7ClBvqj8oD1tdbADITVjXIF0yk,1126
@@ -93,10 +94,10 @@ mojentic/llm/gateways/ollama_messages_adapter_spec.py,sha256=gVRbWDrHOa1EiZ0CkEW
 mojentic/llm/gateways/openai.py,sha256=S19AIooYoBZYELVPrSeOwKslpYc7jrhu-sLjDXUFF3w,23161
 mojentic/llm/gateways/openai_message_adapter_spec.py,sha256=3nObWsf6cPuWuCK_IhrQoRdQdz7gndqeSSvJIxtQkp8,6609
 mojentic/llm/gateways/openai_messages_adapter.py,sha256=Scal68JKKdBHB35ok1c5DeWYdD6Wra5oXSsPxJyyXSQ,3947
-mojentic/llm/gateways/openai_model_registry.py,sha256=
-mojentic/llm/gateways/openai_model_registry_spec.py,sha256=
+mojentic/llm/gateways/openai_model_registry.py,sha256=2tIT_L8g4opEgLRvhpOy_w47W83Xp_slki2rl3xnteo,18585
+mojentic/llm/gateways/openai_model_registry_spec.py,sha256=w9nkWRnND_Bhzb061O1UKD9nCsUJsqgclP0tPbmbHzs,14192
 mojentic/llm/gateways/openai_spec.py,sha256=eazIk8bLQ2d9CNPGhcw0WedX7CZz-TEkmwGz74c39CM,4161
-mojentic/llm/gateways/openai_temperature_handling_spec.py,sha256=
+mojentic/llm/gateways/openai_temperature_handling_spec.py,sha256=jK4R3w6xhxEB8C-vdAGFjGNSH7qdrq3B5OABjZf8868,10199
 mojentic/llm/gateways/tokenizer_gateway.py,sha256=ztuqfunlJ6xmyUPPHcC_69-kegiNJD6jdSEde7hDh2w,485
 mojentic/llm/registry/__init__.py,sha256=FFokdoVYhtHslv-hZekONfG8ZdNA11Tto9chFLFVImE,71
 mojentic/llm/registry/llm_registry.py,sha256=beyrgGrkXx5ZckUJzC1nQ461vra0fF6s_qRaEdi5bsg,2508
@@ -142,8 +143,8 @@ mojentic/tracer/tracer_system.py,sha256=KPSVIfGVOjSx6Vj_SvrisqJXKT6ddwBc_UCMQC6D
 mojentic/tracer/tracer_system_spec.py,sha256=8hpQlmAWyjUvk7ihy339L0buQ-eH5rluaFvyMl-mSH4,8830
 mojentic/utils/__init__.py,sha256=WvNYbtVeliMZn2sMX53CrOQlQLJBXi4mJNoocG7s_kI,116
 mojentic/utils/formatting.py,sha256=YtXh0aYzLB9GKP8ZD6u1By1OBqPOXUtHirtq0GmHNag,948
-mojentic-1.
-mojentic-1.
-mojentic-1.
-mojentic-1.
-mojentic-1.
+mojentic-1.1.1.dist-info/licenses/LICENSE.md,sha256=txSgV8n5zY1W3NiF5HHsCwlaW0e8We1cSC6TuJUqxXA,1060
+mojentic-1.1.1.dist-info/METADATA,sha256=eb6l0htqFLfD-plqA90rn9UrdN_bHSUVSGsoB7-ILuY,8775
+mojentic-1.1.1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+mojentic-1.1.1.dist-info/top_level.txt,sha256=Q-BvPQ8Eu1jnEqK8Xkr6A9C8Xa1z38oPZRHuA5MCTqg,19
+mojentic-1.1.1.dist-info/RECORD,,

WHEEL, licenses/LICENSE.md, and top_level.txt are unchanged apart from the dist-info directory rename.