janito 2.6.0__py3-none-any.whl → 2.7.0__py3-none-any.whl
This diff shows the changes between publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their public registries.
- janito/__init__.py +1 -0
- janito/__main__.py +1 -0
- janito/_version.py +1 -0
- janito/agent/setup_agent.py +240 -230
- janito/agent/templates/profiles/{system_prompt_template_software_developer.txt.j2 → system_prompt_template_plain_software_developer.txt.j2} +39 -39
- janito/cli/__init__.py +1 -0
- janito/cli/chat_mode/bindings.py +1 -0
- janito/cli/chat_mode/chat_entry.py +1 -0
- janito/cli/chat_mode/prompt_style.py +1 -0
- janito/cli/chat_mode/script_runner.py +1 -0
- janito/cli/chat_mode/session.py +282 -282
- janito/cli/chat_mode/session_profile_select.py +5 -5
- janito/cli/single_shot_mode/handler.py +95 -95
- janito/drivers/driver_registry.py +27 -27
- janito/drivers/openai/driver.py +435 -435
- janito/provider_registry.py +178 -178
- janito/providers/__init__.py +1 -0
- janito/providers/anthropic/model_info.py +41 -41
- janito/providers/anthropic/provider.py +80 -80
- janito/providers/moonshotai/__init__.py +1 -0
- janito/providers/moonshotai/model_info.py +15 -0
- janito/providers/moonshotai/provider.py +82 -0
- janito/providers/openai/model_info.py +1 -0
- janito/providers/provider_static_info.py +21 -18
- janito/tools/adapters/local/__init__.py +66 -66
- janito/tools/adapters/local/move_file.py +3 -3
- janito/tools/adapters/local/read_files.py +40 -40
- {janito-2.6.0.dist-info → janito-2.7.0.dist-info}/METADATA +419 -412
- {janito-2.6.0.dist-info → janito-2.7.0.dist-info}/RECORD +33 -30
- {janito-2.6.0.dist-info → janito-2.7.0.dist-info}/WHEEL +0 -0
- {janito-2.6.0.dist-info → janito-2.7.0.dist-info}/entry_points.txt +0 -0
- {janito-2.6.0.dist-info → janito-2.7.0.dist-info}/licenses/LICENSE +0 -0
- {janito-2.6.0.dist-info → janito-2.7.0.dist-info}/top_level.txt +0 -0
janito/drivers/openai/driver.py
CHANGED
@@ -1,436 +1,436 @@
The hunk removes and re-adds all 436 lines, but the removed and re-added content is identical in the extracted diff (a whole-file rewrite with no visible textual change), so the file is listed once:

import uuid
import traceback
import re
import json
import math
import time
import os
import logging
from rich import pretty
from janito.llm.driver import LLMDriver
from janito.llm.driver_input import DriverInput
from janito.driver_events import RequestFinished, RequestStatus, RateLimitRetry
from janito.llm.message_parts import TextMessagePart, FunctionCallMessagePart

# Safe import of openai SDK
try:
    import openai

    DRIVER_AVAILABLE = True
    DRIVER_UNAVAILABLE_REASON = None
except ImportError:
    DRIVER_AVAILABLE = False
    DRIVER_UNAVAILABLE_REASON = "Missing dependency: openai (pip install openai)"


class OpenAIModelDriver(LLMDriver):
    def _get_message_from_result(self, result):
        """Extract the message object from the provider result (OpenAI-specific)."""
        if hasattr(result, "choices") and result.choices:
            return result.choices[0].message
        return None

    """
    OpenAI LLM driver (threaded, queue-based, stateless). Uses input/output queues accessible via instance attributes.
    """
    available = DRIVER_AVAILABLE
    unavailable_reason = DRIVER_UNAVAILABLE_REASON

    def __init__(self, tools_adapter=None, provider_name=None):
        super().__init__(tools_adapter=tools_adapter, provider_name=provider_name)

    def _prepare_api_kwargs(self, config, conversation):
        """
        Prepares API kwargs for OpenAI, including tool schemas if tools_adapter is present,
        and OpenAI-specific arguments (model, max_tokens, temperature, etc.).
        """
        api_kwargs = {}
        # Tool schemas (moved from base)
        if self.tools_adapter:
            try:
                from janito.providers.openai.schema_generator import (
                    generate_tool_schemas,
                )

                tool_classes = self.tools_adapter.get_tool_classes()
                tool_schemas = generate_tool_schemas(tool_classes)
                api_kwargs["tools"] = tool_schemas
            except Exception as e:
                api_kwargs["tools"] = []
                if hasattr(config, "verbose_api") and config.verbose_api:
                    print(f"[OpenAIModelDriver] Tool schema generation failed: {e}")
        # OpenAI-specific parameters
        if config.model:
            api_kwargs["model"] = config.model
        # Prefer max_completion_tokens if present, else fallback to max_tokens (for backward compatibility)
        if (
            hasattr(config, "max_completion_tokens")
            and config.max_completion_tokens is not None
        ):
            api_kwargs["max_completion_tokens"] = int(config.max_completion_tokens)
        elif hasattr(config, "max_tokens") and config.max_tokens is not None:
            # For models that do not support 'max_tokens', map to 'max_completion_tokens'
            api_kwargs["max_completion_tokens"] = int(config.max_tokens)
        for p in (
            "temperature",
            "top_p",
            "presence_penalty",
            "frequency_penalty",
            "stop",
            "reasoning_effort",
        ):
            v = getattr(config, p, None)
            if v is not None:
                api_kwargs[p] = v
        api_kwargs["messages"] = conversation
        api_kwargs["stream"] = False
        return api_kwargs

    def _call_api(self, driver_input: DriverInput):
        """Call the OpenAI-compatible chat completion endpoint with retry and error handling."""
        cancel_event = getattr(driver_input, "cancel_event", None)
        config = driver_input.config
        conversation = self.convert_history_to_api_messages(driver_input.conversation_history)
        request_id = getattr(config, "request_id", None)
        self._print_api_call_start(config)
        client = self._instantiate_openai_client(config)
        api_kwargs = self._prepare_api_kwargs(config, conversation)
        max_retries = getattr(config, "max_retries", 3)
        attempt = 1
        while True:
            try:
                self._print_api_attempt(config, attempt, max_retries, api_kwargs)
                if self._check_cancel(cancel_event, request_id, before_call=True):
                    return None
                result = client.chat.completions.create(**api_kwargs)
                if self._check_cancel(cancel_event, request_id, before_call=False):
                    return None
                self._handle_api_success(config, result, request_id)
                return result
            except Exception as e:
                if self._handle_api_exception(e, config, api_kwargs, attempt, max_retries, request_id):
                    attempt += 1
                    continue
                raise

    def _print_api_call_start(self, config):
        if getattr(config, "verbose_api", False):
            tool_adapter_name = type(self.tools_adapter).__name__ if self.tools_adapter else None
            tool_names = []
            if self.tools_adapter and hasattr(self.tools_adapter, "list_tools"):
                try:
                    tool_names = self.tools_adapter.list_tools()
                except Exception:
                    tool_names = ["<error retrieving tools>"]
            print(
                f"[verbose-api] OpenAI API call about to be sent. Model: {config.model}, max_tokens: {config.max_tokens}, tools_adapter: {tool_adapter_name}, tool_names: {tool_names}",
                flush=True,
            )

    def _print_api_attempt(self, config, attempt, max_retries, api_kwargs):
        if getattr(config, "verbose_api", False):
            print(
                f"[OpenAI] API CALL (attempt {attempt}/{max_retries}): chat.completions.create(**{api_kwargs})",
                flush=True,
            )

    def _handle_api_success(self, config, result, request_id):
        self._print_verbose_result(config, result)
        usage_dict = self._extract_usage(result)
        if getattr(config, "verbose_api", False):
            print(
                f"[OpenAI][DEBUG] Attaching usage info to RequestFinished: {usage_dict}",
                flush=True,
            )
        self.output_queue.put(
            RequestFinished(
                driver_name=self.__class__.__name__,
                request_id=request_id,
                response=result,
                status=RequestStatus.SUCCESS,
                usage=usage_dict,
            )
        )
        if getattr(config, "verbose_api", False):
            pretty.install()
            print("[OpenAI] API RESPONSE:", flush=True)
            pretty.pprint(result)

    def _handle_api_exception(self, e, config, api_kwargs, attempt, max_retries, request_id):
        status_code = getattr(e, "status_code", None)
        err_str = str(e)
        lower_err = err_str.lower()
        is_insufficient_quota = (
            "insufficient_quota" in lower_err or "exceeded your current quota" in lower_err
        )
        is_rate_limit = (
            (status_code == 429 or "error code: 429" in lower_err or "resource_exhausted" in lower_err)
            and not is_insufficient_quota
        )
        if not is_rate_limit or attempt > max_retries:
            self._handle_fatal_exception(e, config, api_kwargs)
        retry_delay = self._extract_retry_delay_seconds(e)
        if retry_delay is None:
            retry_delay = min(2 ** (attempt - 1), 30)
        self.output_queue.put(
            RateLimitRetry(
                driver_name=self.__class__.__name__,
                request_id=request_id,
                attempt=attempt,
                retry_delay=retry_delay,
                error=err_str,
                details={},
            )
        )
        if getattr(config, "verbose_api", False):
            print(
                f"[OpenAI][RateLimit] Attempt {attempt}/{max_retries} failed with rate-limit. Waiting {retry_delay}s before retry.",
                flush=True,
            )
        start_wait = time.time()
        while time.time() - start_wait < retry_delay:
            if self._check_cancel(getattr(config, "cancel_event", None), request_id, before_call=False):
                return False
            time.sleep(0.1)
        return True

    def _extract_retry_delay_seconds(self, exception) -> float | None:
        """Extract the retry delay in seconds from the provider error response.

        Handles both the Google Gemini style ``RetryInfo`` protobuf (where it's a
        ``retryDelay: '41s'`` string in JSON) and any number found after the word
        ``retryDelay``. Returns ``None`` if no delay could be parsed.
        """
        try:
            # Some SDKs expose the raw response JSON on e.args[0]
            if hasattr(exception, "response") and hasattr(exception.response, "text"):
                payload = exception.response.text
            else:
                payload = str(exception)
            # Look for 'retryDelay': '41s' or similar
            m = re.search(r"retryDelay['\"]?\s*[:=]\s*['\"]?(\d+(?:\.\d+)?)(s)?", payload)
            if m:
                return float(m.group(1))
            # Fallback: generic number of seconds in the message
            m2 = re.search(r"(\d+(?:\.\d+)?)\s*s(?:econds)?", payload)
            if m2:
                return float(m2.group(1))
        except Exception:
            pass
        return None

    def _handle_fatal_exception(self, e, config, api_kwargs):
        """Common path for unrecoverable exceptions.

        Prints diagnostics (respecting ``verbose_api``) then re-raises the
        exception so standard error handling in ``LLMDriver`` continues.
        """
        is_verbose = getattr(config, "verbose_api", False)
        if is_verbose:
            print(f"[ERROR] Exception during OpenAI API call: {e}", flush=True)
            print(f"[ERROR] config: {config}", flush=True)
            print(
                f"[ERROR] api_kwargs: {api_kwargs if 'api_kwargs' in locals() else 'N/A'}",
                flush=True,
            )
            print("[ERROR] Full stack trace:", flush=True)
            print(traceback.format_exc(), flush=True)
        raise

    def _instantiate_openai_client(self, config):
        try:
            api_key_display = str(config.api_key)
            if api_key_display and len(api_key_display) > 8:
                api_key_display = api_key_display[:4] + "..." + api_key_display[-4:]
            client_kwargs = {"api_key": config.api_key}
            if getattr(config, "base_url", None):
                client_kwargs["base_url"] = config.base_url

            # HTTP debug wrapper
            if os.environ.get("OPENAI_DEBUG_HTTP", "0") == "1":
                from http.client import HTTPConnection
                HTTPConnection.debuglevel = 1
                logging.basicConfig()
                logging.getLogger().setLevel(logging.DEBUG)
                requests_log = logging.getLogger("http.client")
                requests_log.setLevel(logging.DEBUG)
                requests_log.propagate = True
                print("[OpenAIModelDriver] HTTP debug enabled via OPENAI_DEBUG_HTTP=1", flush=True)

            client = openai.OpenAI(**client_kwargs)
            return client
        except Exception as e:
            print(
                f"[ERROR] Exception during OpenAI client instantiation: {e}", flush=True
            )
            print(traceback.format_exc(), flush=True)
            raise

    def _check_cancel(self, cancel_event, request_id, before_call=True):
        if cancel_event is not None and cancel_event.is_set():
            status = RequestStatus.CANCELLED
            reason = (
                "Cancelled before API call"
                if before_call
                else "Cancelled during API call"
            )
            self.output_queue.put(
                RequestFinished(
                    driver_name=self.__class__.__name__,
                    request_id=request_id,
                    status=status,
                    reason=reason,
                )
            )
            return True
        return False

    def _print_verbose_result(self, config, result):
        if config.verbose_api:
            print("[OpenAI] API RAW RESULT:", flush=True)
            pretty.pprint(result)
            if hasattr(result, "__dict__"):
                print("[OpenAI] API RESULT __dict__:", flush=True)
                pretty.pprint(result.__dict__)
            try:
                print("[OpenAI] API RESULT as dict:", dict(result), flush=True)
            except Exception:
                pass
            print(
                f"[OpenAI] API RESULT .usage: {getattr(result, 'usage', None)}",
                flush=True,
            )
            try:
                print(f"[OpenAI] API RESULT ['usage']: {result['usage']}", flush=True)
            except Exception:
                pass
            if not hasattr(result, "usage") or getattr(result, "usage", None) is None:
                print(
                    "[OpenAI][WARNING] No usage info found in API response.", flush=True
                )

    def _extract_usage(self, result):
        usage = getattr(result, "usage", None)
        if usage is not None:
            usage_dict = self._usage_to_dict(usage)
            if usage_dict is None:
                print(
                    "[OpenAI][WARNING] Could not convert usage to dict, using string fallback.",
                    flush=True,
                )
                usage_dict = str(usage)
        else:
            usage_dict = self._extract_usage_from_result_dict(result)
        return usage_dict

    def _usage_to_dict(self, usage):
        if hasattr(usage, "model_dump") and callable(getattr(usage, "model_dump")):
            try:
                return usage.model_dump()
            except Exception:
                pass
        if hasattr(usage, "dict") and callable(getattr(usage, "dict")):
            try:
                return usage.dict()
            except Exception:
                pass
        try:
            return dict(usage)
        except Exception:
            try:
                return vars(usage)
            except Exception:
                pass
        return None

    def _extract_usage_from_result_dict(self, result):
        try:
            return result["usage"]
        except Exception:
            return None

    def convert_history_to_api_messages(self, conversation_history):
        """
        Convert LLMConversationHistory to the list of dicts required by OpenAI's API.
        Handles 'tool_results' and 'tool_calls' roles for compliance.
        """
        api_messages = []
        for msg in conversation_history.get_history():
            self._append_api_message(api_messages, msg)
        self._replace_none_content(api_messages)
        return api_messages

    def _append_api_message(self, api_messages, msg):
        role = msg.get("role")
        content = msg.get("content")
        if role == "tool_results":
            self._handle_tool_results(api_messages, content)
        elif role == "tool_calls":
            self._handle_tool_calls(api_messages, content)
        else:
            self._handle_other_roles(api_messages, msg, role, content)

    def _handle_tool_results(self, api_messages, content):
        try:
            results = json.loads(content) if isinstance(content, str) else content
        except Exception:
            results = [content]
        for result in results:
            if isinstance(result, dict):
                api_messages.append({
                    "role": "tool",
                    "content": result.get("content", ""),
                    "name": result.get("name", ""),
                    "tool_call_id": result.get("tool_call_id", ""),
                })
            else:
                api_messages.append({
                    "role": "tool",
                    "content": str(result),
                    "name": "",
                    "tool_call_id": "",
                })

    def _handle_tool_calls(self, api_messages, content):
        try:
            tool_calls = json.loads(content) if isinstance(content, str) else content
        except Exception:
            tool_calls = []
        api_messages.append({"role": "assistant", "content": "", "tool_calls": tool_calls})

    def _handle_other_roles(self, api_messages, msg, role, content):
        if role == "function":
            name = ""
            if isinstance(msg, dict):
                metadata = msg.get("metadata", {})
                name = metadata.get("name", "") if isinstance(metadata, dict) else ""
            api_messages.append({"role": "tool", "content": content, "name": name})
        else:
            api_messages.append(msg)

    def _replace_none_content(self, api_messages):
        for m in api_messages:
            if m.get("content", None) is None:
                m["content"] = ""

    def _convert_completion_message_to_parts(self, message):
        """
        Convert an OpenAI completion message object to a list of MessagePart objects.
        Handles text, tool calls, and can be extended for other types.
        """
        parts = []
        # Text content
        content = getattr(message, "content", None)
        if content:
            parts.append(TextMessagePart(content=content))
        # Tool calls
        tool_calls = getattr(message, "tool_calls", None) or []
        for tool_call in tool_calls:
            parts.append(
                FunctionCallMessagePart(
                    tool_call_id=getattr(tool_call, "id", ""),
                    function=getattr(tool_call, "function", None),
                )
            )
        # Extend here for other message part types if needed
        return parts
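
For readers tracing the retry path above, here is a minimal standalone sketch of the delay parsing. The two regular expressions are the ones _extract_retry_delay_seconds uses; the sample error strings are hypothetical, and the final loop reproduces the driver's capped exponential-backoff fallback.

import re

def extract_retry_delay_seconds(payload: str) -> float | None:
    # 'retryDelay': '41s' style (Google Gemini RetryInfo rendered in JSON)
    m = re.search(r"retryDelay['\"]?\s*[:=]\s*['\"]?(\d+(?:\.\d+)?)(s)?", payload)
    if m:
        return float(m.group(1))
    # Fallback: any number followed by 's'/'seconds' in the message
    m = re.search(r"(\d+(?:\.\d+)?)\s*s(?:econds)?", payload)
    if m:
        return float(m.group(1))
    return None

print(extract_retry_delay_seconds("'retryDelay': '41s'"))          # 41.0
print(extract_retry_delay_seconds("Please retry in 2.5 seconds"))  # 2.5
print(extract_retry_delay_seconds("rate limit exceeded"))          # None

# When no delay can be parsed, _handle_api_exception falls back to a
# capped exponential backoff per attempt: 1, 2, 4, 8, 16, 30, 30, ...
for attempt in range(1, 8):
    print(attempt, min(2 ** (attempt - 1), 30))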
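
Similarly, a small self-contained sketch of the history flattening done by convert_history_to_api_messages: janito's internal 'tool_calls' and 'tool_results' roles are rewritten into the assistant/tool message shapes the OpenAI chat API expects. The conversation below is made up for illustration (read_files is one of the local tools in this package, but the call and its result are invented).

import json

# Hypothetical janito-style history: a user turn, a recorded tool call,
# and its recorded result (both stored as JSON strings).
history = [
    {"role": "user", "content": "Show pyproject.toml"},
    {"role": "tool_calls", "content": json.dumps([
        {"id": "call_1", "type": "function",
         "function": {"name": "read_files",
                      "arguments": "{\"paths\": [\"pyproject.toml\"]}"}}
    ])},
    {"role": "tool_results", "content": json.dumps([
        {"name": "read_files", "tool_call_id": "call_1",
         "content": "[project]\nname = \"janito\""}
    ])},
]

api_messages = []
for msg in history:
    role, content = msg.get("role"), msg.get("content")
    if role == "tool_calls":
        # -> one assistant message carrying the tool_calls array
        api_messages.append({"role": "assistant", "content": "",
                             "tool_calls": json.loads(content)})
    elif role == "tool_results":
        # -> one 'tool' message per result, echoing the tool_call_id
        for r in json.loads(content):
            api_messages.append({"role": "tool",
                                 "content": r.get("content", ""),
                                 "name": r.get("name", ""),
                                 "tool_call_id": r.get("tool_call_id", "")})
    else:
        api_messages.append(msg)

print(json.dumps(api_messages, indent=2))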