janito-1.9.0-py3-none-any.whl → janito-1.10.0-py3-none-any.whl

This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
Files changed (81)
  1. janito/__init__.py +1 -1
  2. janito/agent/api_exceptions.py +4 -0
  3. janito/agent/config.py +1 -1
  4. janito/agent/config_defaults.py +2 -26
  5. janito/agent/conversation.py +163 -122
  6. janito/agent/conversation_api.py +149 -159
  7. janito/agent/{conversation_history.py → llm_conversation_history.py} +18 -1
  8. janito/agent/openai_client.py +38 -23
  9. janito/agent/openai_schema_generator.py +162 -129
  10. janito/agent/platform_discovery.py +134 -77
  11. janito/agent/profile_manager.py +5 -5
  12. janito/agent/rich_message_handler.py +80 -31
  13. janito/agent/templates/profiles/system_prompt_template_base.txt.j2 +5 -4
  14. janito/agent/test_openai_schema_generator.py +93 -0
  15. janito/agent/tool_base.py +7 -2
  16. janito/agent/tool_executor.py +54 -49
  17. janito/agent/tool_registry.py +5 -2
  18. janito/agent/tool_use_tracker.py +26 -5
  19. janito/agent/tools/__init__.py +6 -3
  20. janito/agent/tools/create_directory.py +3 -1
  21. janito/agent/tools/create_file.py +7 -1
  22. janito/agent/tools/fetch_url.py +40 -3
  23. janito/agent/tools/find_files.py +3 -1
  24. janito/agent/tools/get_file_outline/core.py +6 -7
  25. janito/agent/tools/get_file_outline/search_outline.py +3 -1
  26. janito/agent/tools/get_lines.py +7 -2
  27. janito/agent/tools/move_file.py +3 -1
  28. janito/agent/tools/present_choices.py +3 -1
  29. janito/agent/tools/python_command_runner.py +150 -0
  30. janito/agent/tools/python_file_runner.py +148 -0
  31. janito/agent/tools/python_stdin_runner.py +154 -0
  32. janito/agent/tools/remove_directory.py +3 -1
  33. janito/agent/tools/remove_file.py +5 -1
  34. janito/agent/tools/replace_file.py +12 -2
  35. janito/agent/tools/replace_text_in_file.py +4 -2
  36. janito/agent/tools/run_bash_command.py +30 -69
  37. janito/agent/tools/run_powershell_command.py +134 -105
  38. janito/agent/tools/search_text.py +172 -122
  39. janito/agent/tools/validate_file_syntax/core.py +3 -1
  40. janito/agent/tools_utils/action_type.py +7 -0
  41. janito/agent/tools_utils/dir_walk_utils.py +3 -2
  42. janito/agent/tools_utils/formatting.py +47 -21
  43. janito/agent/tools_utils/gitignore_utils.py +66 -40
  44. janito/agent/tools_utils/test_gitignore_utils.py +46 -0
  45. janito/cli/_print_config.py +63 -61
  46. janito/cli/arg_parser.py +13 -12
  47. janito/cli/cli_main.py +137 -147
  48. janito/cli/main.py +152 -174
  49. janito/cli/one_shot.py +40 -26
  50. janito/i18n/__init__.py +1 -1
  51. janito/rich_utils.py +46 -8
  52. janito/shell/commands/__init__.py +2 -4
  53. janito/shell/commands/conversation_restart.py +3 -1
  54. janito/shell/commands/edit.py +3 -0
  55. janito/shell/commands/history_view.py +3 -3
  56. janito/shell/commands/lang.py +3 -0
  57. janito/shell/commands/livelogs.py +5 -3
  58. janito/shell/commands/prompt.py +6 -0
  59. janito/shell/commands/session.py +3 -0
  60. janito/shell/commands/session_control.py +3 -0
  61. janito/shell/commands/termweb_log.py +8 -0
  62. janito/shell/commands/tools.py +3 -0
  63. janito/shell/commands/track.py +36 -0
  64. janito/shell/commands/utility.py +13 -18
  65. janito/shell/commands/verbose.py +3 -4
  66. janito/shell/input_history.py +62 -0
  67. janito/shell/main.py +117 -181
  68. janito/shell/session/manager.py +0 -21
  69. janito/shell/ui/interactive.py +0 -2
  70. janito/termweb/static/editor.css +0 -4
  71. janito/tests/test_rich_utils.py +44 -0
  72. janito/web/app.py +0 -75
  73. {janito-1.9.0.dist-info → janito-1.10.0.dist-info}/METADATA +61 -42
  74. {janito-1.9.0.dist-info → janito-1.10.0.dist-info}/RECORD +78 -71
  75. {janito-1.9.0.dist-info → janito-1.10.0.dist-info}/WHEEL +1 -1
  76. janito/agent/providers.py +0 -77
  77. janito/agent/tools/run_python_command.py +0 -161
  78. janito/shell/commands/sum.py +0 -49
  79. {janito-1.9.0.dist-info → janito-1.10.0.dist-info}/entry_points.txt +0 -0
  80. {janito-1.9.0.dist-info → janito-1.10.0.dist-info}/licenses/LICENSE +0 -0
  81. {janito-1.9.0.dist-info → janito-1.10.0.dist-info}/top_level.txt +0 -0
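The two wheels can be compared locally to verify any hunk below. A minimal sketch using only the standard library; the wheel filenames and the pip download command in the comment are illustrative and may differ in your environment:

import difflib
import zipfile

# Assumes both wheels were fetched first, e.g.:
#   pip download janito==1.9.0 janito==1.10.0 --no-deps
def wheel_lines(wheel_path, member):
    # Wheels are plain zip archives, so any packaged module can be read directly.
    with zipfile.ZipFile(wheel_path) as zf:
        return zf.read(member).decode("utf-8", errors="replace").splitlines()

old = wheel_lines("janito-1.9.0-py3-none-any.whl", "janito/agent/conversation_api.py")
new = wheel_lines("janito-1.10.0-py3-none-any.whl", "janito/agent/conversation_api.py")
print("\n".join(difflib.unified_diff(old, new, "1.9.0", "1.10.0", lineterm="")))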
janito/agent/conversation_api.py
@@ -7,7 +7,8 @@ from janito.i18n import tr
 import json
 from janito.agent.runtime_config import runtime_config
 from janito.agent.tool_registry import get_tool_schemas
-from janito.agent.conversation_exceptions import NoToolSupportError
+from janito.agent.conversation_exceptions import NoToolSupportError, EmptyResponseError
+from janito.agent.api_exceptions import ApiError


 def _sanitize_utf8_surrogates(obj):
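The new ApiError import refers to janito/agent/api_exceptions.py, which the file list above shows gaining 4 lines; its contents are not part of this excerpt. A plausible minimal definition, offered only as an assumption:

# Hypothetical reconstruction of janito/agent/api_exceptions.py (not shown in this diff).
class ApiError(Exception):
    """Raised when the provider returns an explicit error payload instead of choices."""
    pass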
@@ -28,7 +29,7 @@ def _sanitize_utf8_surrogates(obj):
 def get_openai_response(
     client, model, messages, max_tokens, tools=None, tool_choice=None, temperature=None
 ):
-    """Non-streaming OpenAI API call."""
+    """OpenAI API call."""
     messages = _sanitize_utf8_surrogates(messages)
     from janito.agent.conversation_exceptions import ProviderError

@@ -53,176 +54,165 @@ def get_openai_response(
         or response.choices is None
         or len(response.choices) == 0
     ):
+        # Always check for error before raising ProviderError
+        error = getattr(response, "error", None)
+        if error:
+            print(f"ApiError: {error.get('message', error)}")
+            print(f"Full error object: {error}")
+            print(f"Raw response: {response}")
+            raise ApiError(error.get("message", str(error)))
         raise ProviderError(
-            "No choices in response; possible API or LLM error.",
+            f"No choices in response; possible API or LLM error. Raw response: {response!r}",
             {"code": 502, "raw_response": str(response)},
         )
     return response


-def get_openai_stream_response(
-    client,
-    model,
-    messages,
-    max_tokens,
-    tools=None,
-    tool_choice=None,
-    temperature=None,
-    verbose_stream=False,
-    message_handler=None,
-):
-    """Streaming OpenAI API call."""
-    messages = _sanitize_utf8_surrogates(messages)
-    openai_args = dict(
-        model=model,
-        messages=messages,
-        max_tokens=max_tokens,
-        stream=True,
+def _extract_status_and_retry_after(e, error_message):
+    status_code = None
+    retry_after = None
+    if hasattr(e, "status_code"):
+        status_code = getattr(e, "status_code")
+    elif hasattr(e, "response") and hasattr(e.response, "status_code"):
+        status_code = getattr(e.response, "status_code")
+        if hasattr(e.response, "headers") and e.response.headers:
+            retry_after = e.response.headers.get("Retry-After")
+    else:
+        import re
+
+        match = re.search(r"[Ee]rror code: (\d{3})", error_message)
+        if match:
+            status_code = int(match.group(1))
+        retry_after_match = re.search(r"Retry-After['\"]?:?\s*(\d+)", error_message)
+        if retry_after_match:
+            retry_after = retry_after_match.group(1)
+    return status_code, retry_after
+
+
+def _calculate_wait_time(status_code, retry_after, attempt):
+    if status_code == 429 and retry_after is not None:
+        try:
+            return int(float(retry_after))
+        except Exception:
+            return 2**attempt
+    return 2**attempt
+
+
+def _log_and_sleep(message, attempt, max_retries, e=None, wait_time=None):
+    print(
+        tr(
+            message,
+            attempt=attempt,
+            max_retries=max_retries,
+            e=e,
+            wait_time=wait_time,
+        )
     )
-    if not runtime_config.get("vanilla_mode", False):
-        openai_args.update(
-            tools=tools or get_tool_schemas(),
-            tool_choice=tool_choice or "auto",
-            temperature=temperature if temperature is not None else 0.2,
+    time.sleep(wait_time)
+
+
+def _handle_json_decode_error(e, attempt, max_retries):
+    if attempt < max_retries:
+        wait_time = 2**attempt
+        _log_and_sleep(
+            "Invalid/malformed response from OpenAI (attempt {attempt}/{max_retries}). Retrying in {wait_time} seconds...",
+            attempt,
+            max_retries,
+            wait_time=wait_time,
         )
-    response_stream = client.chat.completions.create(**openai_args)
-    content_accum = ""
-    for event in response_stream:
-        if verbose_stream or runtime_config.get("verbose_stream", False):
-            print(repr(event), flush=True)
-        delta = getattr(event.choices[0], "delta", None)
-        if delta and getattr(delta, "content", None):
-            chunk = delta.content
-            content_accum += chunk
-            if message_handler:
-                message_handler.handle_message({"type": "stream", "content": chunk})
-    if message_handler:
-        message_handler.handle_message({"type": "stream_end", "content": content_accum})
-    return None
-
-
-def retry_api_call(api_func, max_retries=5, *args, **kwargs):
-    last_exception = None
-    for attempt in range(1, max_retries + 1):
-        try:
-            return api_func(*args, **kwargs)
-        except json.JSONDecodeError as e:
-            last_exception = e
+        return None
+    else:
+        print(tr("Max retries for invalid response reached. Raising error."))
+        raise e
+
+
+def _handle_general_exception(e, attempt, max_retries):
+    error_message = str(e)
+    if "No endpoints found that support tool use" in error_message:
+        print(tr("API does not support tool use."))
+        raise NoToolSupportError(error_message)
+    status_code, retry_after = _extract_status_and_retry_after(e, error_message)
+    if status_code is not None:
+        if status_code == 429:
+            wait_time = _calculate_wait_time(status_code, retry_after, attempt)
             if attempt < max_retries:
-                wait_time = 2**attempt
-                print(
-                    tr(
-                        "Invalid/malformed response from OpenAI (attempt {attempt}/{max_retries}). Retrying in {wait_time} seconds...",
-                        attempt=attempt,
-                        max_retries=max_retries,
-                        wait_time=wait_time,
-                    )
+                _log_and_sleep(
+                    "OpenAI API rate limit (429) (attempt {attempt}/{max_retries}): {e}. Retrying in {wait_time} seconds...",
+                    attempt,
+                    max_retries,
+                    e=e,
+                    wait_time=wait_time,
                 )
-                time.sleep(wait_time)
+                return None
             else:
-                print(tr("Max retries for invalid response reached. Raising error."))
-                raise last_exception
-        except Exception as e:
-            last_exception = e
-            status_code = None
-            error_message = str(e)
-            retry_after = None
-            # Detect specific tool support error
-            if "No endpoints found that support tool use" in error_message:
-                print(tr("API does not support tool use."))
-                raise NoToolSupportError(error_message)
-            # Try to extract status code and Retry-After from known exception types or message
-            if hasattr(e, "status_code"):
-                status_code = getattr(e, "status_code")
-            elif hasattr(e, "response") and hasattr(e.response, "status_code"):
-                status_code = getattr(e.response, "status_code")
-                # Check for Retry-After header
-                if hasattr(e.response, "headers") and e.response.headers:
-                    retry_after = e.response.headers.get("Retry-After")
+                print("Max retries for OpenAI API rate limit reached. Raising error.")
+                raise e
+        elif 500 <= status_code < 600:
+            wait_time = 2**attempt
+            if attempt < max_retries:
+                _log_and_sleep(
+                    "OpenAI API server error (attempt {attempt}/{max_retries}): {e}. Retrying in {wait_time} seconds...",
+                    attempt,
+                    max_retries,
+                    e=e,
+                    wait_time=wait_time,
+                )
+                return None
            else:
-                # Try to parse from error message
-                import re
-
-                match = re.search(r"[Ee]rror code: (\d{3})", error_message)
-                if match:
-                    status_code = int(match.group(1))
-                # Try to find Retry-After in message
-                retry_after_match = re.search(
-                    r"Retry-After['\"]?:?\s*(\d+)", error_message
+                print("Max retries for OpenAI API server error reached. Raising error.")
+                raise e
+        elif 400 <= status_code < 500:
+            print(
+                tr(
+                    "OpenAI API client error {status_code}: {e}. Not retrying.",
+                    status_code=status_code,
+                    e=e,
                 )
-                if retry_after_match:
-                    retry_after = retry_after_match.group(1)
-            # Decide retry logic based on status code
-            if status_code is not None:
-                if status_code == 429:
-                    # Use Retry-After if available, else exponential backoff
-                    if retry_after is not None:
-                        try:
-                            wait_time = int(float(retry_after))
-                        except Exception:
-                            wait_time = 2**attempt
-                    else:
-                        wait_time = 2**attempt
-                    if attempt < max_retries:
-                        print(
-                            tr(
-                                "OpenAI API rate limit (429) (attempt {attempt}/{max_retries}): {e}. Retrying in {wait_time} seconds...",
-                                attempt=attempt,
-                                max_retries=max_retries,
-                                e=e,
-                                wait_time=wait_time,
-                            )
-                        )
-                        time.sleep(wait_time)
-                        continue
-                    else:
-                        print(
-                            "Max retries for OpenAI API rate limit reached. Raising error."
-                        )
-                        raise last_exception
-                elif 500 <= status_code < 600:
-                    # Retry on server errors
-                    if attempt < max_retries:
-                        wait_time = 2**attempt
-                        print(
-                            tr(
-                                "OpenAI API server error (attempt {attempt}/{max_retries}): {e}. Retrying in {wait_time} seconds...",
-                                attempt=attempt,
-                                max_retries=max_retries,
-                                e=e,
-                                wait_time=wait_time,
-                            )
-                        )
-                        time.sleep(wait_time)
-                        continue
-                    else:
-                        print(
-                            "Max retries for OpenAI API server error reached. Raising error."
-                        )
-                        raise last_exception
-                elif 400 <= status_code < 500:
-                    # Do not retry on client errors (except 429)
-                    print(
-                        tr(
-                            "OpenAI API client error {status_code}: {e}. Not retrying.",
-                            status_code=status_code,
-                            e=e,
-                        )
-                    )
-                    raise last_exception
-            # If status code not detected, fallback to previous behavior
-            if attempt < max_retries:
-                wait_time = 2**attempt
+            )
+            raise e
+    if attempt < max_retries:
+        wait_time = 2**attempt
+        _log_and_sleep(
+            "OpenAI API error (attempt {attempt}/{max_retries}): {e}. Retrying in {wait_time} seconds...",
+            attempt,
+            max_retries,
+            e=e,
+            wait_time=wait_time,
+        )
+        print(f"[DEBUG] Exception repr: {repr(e)}")
+        return None
+    else:
+        print(tr("Max retries for OpenAI API error reached. Raising error."))
+        raise e
+
+
+def retry_api_call(
+    api_func, max_retries=5, *args, history=None, user_message_on_empty=None, **kwargs
+):
+    for attempt in range(1, max_retries + 1):
+        try:
+            response = api_func(*args, **kwargs)
+            error = getattr(response, "error", None)
+            if error:
+                print(f"ApiError: {error.get('message', error)}")
+                raise ApiError(error.get("message", str(error)))
+            return response
+        except ApiError:
+            raise
+        except EmptyResponseError:
+            if history is not None and user_message_on_empty is not None:
                 print(
-                    tr(
-                        "OpenAI API error (attempt {attempt}/{max_retries}): {e}. Retrying in {wait_time} seconds...",
-                        attempt=attempt,
-                        max_retries=max_retries,
-                        e=e,
-                        wait_time=wait_time,
-                    )
+                    f"[DEBUG] Adding user message to history: {user_message_on_empty}"
                 )
-                time.sleep(wait_time)
+                history.add_message({"role": "user", "content": user_message_on_empty})
+                continue  # Retry with updated history
             else:
-                print(tr("Max retries for OpenAI API error reached. Raising error."))
-                raise last_exception
+                raise
+        except json.JSONDecodeError as e:
+            result = _handle_json_decode_error(e, attempt, max_retries)
+            if result is not None:
+                return result
+        except Exception as e:
+            result = _handle_general_exception(e, attempt, max_retries)
+            if result is not None:
+                return result
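The retry behaviour the new helpers implement is easier to see in isolation. The sketch below restates _calculate_wait_time outside the module so the policy can be checked directly: a 429 honours the server's Retry-After value when it parses, and every other retryable failure backs off exponentially as 2**attempt.

# Stand-alone restatement of the backoff policy used by _handle_general_exception above.
def backoff_seconds(status_code, retry_after, attempt):
    if status_code == 429 and retry_after is not None:
        try:
            return int(float(retry_after))  # server-provided hint wins for rate limits
        except Exception:
            return 2 ** attempt
    return 2 ** attempt

print(backoff_seconds(429, "7", 1))    # 7 -- Retry-After header honoured
print(backoff_seconds(429, "bad", 2))  # 4 -- unparsable hint falls back to 2**attempt
print(backoff_seconds(503, None, 3))   # 8 -- server errors: exponential backoff

Note that retry_api_call places max_retries before *args, so callers forwarding positional arguments to api_func need to pass max_retries explicitly (or by keyword) to keep it from absorbing the first positional argument.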
janito/agent/{conversation_history.py → llm_conversation_history.py}
@@ -1,8 +1,10 @@
 from typing import List, Dict, Optional
 import json
+import sys
+import traceback


-class ConversationHistory:
+class LLMConversationHistory:
     """
     Manages the message history for a conversation, supporting OpenAI-style roles.
     Intended to be used by ConversationHandler and chat loop for all history operations.
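Downstream code that imported the old name needs the corresponding one-line change; a minimal migration sketch:

# Before (janito <= 1.9.0):
#   from janito.agent.conversation_history import ConversationHistory
#   history = ConversationHistory()
# After (janito >= 1.10.0):
from janito.agent.llm_conversation_history import LLMConversationHistory

history = LLMConversationHistory()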
@@ -13,6 +15,14 @@ class ConversationHistory:

     def add_message(self, message: Dict):
         """Append a message dict to the history."""
+        content = message.get("content")
+        if isinstance(content, str) and any(
+            0xD800 <= ord(ch) <= 0xDFFF for ch in content
+        ):
+            print(
+                f"Surrogate code point detected in message content: {content!r}\nStack trace:\n{''.join(traceback.format_stack())}",
+                file=sys.stderr,
+            )
         self._messages.append(message)

     def get_messages(self, role: Optional[str] = None) -> List[Dict]:
@@ -36,6 +46,13 @@ class ConversationHistory:
             (i for i, m in enumerate(self._messages) if m.get("role") == "system"), None
         )
         system_msg = {"role": "system", "content": content}
+        if isinstance(content, str) and any(
+            0xD800 <= ord(ch) <= 0xDFFF for ch in content
+        ):
+            print(
+                f"Surrogate code point detected in system message content: {content!r}\nStack trace:\n{''.join(traceback.format_stack())}",
+                file=sys.stderr,
+            )
         if system_idx is not None:
             self._messages[system_idx] = system_msg
         else:
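Both insertions guard against lone UTF-16 surrogate code points (U+D800–U+DFFF), which cannot be encoded to UTF-8 and usually indicate text that was decoded with errors="surrogateescape" or otherwise damaged upstream; the logged warning also appends a stack trace via traceback.format_stack(). The predicate can be exercised on its own:

def has_lone_surrogates(text: str) -> bool:
    # Same check used by add_message and the system-message path above.
    return any(0xD800 <= ord(ch) <= 0xDFFF for ch in text)

print(has_lone_surrogates("plain ascii"))         # False
print(has_lone_surrogates("broken \udce9 text"))  # True -- typical surrogateescape leftover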
janito/agent/openai_client.py
@@ -4,6 +4,7 @@ import time
 from openai import OpenAI
 from janito.agent.conversation import ConversationHandler
 from janito.agent.conversation_exceptions import ProviderError
+from janito.agent.llm_conversation_history import LLMConversationHistory


 class Agent:
@@ -18,7 +19,7 @@ class Agent:
         model: str = None,
         system_prompt_template: str | None = None,
         verbose_tools: bool = False,
-        base_url: str = "https://openrouter.ai/api/v1",
+        base_url: str = None,
         azure_openai_api_version: str = "2023-05-15",
         use_azure_openai: bool = False,
     ):
@@ -41,17 +42,35 @@ class Agent:
             # Import inside conditional to avoid requiring AzureOpenAI unless needed
             from openai import AzureOpenAI

-            self.client = AzureOpenAI(
-                api_key=api_key,
-                azure_endpoint=base_url,
-                api_version=azure_openai_api_version,
-            )
+            if base_url:
+                self.client = AzureOpenAI(
+                    api_key=api_key,
+                    azure_endpoint=base_url,
+                    api_version=azure_openai_api_version,
+                )
+            else:
+                self.client = AzureOpenAI(
+                    api_key=api_key,
+                    api_version=azure_openai_api_version,
+                )
         else:
-            self.client = OpenAI(
-                base_url=base_url,
-                api_key=api_key,
-                default_headers={"HTTP-Referer": self.REFERER, "X-Title": self.TITLE},
-            )
+            if base_url:
+                self.client = OpenAI(
+                    base_url=base_url,
+                    api_key=api_key,
+                    default_headers={
+                        "HTTP-Referer": self.REFERER,
+                        "X-Title": self.TITLE,
+                    },
+                )
+            else:
+                self.client = OpenAI(
+                    api_key=api_key,
+                    default_headers={
+                        "HTTP-Referer": self.REFERER,
+                        "X-Title": self.TITLE,
+                    },
+                )

         self.conversation_handler = ConversationHandler(
             self.client,
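With base_url now defaulting to None (see the signature hunk above), an endpoint is only passed when one is configured; otherwise resolution is deferred to the OpenAI SDK's own defaults. A usage sketch, assuming the openai>=1.x environment-variable defaults (OPENAI_API_KEY, OPENAI_BASE_URL, AZURE_OPENAI_ENDPOINT) and the api_key/base_url keyword names shown in the constructor above:

import os
from janito.agent.openai_client import Agent

# Old behaviour, made explicit: point the client at OpenRouter.
agent = Agent(api_key=os.environ["OPENAI_API_KEY"],
              base_url="https://openrouter.ai/api/v1")

# New in 1.10.0: omit base_url and let the SDK resolve its default endpoint
# (OPENAI_BASE_URL / api.openai.com, or AZURE_OPENAI_ENDPOINT when use_azure_openai=True).
agent = Agent(api_key=os.environ["OPENAI_API_KEY"])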
@@ -69,30 +88,27 @@ class Agent:
         spinner=False,
         max_tokens=None,
         max_rounds=100,
-        stream=False,
+        tool_user=False,
     ):
         """
         Start a chat conversation with the agent.

         Args:
-            messages: ConversationHistory instance or None.
-            message_handler: Optional handler for streaming or event messages.
+            messages: LLMConversationHistory instance or None.
+            message_handler: Optional handler for event messages.
             spinner: Show spinner during request.
             max_tokens: Max tokens for completion.
             max_rounds: Max conversation rounds.
-            stream: If True, enable OpenAI streaming mode (yields tokens incrementally).
         Returns:
-            If stream=False: dict with 'content', 'usage', and 'usage_history'.
-            If stream=True: generator yielding content chunks or events.
+            dict with 'content', 'usage', and 'usage_history'.
         """
         from janito.agent.runtime_config import runtime_config
-        from janito.agent.conversation_history import ConversationHistory

         if messages is None:
-            messages = ConversationHistory()
-        elif not isinstance(messages, ConversationHistory):
+            messages = LLMConversationHistory()
+        elif not isinstance(messages, LLMConversationHistory):
             raise TypeError(
-                "Agent.chat expects a ConversationHistory instance or None."
+                "Agent.chat expects a LLMConversationHistory instance or None."
             )

         max_retries = 5
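Callers of Agent.chat see two changes here: the stream flag and the streaming return mode are gone, and a new tool_user flag is forwarded to the conversation handler. A hedged usage sketch, limited to names visible in this diff (the exact semantics of tool_user are not shown in these hunks):

import os
from janito.agent.openai_client import Agent
from janito.agent.llm_conversation_history import LLMConversationHistory

agent = Agent(api_key=os.environ["OPENAI_API_KEY"])
history = LLMConversationHistory()
history.add_message({"role": "user", "content": "Summarize the changes in janito 1.10.0."})

# Passing stream=... is now a TypeError; chat() always returns a dict.
result = agent.chat(messages=history, spinner=False, tool_user=False)
print(result["content"])
print(result["usage"])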
@@ -106,8 +122,7 @@ class Agent:
                     spinner=spinner,
                     max_tokens=max_tokens,
                     verbose_events=runtime_config.get("verbose_events", False),
-                    stream=stream,
-                    verbose_stream=runtime_config.get("verbose_stream", False),
+                    tool_user=tool_user,
                 )
             except ProviderError as e:
                 error_data = getattr(e, "error_data", {}) or {}
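This except branch pairs with the raise ProviderError(...) shown in conversation_api.py above, whose second argument carries the provider details read back here as error_data. A sketch of how calling code could inspect it; the retry decision is illustrative, not janito's own logic:

from janito.agent.conversation_exceptions import ProviderError

def is_retryable(exc: ProviderError) -> bool:
    # error_data carries {"code": 502, "raw_response": ...} for the "no choices" case above.
    error_data = getattr(exc, "error_data", {}) or {}
    return error_data.get("code") in (502, 503)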