janito 1.9.0__py3-none-any.whl → 1.11.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (106)
  1. janito/__init__.py +1 -1
  2. janito/agent/api_exceptions.py +4 -0
  3. janito/agent/config.py +1 -1
  4. janito/agent/config_defaults.py +2 -26
  5. janito/agent/conversation.py +163 -122
  6. janito/agent/conversation_api.py +246 -168
  7. janito/agent/conversation_ui.py +1 -1
  8. janito/agent/{conversation_history.py → llm_conversation_history.py} +30 -1
  9. janito/agent/openai_client.py +38 -23
  10. janito/agent/openai_schema_generator.py +162 -129
  11. janito/agent/platform_discovery.py +134 -77
  12. janito/agent/profile_manager.py +5 -5
  13. janito/agent/rich_message_handler.py +80 -31
  14. janito/agent/templates/profiles/system_prompt_template_base.txt.j2 +20 -4
  15. janito/agent/test_openai_schema_generator.py +93 -0
  16. janito/agent/tool_base.py +7 -2
  17. janito/agent/tool_executor.py +54 -49
  18. janito/agent/tool_registry.py +5 -2
  19. janito/agent/tool_use_tracker.py +26 -5
  20. janito/agent/tools/__init__.py +8 -3
  21. janito/agent/tools/create_directory.py +3 -1
  22. janito/agent/tools/create_file.py +7 -1
  23. janito/agent/tools/fetch_url.py +40 -3
  24. janito/agent/tools/find_files.py +29 -14
  25. janito/agent/tools/get_file_outline/core.py +7 -8
  26. janito/agent/tools/get_file_outline/python_outline.py +139 -95
  27. janito/agent/tools/get_file_outline/search_outline.py +3 -1
  28. janito/agent/tools/get_lines.py +98 -64
  29. janito/agent/tools/move_file.py +59 -31
  30. janito/agent/tools/open_url.py +31 -0
  31. janito/agent/tools/present_choices.py +3 -1
  32. janito/agent/tools/python_command_runner.py +149 -0
  33. janito/agent/tools/python_file_runner.py +147 -0
  34. janito/agent/tools/python_stdin_runner.py +153 -0
  35. janito/agent/tools/remove_directory.py +3 -1
  36. janito/agent/tools/remove_file.py +5 -1
  37. janito/agent/tools/replace_file.py +12 -2
  38. janito/agent/tools/replace_text_in_file.py +195 -149
  39. janito/agent/tools/run_bash_command.py +30 -69
  40. janito/agent/tools/run_powershell_command.py +138 -105
  41. janito/agent/tools/search_text/__init__.py +1 -0
  42. janito/agent/tools/search_text/core.py +176 -0
  43. janito/agent/tools/search_text/match_lines.py +58 -0
  44. janito/agent/tools/search_text/pattern_utils.py +65 -0
  45. janito/agent/tools/search_text/traverse_directory.py +127 -0
  46. janito/agent/tools/validate_file_syntax/core.py +43 -30
  47. janito/agent/tools/validate_file_syntax/html_validator.py +21 -5
  48. janito/agent/tools/validate_file_syntax/markdown_validator.py +77 -34
  49. janito/agent/tools_utils/action_type.py +7 -0
  50. janito/agent/tools_utils/dir_walk_utils.py +3 -2
  51. janito/agent/tools_utils/formatting.py +47 -21
  52. janito/agent/tools_utils/gitignore_utils.py +89 -40
  53. janito/agent/tools_utils/test_gitignore_utils.py +46 -0
  54. janito/agent/tools_utils/utils.py +7 -1
  55. janito/cli/_print_config.py +63 -61
  56. janito/cli/arg_parser.py +13 -12
  57. janito/cli/cli_main.py +137 -147
  58. janito/cli/config_commands.py +112 -109
  59. janito/cli/main.py +152 -174
  60. janito/cli/one_shot.py +40 -26
  61. janito/i18n/__init__.py +1 -1
  62. janito/rich_utils.py +46 -8
  63. janito/shell/commands/__init__.py +2 -4
  64. janito/shell/commands/conversation_restart.py +3 -1
  65. janito/shell/commands/edit.py +3 -0
  66. janito/shell/commands/history_view.py +3 -3
  67. janito/shell/commands/lang.py +3 -0
  68. janito/shell/commands/livelogs.py +5 -3
  69. janito/shell/commands/prompt.py +6 -0
  70. janito/shell/commands/session.py +3 -0
  71. janito/shell/commands/session_control.py +3 -0
  72. janito/shell/commands/termweb_log.py +8 -0
  73. janito/shell/commands/tools.py +3 -0
  74. janito/shell/commands/track.py +36 -0
  75. janito/shell/commands/utility.py +13 -18
  76. janito/shell/commands/verbose.py +3 -4
  77. janito/shell/input_history.py +62 -0
  78. janito/shell/main.py +160 -181
  79. janito/shell/session/config.py +83 -75
  80. janito/shell/session/manager.py +0 -21
  81. janito/shell/ui/interactive.py +97 -75
  82. janito/termweb/static/editor.css +32 -33
  83. janito/termweb/static/editor.css.bak +140 -22
  84. janito/termweb/static/editor.html +12 -7
  85. janito/termweb/static/editor.html.bak +16 -11
  86. janito/termweb/static/editor.js +94 -40
  87. janito/termweb/static/editor.js.bak +97 -65
  88. janito/termweb/static/index.html +1 -2
  89. janito/termweb/static/index.html.bak +1 -1
  90. janito/termweb/static/termweb.css +1 -22
  91. janito/termweb/static/termweb.css.bak +6 -4
  92. janito/termweb/static/termweb.js +0 -6
  93. janito/termweb/static/termweb.js.bak +1 -2
  94. janito/tests/test_rich_utils.py +44 -0
  95. janito/web/app.py +0 -75
  96. {janito-1.9.0.dist-info → janito-1.11.0.dist-info}/METADATA +61 -42
  97. janito-1.11.0.dist-info/RECORD +163 -0
  98. {janito-1.9.0.dist-info → janito-1.11.0.dist-info}/WHEEL +1 -1
  99. janito/agent/providers.py +0 -77
  100. janito/agent/tools/run_python_command.py +0 -161
  101. janito/agent/tools/search_text.py +0 -204
  102. janito/shell/commands/sum.py +0 -49
  103. janito-1.9.0.dist-info/RECORD +0 -151
  104. {janito-1.9.0.dist-info → janito-1.11.0.dist-info}/entry_points.txt +0 -0
  105. {janito-1.9.0.dist-info → janito-1.11.0.dist-info}/licenses/LICENSE +0 -0
  106. {janito-1.9.0.dist-info → janito-1.11.0.dist-info}/top_level.txt +0 -0
janito/agent/conversation_api.py

@@ -7,20 +7,21 @@ from janito.i18n import tr
 import json
 from janito.agent.runtime_config import runtime_config
 from janito.agent.tool_registry import get_tool_schemas
-from janito.agent.conversation_exceptions import NoToolSupportError
+from janito.agent.conversation_exceptions import NoToolSupportError, EmptyResponseError
+from janito.agent.api_exceptions import ApiError
+from rich.console import Console
+from rich.status import Status
+
+console = Console()


 def _sanitize_utf8_surrogates(obj):
-    """
-    Recursively sanitize a dict/list/string by replacing surrogate codepoints with the unicode replacement character.
-    """
-    if isinstance(obj, str):
-        # Encode with surrogatepass, then decode with 'utf-8', replacing errors
-        return obj.encode("utf-8", "replace").decode("utf-8", "replace")
-    elif isinstance(obj, dict):
+    if isinstance(obj, dict):
         return {k: _sanitize_utf8_surrogates(v) for k, v in obj.items()}
     elif isinstance(obj, list):
-        return [_sanitize_utf8_surrogates(x) for x in obj]
+        return [_sanitize_utf8_surrogates(i) for i in obj]
+    elif isinstance(obj, str):
+        return obj.encode("utf-8", "surrogatepass").decode("utf-8", "ignore")
     else:
         return obj

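The sanitizer's behavior changes here: the old version re-encoded strings with the "replace" handler (lone surrogates became U+FFFD), while the new version encodes with "surrogatepass" and decodes with "ignore", so surrogate code points are silently dropped. A standalone sketch of the new behavior (illustrative only, not package code):

```python
# Minimal sketch of the new sanitization rule: surrogatepass lets lone
# surrogates survive encoding, then "ignore" drops the invalid bytes on decode.
def sanitize(obj):
    if isinstance(obj, dict):
        return {k: sanitize(v) for k, v in obj.items()}
    if isinstance(obj, list):
        return [sanitize(i) for i in obj]
    if isinstance(obj, str):
        return obj.encode("utf-8", "surrogatepass").decode("utf-8", "ignore")
    return obj

print(sanitize("ok \ud800 text"))  # -> "ok  text": the surrogate is removed
```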
@@ -28,7 +29,7 @@ def _sanitize_utf8_surrogates(obj):
 def get_openai_response(
     client, model, messages, max_tokens, tools=None, tool_choice=None, temperature=None
 ):
-    """Non-streaming OpenAI API call."""
+    """OpenAI API call."""
     messages = _sanitize_utf8_surrogates(messages)
     from janito.agent.conversation_exceptions import ProviderError

@@ -53,176 +54,253 @@ def get_openai_response(
         or response.choices is None
         or len(response.choices) == 0
     ):
+        # Always check for error before raising ProviderError
+        error = getattr(response, "error", None)
+        if error:
+            print(f"ApiError: {error.get('message', error)}")
+            print(f"Full error object: {error}")
+            print(f"Raw response: {response}")
+            raise ApiError(error.get("message", str(error)))
         raise ProviderError(
-            "No choices in response; possible API or LLM error.",
+            f"No choices in response; possible API or LLM error. Raw response: {response!r}",
             {"code": 502, "raw_response": str(response)},
         )
     return response


-def get_openai_stream_response(
-    client,
-    model,
-    messages,
-    max_tokens,
-    tools=None,
-    tool_choice=None,
-    temperature=None,
-    verbose_stream=False,
-    message_handler=None,
+def _extract_status_and_retry_after(e, error_message):
+    status_code = None
+    retry_after = None
+    if hasattr(e, "status_code"):
+        status_code = getattr(e, "status_code")
+    elif hasattr(e, "response") and hasattr(e.response, "status_code"):
+        status_code = getattr(e.response, "status_code")
+    elif "429" in error_message:
+        status_code = 429
+    import re
+
+    match = re.search(r"status[ _]?code[=: ]+([0-9]+)", error_message)
+    if match:
+        status_code = int(match.group(1))
+    match_retry = re.search(r"retry[-_ ]?after[=: ]+([0-9]+)", error_message)
+    if match_retry:
+        retry_after = int(match_retry.group(1))
+    return status_code, retry_after
+
+
+def _calculate_wait_time(status_code, retry_after, attempt):
+    if status_code == 429 and retry_after:
+        return max(retry_after, 2**attempt)
+    return 2**attempt
+
+
+def _log_and_sleep(
+    message,
+    attempt,
+    max_retries,
+    e=None,
+    wait_time=None,
+    status=None,
+    waiting_message=None,
+    restore_message=None,
 ):
-    """Streaming OpenAI API call."""
-    messages = _sanitize_utf8_surrogates(messages)
-    openai_args = dict(
-        model=model,
-        messages=messages,
-        max_tokens=max_tokens,
-        stream=True,
+    status_message = tr(
+        message,
+        attempt=attempt,
+        max_retries=max_retries,
+        e=e,
+        wait_time=wait_time,
     )
-    if not runtime_config.get("vanilla_mode", False):
-        openai_args.update(
-            tools=tools or get_tool_schemas(),
-            tool_choice=tool_choice or "auto",
-            temperature=temperature if temperature is not None else 0.2,
+    if (
+        status is not None
+        and waiting_message is not None
+        and restore_message is not None
+    ):
+        original_message = status.status
+        status.update(waiting_message)
+        time.sleep(wait_time)
+        status.update(restore_message)
+    else:
+        with Status(status_message, console=console, spinner="dots"):
+            time.sleep(wait_time)
+
+
+def _handle_json_decode_error(e, attempt, max_retries, status=None):
+    if attempt < max_retries:
+        wait_time = 2**attempt
+        if status is not None:
+            _log_and_sleep(
+                "Invalid/malformed response from OpenAI (attempt {attempt}/{max_retries}). Retrying in {wait_time} seconds...",
+                attempt,
+                max_retries,
+                wait_time=wait_time,
+                status=status,
+                waiting_message="Waiting after error...",
+                restore_message="Waiting for AI response...",
+            )
+        else:
+            _log_and_sleep(
+                "Invalid/malformed response from OpenAI (attempt {attempt}/{max_retries}). Retrying in {wait_time} seconds...",
+                attempt,
+                max_retries,
+                wait_time=wait_time,
+            )
+        return None
+    else:
+        print(tr("Max retries for invalid response reached. Raising error."))
+        raise e
+
+
+def _handle_no_tool_support(error_message):
+    if "No endpoints found that support tool use" in error_message:
+        print(tr("API does not support tool use."))
+        raise NoToolSupportError(error_message)
+
+
+def _handle_rate_limit(e, attempt, max_retries, status, status_code, retry_after):
+    wait_time = _calculate_wait_time(status_code, retry_after, attempt)
+    if attempt < max_retries:
+        if status is not None:
+            _log_and_sleep(
+                "OpenAI API rate limit (429) (attempt {attempt}/{max_retries}): {e}. Retrying in {wait_time} seconds...",
+                attempt,
+                max_retries,
+                e=e,
+                wait_time=wait_time,
+                status=status,
+                waiting_message="Waiting after rate limit reached...",
+                restore_message="Waiting for AI response...",
+            )
+        else:
+            _log_and_sleep(
+                "OpenAI API rate limit (429) (attempt {attempt}/{max_retries}): {e}. Retrying in {wait_time} seconds...",
+                attempt,
+                max_retries,
+                e=e,
+                wait_time=wait_time,
+            )
+        return None
+    else:
+        raise e
+
+
+def _handle_server_error(e, attempt, max_retries, status, status_code):
+    wait_time = 2**attempt
+    if attempt < max_retries:
+        if status is not None:
+            _log_and_sleep(
+                "OpenAI API server error (attempt {attempt}/{max_retries}): {e}. Retrying in {wait_time} seconds...",
+                attempt,
+                max_retries,
+                e=e,
+                wait_time=wait_time,
+                status=status,
+                waiting_message="Waiting after server error...",
+                restore_message="Waiting for AI response...",
+            )
+        else:
+            _log_and_sleep(
+                "OpenAI API server error (attempt {attempt}/{max_retries}): {e}. Retrying in {wait_time} seconds...",
+                attempt,
+                max_retries,
+                e=e,
+                wait_time=wait_time,
+            )
+        return None
+    else:
+        print("Max retries for OpenAI API server error reached. Raising error.")
+        raise e
+
+
+def _handle_client_error(e, status_code):
+    print(
+        tr(
+            "OpenAI API client error {status_code}: {e}. Not retrying.",
+            status_code=status_code,
+            e=e,
         )
-    response_stream = client.chat.completions.create(**openai_args)
-    content_accum = ""
-    for event in response_stream:
-        if verbose_stream or runtime_config.get("verbose_stream", False):
-            print(repr(event), flush=True)
-        delta = getattr(event.choices[0], "delta", None)
-        if delta and getattr(delta, "content", None):
-            chunk = delta.content
-            content_accum += chunk
-            if message_handler:
-                message_handler.handle_message({"type": "stream", "content": chunk})
-    if message_handler:
-        message_handler.handle_message({"type": "stream_end", "content": content_accum})
-    return None
-
-
-def retry_api_call(api_func, max_retries=5, *args, **kwargs):
-    last_exception = None
+    )
+    raise e
+
+
+def _handle_generic_error(e, attempt, max_retries, status):
+    wait_time = 2**attempt
+    if attempt < max_retries:
+        if status is not None:
+            _log_and_sleep(
+                "OpenAI API error (attempt {attempt}/{max_retries}): {e}. Retrying in {wait_time} seconds...",
+                attempt,
+                max_retries,
+                e=e,
+                wait_time=wait_time,
+                status=status,
+                waiting_message="Waiting after error...",
+                restore_message="Waiting for AI response...",
+            )
+        else:
+            _log_and_sleep(
+                "OpenAI API error (attempt {attempt}/{max_retries}): {e}. Retrying in {wait_time} seconds...",
+                attempt,
+                max_retries,
+                e=e,
+                wait_time=wait_time,
+            )
+        print(f"[DEBUG] Exception repr: {repr(e)}")
+        return None
+    else:
+        print(tr("Max retries for OpenAI API error reached. Raising error."))
+        raise e
+
+
+def _handle_general_exception(e, attempt, max_retries, status=None):
+    error_message = str(e)
+    _handle_no_tool_support(error_message)
+    status_code, retry_after = _extract_status_and_retry_after(e, error_message)
+    if status_code is not None:
+        if status_code == 429:
+            return _handle_rate_limit(
+                e, attempt, max_retries, status, status_code, retry_after
+            )
+        elif 500 <= status_code < 600:
+            return _handle_server_error(e, attempt, max_retries, status, status_code)
+        elif 400 <= status_code < 500:
+            _handle_client_error(e, status_code)
+    return _handle_generic_error(e, attempt, max_retries, status)
+
+
+def retry_api_call(
+    api_func,
+    max_retries=5,
+    *args,
+    history=None,
+    user_message_on_empty=None,
+    status=None,
+    **kwargs,
+):
     for attempt in range(1, max_retries + 1):
         try:
-            return api_func(*args, **kwargs)
-        except json.JSONDecodeError as e:
-            last_exception = e
-            if attempt < max_retries:
-                wait_time = 2**attempt
+            response = api_func(*args, **kwargs)
+            error = getattr(response, "error", None)
+            if error:
+                print(f"ApiError: {error.get('message', error)}")
+                raise ApiError(error.get("message", str(error)))
+            return response
+        except ApiError:
+            raise
+        except EmptyResponseError:
+            if history is not None and user_message_on_empty is not None:
                 print(
-                    tr(
-                        "Invalid/malformed response from OpenAI (attempt {attempt}/{max_retries}). Retrying in {wait_time} seconds...",
-                        attempt=attempt,
-                        max_retries=max_retries,
-                        wait_time=wait_time,
-                    )
+                    f"[DEBUG] Adding user message to history: {user_message_on_empty}"
                 )
-                time.sleep(wait_time)
+                history.add_message({"role": "user", "content": user_message_on_empty})
+                continue  # Retry with updated history
             else:
-                print(tr("Max retries for invalid response reached. Raising error."))
-                raise last_exception
+                raise
+        except json.JSONDecodeError as e:
+            result = _handle_json_decode_error(e, attempt, max_retries, status=status)
+            if result is not None:
+                return result
         except Exception as e:
-            last_exception = e
-            status_code = None
-            error_message = str(e)
-            retry_after = None
-            # Detect specific tool support error
-            if "No endpoints found that support tool use" in error_message:
-                print(tr("API does not support tool use."))
-                raise NoToolSupportError(error_message)
-            # Try to extract status code and Retry-After from known exception types or message
-            if hasattr(e, "status_code"):
-                status_code = getattr(e, "status_code")
-            elif hasattr(e, "response") and hasattr(e.response, "status_code"):
-                status_code = getattr(e.response, "status_code")
-                # Check for Retry-After header
-                if hasattr(e.response, "headers") and e.response.headers:
-                    retry_after = e.response.headers.get("Retry-After")
-            else:
-                # Try to parse from error message
-                import re
-
-                match = re.search(r"[Ee]rror code: (\d{3})", error_message)
-                if match:
-                    status_code = int(match.group(1))
-                # Try to find Retry-After in message
-                retry_after_match = re.search(
-                    r"Retry-After['\"]?:?\s*(\d+)", error_message
-                )
-                if retry_after_match:
-                    retry_after = retry_after_match.group(1)
-            # Decide retry logic based on status code
-            if status_code is not None:
-                if status_code == 429:
-                    # Use Retry-After if available, else exponential backoff
-                    if retry_after is not None:
-                        try:
-                            wait_time = int(float(retry_after))
-                        except Exception:
-                            wait_time = 2**attempt
-                    else:
-                        wait_time = 2**attempt
-                    if attempt < max_retries:
-                        print(
-                            tr(
-                                "OpenAI API rate limit (429) (attempt {attempt}/{max_retries}): {e}. Retrying in {wait_time} seconds...",
-                                attempt=attempt,
-                                max_retries=max_retries,
-                                e=e,
-                                wait_time=wait_time,
-                            )
-                        )
-                        time.sleep(wait_time)
-                        continue
-                    else:
-                        print(
-                            "Max retries for OpenAI API rate limit reached. Raising error."
-                        )
-                        raise last_exception
-                elif 500 <= status_code < 600:
-                    # Retry on server errors
-                    if attempt < max_retries:
-                        wait_time = 2**attempt
-                        print(
-                            tr(
-                                "OpenAI API server error (attempt {attempt}/{max_retries}): {e}. Retrying in {wait_time} seconds...",
-                                attempt=attempt,
-                                max_retries=max_retries,
-                                e=e,
-                                wait_time=wait_time,
-                            )
-                        )
-                        time.sleep(wait_time)
-                        continue
-                    else:
-                        print(
-                            "Max retries for OpenAI API server error reached. Raising error."
-                        )
-                        raise last_exception
-                elif 400 <= status_code < 500:
-                    # Do not retry on client errors (except 429)
-                    print(
-                        tr(
-                            "OpenAI API client error {status_code}: {e}. Not retrying.",
-                            status_code=status_code,
-                            e=e,
-                        )
-                    )
-                    raise last_exception
-            # If status code not detected, fallback to previous behavior
-            if attempt < max_retries:
-                wait_time = 2**attempt
-                print(
-                    tr(
-                        "OpenAI API error (attempt {attempt}/{max_retries}): {e}. Retrying in {wait_time} seconds...",
-                        attempt=attempt,
-                        max_retries=max_retries,
-                        e=e,
-                        wait_time=wait_time,
-                    )
-                )
-                time.sleep(wait_time)
-            else:
-                print(tr("Max retries for OpenAI API error reached. Raising error."))
-                raise last_exception
+            result = _handle_general_exception(e, attempt, max_retries, status=status)
+            if result is not None:
+                return result
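The net effect of this hunk is that retry_api_call becomes a thin loop: API-level errors raise ApiError immediately, EmptyResponseError appends user_message_on_empty to the history and retries, and everything else is routed through the _handle_* helpers, which sleep with exponential backoff (2**attempt seconds) and honor a 429's Retry-After hint only when it exceeds the backoff. A tiny standalone sketch of that wait-time rule and loop shape, using hypothetical names rather than the package's helpers:

```python
import time

def wait_time_for(status_code, retry_after, attempt):
    # Same rule as the diff's _calculate_wait_time: exponential backoff,
    # never waiting less than a 429's Retry-After hint.
    if status_code == 429 and retry_after:
        return max(retry_after, 2 ** attempt)
    return 2 ** attempt

def call_with_retries(api_func, max_retries=5):
    # Hypothetical wrapper showing only the loop shape of retry_api_call.
    for attempt in range(1, max_retries + 1):
        try:
            return api_func()
        except Exception:
            if attempt == max_retries:
                raise
            time.sleep(wait_time_for(None, None, attempt))  # waits 2, 4, 8, ...
```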
janito/agent/conversation_ui.py

@@ -8,7 +8,7 @@ from rich.console import Console
 def show_spinner(message, func, *args, **kwargs):
     console = Console()
     with console.status(message, spinner="dots") as status:
-        result = func(*args, **kwargs)
+        result = func(*args, status=status, **kwargs)
         status.stop()
         return result

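Because the wrapped callable now receives the live rich Status object, anything passed to show_spinner must accept a status keyword. A small, hypothetical caller illustrating the new contract (slow_task is not part of the package):

```python
from janito.agent.conversation_ui import show_spinner

def slow_task(status=None):
    # The spinner's rich Status object is forwarded here and can be updated.
    if status is not None:
        status.update("Still waiting for AI response...")
    return "done"

print(show_spinner("Waiting for AI response...", slow_task))
```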
janito/agent/{conversation_history.py → llm_conversation_history.py}

@@ -1,8 +1,10 @@
 from typing import List, Dict, Optional
 import json
+import sys
+import traceback


-class ConversationHistory:
+class LLMConversationHistory:
     """
     Manages the message history for a conversation, supporting OpenAI-style roles.
     Intended to be used by ConversationHandler and chat loop for all history operations.
@@ -13,6 +15,14 @@ class ConversationHistory:

     def add_message(self, message: Dict):
         """Append a message dict to the history."""
+        content = message.get("content")
+        if isinstance(content, str) and any(
+            0xD800 <= ord(ch) <= 0xDFFF for ch in content
+        ):
+            print(
+                f"Surrogate code point detected in message content: {content!r}\nStack trace:\n{''.join(traceback.format_stack())}",
+                file=sys.stderr,
+            )
         self._messages.append(message)

     def get_messages(self, role: Optional[str] = None) -> List[Dict]:
@@ -36,6 +46,13 @@ class ConversationHistory:
             (i for i, m in enumerate(self._messages) if m.get("role") == "system"), None
         )
         system_msg = {"role": "system", "content": content}
+        if isinstance(content, str) and any(
+            0xD800 <= ord(ch) <= 0xDFFF for ch in content
+        ):
+            print(
+                f"Surrogate code point detected in system message content: {content!r}\nStack trace:\n{''.join(traceback.format_stack())}",
+                file=sys.stderr,
+            )
         if system_idx is not None:
             self._messages[system_idx] = system_msg
         else:
@@ -51,3 +68,15 @@ class ConversationHistory:

     def __getitem__(self, idx):
         return self._messages[idx]
+
+    def remove_last_message(self):
+        """Remove and return the last message in the history, or None if empty."""
+        if self._messages:
+            return self._messages.pop()
+        return None
+
+    def last_message(self):
+        """Return the last message in the history, or None if empty."""
+        if self._messages:
+            return self._messages[-1]
+        return None
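Together with the rename, the two new helpers give callers a small, list-like history API. A usage sketch limited to the methods visible in this diff:

```python
from janito.agent.llm_conversation_history import LLMConversationHistory

history = LLMConversationHistory()
history.add_message({"role": "system", "content": "You are a helpful assistant."})
history.add_message({"role": "user", "content": "Hello"})

print(history.last_message()["content"])  # "Hello" (history unchanged)
popped = history.remove_last_message()    # pops and returns the user message
print(popped["content"])                  # "Hello"
print(history.last_message()["role"])     # "system" again
```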
janito/agent/openai_client.py

@@ -4,6 +4,7 @@ import time
 from openai import OpenAI
 from janito.agent.conversation import ConversationHandler
 from janito.agent.conversation_exceptions import ProviderError
+from janito.agent.llm_conversation_history import LLMConversationHistory


 class Agent:
@@ -18,7 +19,7 @@ class Agent:
         model: str = None,
         system_prompt_template: str | None = None,
         verbose_tools: bool = False,
-        base_url: str = "https://openrouter.ai/api/v1",
+        base_url: str = None,
         azure_openai_api_version: str = "2023-05-15",
         use_azure_openai: bool = False,
     ):
@@ -41,17 +42,35 @@ class Agent:
             # Import inside conditional to avoid requiring AzureOpenAI unless needed
             from openai import AzureOpenAI

-            self.client = AzureOpenAI(
-                api_key=api_key,
-                azure_endpoint=base_url,
-                api_version=azure_openai_api_version,
-            )
+            if base_url:
+                self.client = AzureOpenAI(
+                    api_key=api_key,
+                    azure_endpoint=base_url,
+                    api_version=azure_openai_api_version,
+                )
+            else:
+                self.client = AzureOpenAI(
+                    api_key=api_key,
+                    api_version=azure_openai_api_version,
+                )
         else:
-            self.client = OpenAI(
-                base_url=base_url,
-                api_key=api_key,
-                default_headers={"HTTP-Referer": self.REFERER, "X-Title": self.TITLE},
-            )
+            if base_url:
+                self.client = OpenAI(
+                    base_url=base_url,
+                    api_key=api_key,
+                    default_headers={
+                        "HTTP-Referer": self.REFERER,
+                        "X-Title": self.TITLE,
+                    },
+                )
+            else:
+                self.client = OpenAI(
+                    api_key=api_key,
+                    default_headers={
+                        "HTTP-Referer": self.REFERER,
+                        "X-Title": self.TITLE,
+                    },
+                )

         self.conversation_handler = ConversationHandler(
@@ -69,30 +88,27 @@ class Agent:
         spinner=False,
         max_tokens=None,
         max_rounds=100,
-        stream=False,
+        tool_user=False,
     ):
         """
         Start a chat conversation with the agent.

         Args:
-            messages: ConversationHistory instance or None.
-            message_handler: Optional handler for streaming or event messages.
+            messages: LLMConversationHistory instance or None.
+            message_handler: Optional handler for event messages.
             spinner: Show spinner during request.
             max_tokens: Max tokens for completion.
             max_rounds: Max conversation rounds.
-            stream: If True, enable OpenAI streaming mode (yields tokens incrementally).
         Returns:
-            If stream=False: dict with 'content', 'usage', and 'usage_history'.
-            If stream=True: generator yielding content chunks or events.
+            dict with 'content', 'usage', and 'usage_history'.
         """
         from janito.agent.runtime_config import runtime_config
-        from janito.agent.conversation_history import ConversationHistory

         if messages is None:
-            messages = ConversationHistory()
-        elif not isinstance(messages, ConversationHistory):
+            messages = LLMConversationHistory()
+        elif not isinstance(messages, LLMConversationHistory):
             raise TypeError(
-                "Agent.chat expects a ConversationHistory instance or None."
+                "Agent.chat expects a LLMConversationHistory instance or None."
             )

         max_retries = 5
@@ -106,8 +122,7 @@ class Agent:
                     spinner=spinner,
                     max_tokens=max_tokens,
                     verbose_events=runtime_config.get("verbose_events", False),
-                    stream=stream,
-                    verbose_stream=runtime_config.get("verbose_stream", False),
+                    tool_user=tool_user,
                 )
             except ProviderError as e:
                 error_data = getattr(e, "error_data", {}) or {}
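For callers, the visible change is that chat() no longer streams and instead takes a tool_user flag, always returning a dict with 'content', 'usage', and 'usage_history'. A hedged usage sketch based only on what this diff shows; it assumes the constructor exposes an api_key parameter (only the tail of its signature appears in the hunk above), and the argument values are placeholders:

```python
from janito.agent.openai_client import Agent
from janito.agent.llm_conversation_history import LLMConversationHistory

# Placeholder credentials/model; base_url now defaults to None.
agent = Agent(api_key="sk-...", model="gpt-4o")
history = LLMConversationHistory()
history.add_message({"role": "user", "content": "Summarize this repository."})

result = agent.chat(messages=history, spinner=True, max_rounds=100, tool_user=False)
print(result["content"])        # assistant reply
print(result["usage_history"])  # token usage history
```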