janito 1.9.0__py3-none-any.whl → 1.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. janito/__init__.py +1 -1
  2. janito/agent/api_exceptions.py +4 -0
  3. janito/agent/config.py +1 -1
  4. janito/agent/config_defaults.py +2 -26
  5. janito/agent/conversation.py +163 -122
  6. janito/agent/conversation_api.py +149 -159
  7. janito/agent/{conversation_history.py → llm_conversation_history.py} +18 -1
  8. janito/agent/openai_client.py +38 -23
  9. janito/agent/openai_schema_generator.py +162 -129
  10. janito/agent/platform_discovery.py +134 -77
  11. janito/agent/profile_manager.py +5 -5
  12. janito/agent/rich_message_handler.py +80 -31
  13. janito/agent/templates/profiles/system_prompt_template_base.txt.j2 +5 -4
  14. janito/agent/test_openai_schema_generator.py +93 -0
  15. janito/agent/tool_base.py +7 -2
  16. janito/agent/tool_executor.py +54 -49
  17. janito/agent/tool_registry.py +5 -2
  18. janito/agent/tool_use_tracker.py +26 -5
  19. janito/agent/tools/__init__.py +6 -3
  20. janito/agent/tools/create_directory.py +3 -1
  21. janito/agent/tools/create_file.py +7 -1
  22. janito/agent/tools/fetch_url.py +40 -3
  23. janito/agent/tools/find_files.py +3 -1
  24. janito/agent/tools/get_file_outline/core.py +6 -7
  25. janito/agent/tools/get_file_outline/search_outline.py +3 -1
  26. janito/agent/tools/get_lines.py +7 -2
  27. janito/agent/tools/move_file.py +3 -1
  28. janito/agent/tools/present_choices.py +3 -1
  29. janito/agent/tools/python_command_runner.py +150 -0
  30. janito/agent/tools/python_file_runner.py +148 -0
  31. janito/agent/tools/python_stdin_runner.py +154 -0
  32. janito/agent/tools/remove_directory.py +3 -1
  33. janito/agent/tools/remove_file.py +5 -1
  34. janito/agent/tools/replace_file.py +12 -2
  35. janito/agent/tools/replace_text_in_file.py +4 -2
  36. janito/agent/tools/run_bash_command.py +30 -69
  37. janito/agent/tools/run_powershell_command.py +134 -105
  38. janito/agent/tools/search_text.py +172 -122
  39. janito/agent/tools/validate_file_syntax/core.py +3 -1
  40. janito/agent/tools_utils/action_type.py +7 -0
  41. janito/agent/tools_utils/dir_walk_utils.py +3 -2
  42. janito/agent/tools_utils/formatting.py +47 -21
  43. janito/agent/tools_utils/gitignore_utils.py +66 -40
  44. janito/agent/tools_utils/test_gitignore_utils.py +46 -0
  45. janito/cli/_print_config.py +63 -61
  46. janito/cli/arg_parser.py +13 -12
  47. janito/cli/cli_main.py +137 -147
  48. janito/cli/main.py +152 -174
  49. janito/cli/one_shot.py +40 -26
  50. janito/i18n/__init__.py +1 -1
  51. janito/rich_utils.py +46 -8
  52. janito/shell/commands/__init__.py +2 -4
  53. janito/shell/commands/conversation_restart.py +3 -1
  54. janito/shell/commands/edit.py +3 -0
  55. janito/shell/commands/history_view.py +3 -3
  56. janito/shell/commands/lang.py +3 -0
  57. janito/shell/commands/livelogs.py +5 -3
  58. janito/shell/commands/prompt.py +6 -0
  59. janito/shell/commands/session.py +3 -0
  60. janito/shell/commands/session_control.py +3 -0
  61. janito/shell/commands/termweb_log.py +8 -0
  62. janito/shell/commands/tools.py +3 -0
  63. janito/shell/commands/track.py +36 -0
  64. janito/shell/commands/utility.py +13 -18
  65. janito/shell/commands/verbose.py +3 -4
  66. janito/shell/input_history.py +62 -0
  67. janito/shell/main.py +117 -181
  68. janito/shell/session/manager.py +0 -21
  69. janito/shell/ui/interactive.py +0 -2
  70. janito/termweb/static/editor.css +0 -4
  71. janito/tests/test_rich_utils.py +44 -0
  72. janito/web/app.py +0 -75
  73. {janito-1.9.0.dist-info → janito-1.10.0.dist-info}/METADATA +61 -42
  74. {janito-1.9.0.dist-info → janito-1.10.0.dist-info}/RECORD +78 -71
  75. {janito-1.9.0.dist-info → janito-1.10.0.dist-info}/WHEEL +1 -1
  76. janito/agent/providers.py +0 -77
  77. janito/agent/tools/run_python_command.py +0 -161
  78. janito/shell/commands/sum.py +0 -49
  79. {janito-1.9.0.dist-info → janito-1.10.0.dist-info}/entry_points.txt +0 -0
  80. {janito-1.9.0.dist-info → janito-1.10.0.dist-info}/licenses/LICENSE +0 -0
  81. {janito-1.9.0.dist-info → janito-1.10.0.dist-info}/top_level.txt +0 -0
janito/__init__.py CHANGED
@@ -1 +1 @@
- __version__ = "1.9.0"
+ __version__ = "1.10.0-dev"
janito/agent/api_exceptions.py ADDED
@@ -0,0 +1,4 @@
+ class ApiError(Exception):
+     """Custom exception for API errors."""
+
+     pass
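
ApiError is raised by the conversation layer when the API returns an error payload instead of choices (see the janito/agent/conversation.py hunks below). A minimal sketch of caller-side handling, not code from the package; the handler wiring is assumed:

from janito.agent.api_exceptions import ApiError

def run_prompt(handler, messages):
    # handler is assumed to be an already configured ConversationHandler instance.
    try:
        return handler.handle_conversation(messages, spinner=True)
    except ApiError as exc:
        # The provider reported an error (e.g. auth or quota) instead of returning choices.
        print(f"API error: {exc}")
        return None
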
janito/agent/config.py CHANGED
@@ -55,7 +55,7 @@ class FileConfig(BaseConfig):
  CONFIG_OPTIONS = {
      "api_key": "API key for OpenAI-compatible service (required)",
      "trust": "Trust mode: suppress all console output (bool, default: False)",
-     "model": "Model name to use (e.g., 'openai/gpt-4.1')",
+     "model": "Model name to use (e.g., 'gpt-4.1')",
      "base_url": "API base URL (OpenAI-compatible endpoint)",
      "role": "Role description for the Agent Profile (e.g., 'software engineer')",
      "system_prompt_template": "Override the entire Agent Profile prompt text",
janito/agent/config_defaults.py CHANGED
@@ -1,36 +1,12 @@
  # Centralized config defaults for Janito
  CONFIG_DEFAULTS = {
      "api_key": None, # Must be set by user
-     "model": "openai/gpt-4.1", # Default model
-     "base_url": "https://openrouter.ai/api/v1",
+     "model": "gpt-4.1", # Default model
      "role": "software developer", # Part of the Agent Profile
      "system_prompt_template": None, # None means auto-generate from Agent Profile role
      "temperature": 0.2,
-     "max_tokens": 200000,
+     "max_tokens": 32000,
      "use_azure_openai": False,
      "azure_openai_api_version": "2023-05-15",
      "profile": "base",
-     "providers": {
-         "openai": {
-             "api_key": None,
-             "base_url": "https://api.openai.com/v1",
-             "default_model": "gpt-3.5-turbo",
-         },
-         "azureai": {
-             "api_key": None,
-             "base_url": "https://your-azure-endpoint.openai.azure.com/",
-             "api_version": "2023-05-15",
-             "default_model": "gpt-35-turbo",
-         },
-         "openrouterai": {
-             "api_key": None,
-             "base_url": "https://openrouter.ai/api/v1",
-             "default_model": "openrouter/cognitive",
-         },
-         "fireworksai": {
-             "api_key": None,
-             "base_url": "https://api.fireworks.ai/inference/v1",
-             "default_model": "accounts/fireworks/models/firefunction-v1",
-         },
-     },
  }
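
The per-provider defaults table is gone; CONFIG_DEFAULTS now carries a single default model and a much smaller max_tokens. Resolution still falls back to these values when the user has not set an override. Sketch below: the unified_config.get("max_tokens", 32000) fallback mirrors the usage visible in the conversation.py hunks, while the model lookup is assumed to work the same way:

from janito.agent.config_defaults import CONFIG_DEFAULTS
from janito.agent.runtime_config import unified_config

# Effective values: user/runtime setting if present, otherwise the packaged default.
max_tokens = int(unified_config.get("max_tokens", CONFIG_DEFAULTS["max_tokens"]))  # 32000
model = unified_config.get("model", CONFIG_DEFAULTS["model"])  # "gpt-4.1"
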
janito/agent/conversation.py CHANGED
@@ -1,6 +1,5 @@
  from janito.agent.conversation_api import (
      get_openai_response,
-     get_openai_stream_response,
      retry_api_call,
  )
  from janito.agent.conversation_tool_calls import handle_tool_calls
@@ -11,7 +10,32 @@ from janito.agent.conversation_exceptions import (
      NoToolSupportError,
  )
  from janito.agent.runtime_config import unified_config, runtime_config
+ from janito.agent.api_exceptions import ApiError
  import pprint
+ from janito.agent.llm_conversation_history import LLMConversationHistory
+
+
+ def get_openai_response_with_content_check(client, model, messages, max_tokens):
+     response = get_openai_response(client, model, messages, max_tokens)
+     # Check for empty assistant message content, but allow tool/function calls
+     if not hasattr(response, "choices") or not response.choices:
+         return response # Let normal error handling occur
+     choice = response.choices[0]
+     content = getattr(choice.message, "content", None)
+     # Check for function_call (legacy OpenAI) or tool_calls (OpenAI v2 and others)
+     has_function_call = (
+         hasattr(choice.message, "function_call") and choice.message.function_call
+     )
+     has_tool_calls = hasattr(choice.message, "tool_calls") and choice.message.tool_calls
+     if (content is None or str(content).strip() == "") and not (
+         has_function_call or has_tool_calls
+     ):
+         print(
+             "[DEBUG] Empty assistant message detected with no tool/function call. Will retry. Raw response:"
+         )
+         print(repr(response))
+         raise EmptyResponseError("Empty assistant message content.")
+     return response


  class ConversationHandler:
@@ -27,6 +51,130 @@ class ConversationHandler:
          """
          return [msg for msg in messages if msg.get("role") != "system"]

+     def _resolve_max_tokens(self, max_tokens):
+         resolved_max_tokens = max_tokens
+         if resolved_max_tokens is None:
+             resolved_max_tokens = unified_config.get("max_tokens", 32000)
+         try:
+             resolved_max_tokens = int(resolved_max_tokens)
+         except (TypeError, ValueError):
+             raise ValueError(
+                 "max_tokens must be an integer, got: {resolved_max_tokens!r}".format(
+                     resolved_max_tokens=resolved_max_tokens
+                 )
+             )
+         if runtime_config.get("vanilla_mode", False) and max_tokens is None:
+             resolved_max_tokens = 8000
+         return resolved_max_tokens
+
+     def _call_openai_api(self, history, resolved_max_tokens, spinner):
+         def api_call():
+             return get_openai_response_with_content_check(
+                 self.client,
+                 self.model,
+                 history.get_messages(),
+                 resolved_max_tokens,
+             )
+
+         user_message_on_empty = "Received an empty message from you. Please try again."
+         if spinner:
+             response = show_spinner(
+                 "Waiting for AI response...",
+                 retry_api_call,
+                 api_call,
+                 history=history,
+                 user_message_on_empty=user_message_on_empty,
+             )
+         else:
+             response = retry_api_call(
+                 api_call, history=history, user_message_on_empty=user_message_on_empty
+             )
+         return response
+
+     def _handle_no_tool_support(self, messages, max_tokens, spinner):
+         print(
+             "\u26a0\ufe0f Endpoint does not support tool use. Proceeding in vanilla mode (tools disabled)."
+         )
+         runtime_config.set("vanilla_mode", True)
+         resolved_max_tokens = 8000
+         if max_tokens is None:
+             runtime_config.set("max_tokens", 8000)
+
+         def api_call_vanilla():
+             return get_openai_response_with_content_check(
+                 self.client, self.model, messages, resolved_max_tokens
+             )
+
+         user_message_on_empty = "Received an empty message from you. Please try again."
+         if spinner:
+             response = show_spinner(
+                 "Waiting for AI response (tools disabled)...",
+                 retry_api_call,
+                 api_call_vanilla,
+                 history=None,
+                 user_message_on_empty=user_message_on_empty,
+             )
+         else:
+             response = retry_api_call(
+                 api_call_vanilla,
+                 history=None,
+                 user_message_on_empty=user_message_on_empty,
+             )
+         print(
+             "[DEBUG] OpenAI API raw response (tools disabled):",
+             repr(response),
+         )
+         return response, resolved_max_tokens
+
+     def _process_response(self, response):
+         if runtime_config.get("verbose_response", False):
+             pprint.pprint(response)
+         if response is None or not getattr(response, "choices", None):
+             error = getattr(response, "error", None)
+             if error:
+                 print(f"ApiError: {error.get('message', error)}")
+                 raise ApiError(error.get("message", str(error)))
+             raise EmptyResponseError(
+                 f"No choices in response; possible API or LLM error. Raw response: {response!r}"
+             )
+         choice = response.choices[0]
+         usage = getattr(response, "usage", None)
+         usage_info = (
+             {
+                 "_debug_raw_usage": getattr(response, "usage", None),
+                 "prompt_tokens": getattr(usage, "prompt_tokens", None),
+                 "completion_tokens": getattr(usage, "completion_tokens", None),
+                 "total_tokens": getattr(usage, "total_tokens", None),
+             }
+             if usage
+             else None
+         )
+         return choice, usage_info
+
+     def _handle_tool_calls(
+         self, choice, history, message_handler, usage_info, tool_user=False
+     ):
+         tool_responses = handle_tool_calls(
+             choice.message.tool_calls, message_handler=message_handler
+         )
+         agent_idx = len([m for m in history.get_messages() if m.get("role") == "agent"])
+         self.usage_history.append({"agent_index": agent_idx, "usage": usage_info})
+         history.add_message(
+             {
+                 "role": "assistant",
+                 "content": choice.message.content,
+                 "tool_calls": [tc.to_dict() for tc in choice.message.tool_calls],
+             }
+         )
+         for tool_response in tool_responses:
+             history.add_message(
+                 {
+                     "role": "user" if tool_user else "tool",
+                     "tool_call_id": tool_response["tool_call_id"],
+                     "content": tool_response["content"],
+                 }
+             )
+
      def handle_conversation(
          self,
          messages,
@@ -36,116 +184,31 @@ class ConversationHandler:
          spinner=False,
          max_tokens=None,
          verbose_events=False,
-         stream=False,
-         verbose_stream=False,
+         tool_user=False,
      ):
-         from janito.agent.conversation_history import ConversationHistory

-         # Accept either ConversationHistory or a list for backward compatibility
-         if isinstance(messages, ConversationHistory):
+         if isinstance(messages, LLMConversationHistory):
              history = messages
          else:
-             history = ConversationHistory(messages)
+             history = LLMConversationHistory(messages)

          if len(history) == 0:
              raise ValueError("No prompt provided in messages")

-         resolved_max_tokens = max_tokens
-         if resolved_max_tokens is None:
-             resolved_max_tokens = unified_config.get("max_tokens", 200000)
-         try:
-             resolved_max_tokens = int(resolved_max_tokens)
-         except (TypeError, ValueError):
-             raise ValueError(
-                 "max_tokens must be an integer, got: {resolved_max_tokens!r}".format(
-                     resolved_max_tokens=resolved_max_tokens
-                 )
-             )
-
-         # If vanilla mode is set and max_tokens was not provided, default to 8000
-         if runtime_config.get("vanilla_mode", False) and max_tokens is None:
-             resolved_max_tokens = 8000
+         resolved_max_tokens = self._resolve_max_tokens(max_tokens)

          for _ in range(max_rounds):
              try:
-                 if stream:
-                     # Streaming mode
-                     def get_stream():
-                         return get_openai_stream_response(
-                             self.client,
-                             self.model,
-                             history.get_messages(),
-                             resolved_max_tokens,
-                             verbose_stream=runtime_config.get("verbose_stream", False),
-                             message_handler=message_handler,
-                         )
-
-                     retry_api_call(get_stream)
-                     return None
-                 else:
-                     # Non-streaming mode
-                     def api_call():
-                         return get_openai_response(
-                             self.client,
-                             self.model,
-                             history.get_messages(),
-                             resolved_max_tokens,
-                         )
-
-                     if spinner:
-                         response = show_spinner(
-                             "Waiting for AI response...", retry_api_call, api_call
-                         )
-                     else:
-                         response = retry_api_call(api_call)
+                 response = self._call_openai_api(history, resolved_max_tokens, spinner)
+                 error = getattr(response, "error", None)
+                 if error:
+                     print(f"ApiError: {error.get('message', error)}")
+                     raise ApiError(error.get("message", str(error)))
              except NoToolSupportError:
-                 print(
-                     "⚠️ Endpoint does not support tool use. Proceeding in vanilla mode (tools disabled)."
+                 response, resolved_max_tokens = self._handle_no_tool_support(
+                     messages, max_tokens, spinner
                  )
-                 runtime_config.set("vanilla_mode", True)
-                 if max_tokens is None:
-                     runtime_config.set("max_tokens", 8000)
-                 resolved_max_tokens = 8000
-
-                 # Remove system prompt for vanilla mode if needed (call this externally when appropriate)
-                 # messages = ConversationHandler.remove_system_prompt(messages)
-                 # Retry once with tools disabled
-                 def api_call_vanilla():
-                     return get_openai_response(
-                         self.client, self.model, messages, resolved_max_tokens
-                     )
-
-                 if spinner:
-                     response = show_spinner(
-                         "Waiting for AI response (tools disabled)...",
-                         retry_api_call,
-                         api_call_vanilla,
-                     )
-                 else:
-                     response = retry_api_call(api_call_vanilla)
-                 print(
-                     "[DEBUG] OpenAI API raw response (tools disabled):",
-                     repr(response),
-                 )
-             if runtime_config.get("verbose_response", False):
-                 pprint.pprint(response)
-             if response is None or not getattr(response, "choices", None):
-                 raise EmptyResponseError(
-                     f"No choices in response; possible API or LLM error. Raw response: {response!r}"
-                 )
-             choice = response.choices[0]
-             usage = getattr(response, "usage", None)
-             usage_info = (
-                 {
-                     # DEBUG: Show usage extraction
-                     "_debug_raw_usage": getattr(response, "usage", None),
-                     "prompt_tokens": getattr(usage, "prompt_tokens", None),
-                     "completion_tokens": getattr(usage, "completion_tokens", None),
-                     "total_tokens": getattr(usage, "total_tokens", None),
-                 }
-                 if usage
-                 else None
-             )
+             choice, usage_info = self._process_response(response)
              event = {"type": "content", "message": choice.message.content}
              if runtime_config.get("verbose_events", False):
                  print_verbose_event(event)
@@ -158,7 +221,6 @@ class ConversationHandler:
                  self.usage_history.append(
                      {"agent_index": agent_idx, "usage": usage_info}
                  )
-                 # Add assistant response to history
                  history.add_message(
                      {
                          "role": "assistant",
@@ -170,28 +232,7 @@ class ConversationHandler:
                      "usage": usage_info,
                      "usage_history": self.usage_history,
                  }
-             # Tool calls
-             tool_responses = handle_tool_calls(
-                 choice.message.tool_calls, message_handler=message_handler
-             )
-             agent_idx = len(
-                 [m for m in history.get_messages() if m.get("role") == "agent"]
-             )
-             self.usage_history.append({"agent_index": agent_idx, "usage": usage_info})
-             # Add assistant response with tool calls
-             history.add_message(
-                 {
-                     "role": "assistant",
-                     "content": choice.message.content,
-                     "tool_calls": [tc.to_dict() for tc in choice.message.tool_calls],
-                 }
+             self._handle_tool_calls(
+                 choice, history, message_handler, usage_info, tool_user=tool_user
              )
-             for tool_response in tool_responses:
-                 history.add_message(
-                     {
-                         "role": "tool",
-                         "tool_call_id": tool_response["tool_call_id"],
-                         "content": tool_response["content"],
-                     }
-                 )
      raise MaxRoundsExceededError(f"Max conversation rounds exceeded ({max_rounds})")
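
Note the new tool_user flag: handle_conversation threads it into _handle_tool_calls, which records tool results with role "user" instead of "tool" when it is set (e.g. for endpoints that expect tool output as user messages). A self-contained sketch of just that role selection; the helper name and sample values are made up for illustration, not code from the package:

def record_tool_response(history, tool_response, tool_user=False):
    # Mirrors the role selection in ConversationHandler._handle_tool_calls above.
    history.append(
        {
            "role": "user" if tool_user else "tool",
            "tool_call_id": tool_response["tool_call_id"],
            "content": tool_response["content"],
        }
    )

history = []
record_tool_response(history, {"tool_call_id": "call_1", "content": "ok"}, tool_user=True)
print(history[0]["role"])  # -> "user"
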