janito 1.7.0__py3-none-any.whl → 1.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115)
  1. janito/__init__.py +1 -1
  2. janito/agent/config.py +1 -1
  3. janito/agent/config_defaults.py +2 -2
  4. janito/agent/conversation.py +70 -27
  5. janito/agent/conversation_api.py +104 -4
  6. janito/agent/conversation_exceptions.py +6 -0
  7. janito/agent/conversation_tool_calls.py +17 -3
  8. janito/agent/event.py +24 -0
  9. janito/agent/event_dispatcher.py +24 -0
  10. janito/agent/event_handler_protocol.py +5 -0
  11. janito/agent/event_system.py +15 -0
  12. janito/agent/message_handler.py +4 -1
  13. janito/agent/message_handler_protocol.py +5 -0
  14. janito/agent/openai_client.py +5 -8
  15. janito/agent/openai_schema_generator.py +23 -4
  16. janito/agent/profile_manager.py +15 -83
  17. janito/agent/queued_message_handler.py +22 -3
  18. janito/agent/rich_message_handler.py +66 -72
  19. janito/agent/templates/profiles/system_prompt_template_base.txt.j2 +14 -0
  20. janito/agent/templates/profiles/system_prompt_template_base_pt.txt.j2 +13 -0
  21. janito/agent/test_handler_protocols.py +47 -0
  22. janito/agent/tests/__init__.py +1 -0
  23. janito/agent/tool_base.py +1 -1
  24. janito/agent/tool_executor.py +109 -0
  25. janito/agent/tool_registry.py +3 -75
  26. janito/agent/tool_use_tracker.py +46 -0
  27. janito/agent/tools/__init__.py +8 -9
  28. janito/agent/tools/ask_user.py +19 -11
  29. janito/agent/tools/create_directory.py +43 -28
  30. janito/agent/tools/create_file.py +60 -29
  31. janito/agent/tools/dir_walk_utils.py +16 -0
  32. janito/agent/tools/fetch_url.py +10 -11
  33. janito/agent/tools/find_files.py +49 -32
  34. janito/agent/tools/get_lines.py +54 -18
  35. janito/agent/tools/memory.py +32 -52
  36. janito/agent/tools/move_file.py +72 -23
  37. janito/agent/tools/outline_file/__init__.py +85 -0
  38. janito/agent/tools/outline_file/formatting.py +20 -0
  39. janito/agent/tools/outline_file/markdown_outline.py +14 -0
  40. janito/agent/tools/outline_file/python_outline.py +71 -0
  41. janito/agent/tools/present_choices.py +62 -0
  42. janito/agent/tools/present_choices_test.py +18 -0
  43. janito/agent/tools/remove_directory.py +31 -26
  44. janito/agent/tools/remove_file.py +31 -13
  45. janito/agent/tools/replace_text_in_file.py +135 -36
  46. janito/agent/tools/run_bash_command.py +47 -50
  47. janito/agent/tools/run_powershell_command.py +52 -36
  48. janito/agent/tools/run_python_command.py +49 -29
  49. janito/agent/tools/search_outline.py +17 -0
  50. janito/agent/tools/search_text.py +208 -0
  51. janito/agent/tools/tools_utils.py +47 -4
  52. janito/agent/tools/utils.py +14 -15
  53. janito/agent/tools/validate_file_syntax.py +163 -0
  54. janito/cli/arg_parser.py +36 -4
  55. janito/cli/logging_setup.py +7 -2
  56. janito/cli/main.py +96 -2
  57. janito/cli/runner/_termweb_log_utils.py +17 -0
  58. janito/cli/runner/cli_main.py +119 -77
  59. janito/cli/runner/config.py +2 -2
  60. janito/cli/termweb_starter.py +73 -0
  61. janito/cli_chat_shell/chat_loop.py +42 -7
  62. janito/cli_chat_shell/chat_state.py +1 -1
  63. janito/cli_chat_shell/chat_ui.py +0 -1
  64. janito/cli_chat_shell/commands/__init__.py +15 -6
  65. janito/cli_chat_shell/commands/{history_reset.py → history_start.py} +13 -5
  66. janito/cli_chat_shell/commands/lang.py +16 -0
  67. janito/cli_chat_shell/commands/prompt.py +42 -0
  68. janito/cli_chat_shell/commands/session_control.py +36 -1
  69. janito/cli_chat_shell/commands/termweb_log.py +86 -0
  70. janito/cli_chat_shell/commands/utility.py +5 -2
  71. janito/cli_chat_shell/commands/verbose.py +29 -0
  72. janito/cli_chat_shell/session_manager.py +9 -1
  73. janito/cli_chat_shell/shell_command_completer.py +20 -0
  74. janito/cli_chat_shell/ui.py +110 -99
  75. janito/i18n/__init__.py +35 -0
  76. janito/i18n/messages.py +23 -0
  77. janito/i18n/pt.py +46 -0
  78. janito/rich_utils.py +43 -43
  79. janito/termweb/app.py +95 -0
  80. janito/termweb/static/editor.html +238 -0
  81. janito/termweb/static/editor.html.bak +238 -0
  82. janito/termweb/static/explorer.html.bak +59 -0
  83. janito/termweb/static/favicon.ico +0 -0
  84. janito/termweb/static/favicon.ico.bak +0 -0
  85. janito/termweb/static/index.html +55 -0
  86. janito/termweb/static/index.html.bak +55 -0
  87. janito/termweb/static/index.html.bak.bak +175 -0
  88. janito/termweb/static/landing.html.bak +36 -0
  89. janito/termweb/static/termicon.svg +1 -0
  90. janito/termweb/static/termweb.css +235 -0
  91. janito/termweb/static/termweb.css.bak +286 -0
  92. janito/termweb/static/termweb.js +187 -0
  93. janito/termweb/static/termweb.js.bak +187 -0
  94. janito/termweb/static/termweb.js.bak.bak +157 -0
  95. janito/termweb/static/termweb_quickopen.js +135 -0
  96. janito/termweb/static/termweb_quickopen.js.bak +125 -0
  97. janito/web/app.py +4 -4
  98. {janito-1.7.0.dist-info → janito-1.8.0.dist-info}/METADATA +58 -25
  99. janito-1.8.0.dist-info/RECORD +127 -0
  100. {janito-1.7.0.dist-info → janito-1.8.0.dist-info}/WHEEL +1 -1
  101. janito/agent/templates/profiles/system_prompt_template_base.toml +0 -76
  102. janito/agent/templates/profiles/system_prompt_template_default.toml +0 -3
  103. janito/agent/templates/profiles/system_prompt_template_technical.toml +0 -13
  104. janito/agent/tests/test_prompt_toml.py +0 -61
  105. janito/agent/tool_registry_core.py +0 -2
  106. janito/agent/tools/get_file_outline.py +0 -146
  107. janito/agent/tools/py_compile_file.py +0 -40
  108. janito/agent/tools/replace_file.py +0 -51
  109. janito/agent/tools/search_files.py +0 -65
  110. janito/cli/runner/scan.py +0 -57
  111. janito/cli_chat_shell/commands/system.py +0 -73
  112. janito-1.7.0.dist-info/RECORD +0 -89
  113. {janito-1.7.0.dist-info → janito-1.8.0.dist-info}/entry_points.txt +0 -0
  114. {janito-1.7.0.dist-info → janito-1.8.0.dist-info}/licenses/LICENSE +0 -0
  115. {janito-1.7.0.dist-info → janito-1.8.0.dist-info}/top_level.txt +0 -0
janito/__init__.py CHANGED
@@ -1 +1 @@
- __version__ = "1.7.0"
+ __version__ = "1.8.0"
janito/agent/config.py CHANGED
@@ -64,7 +64,7 @@ CONFIG_OPTIONS = {
  "use_azure_openai": "Whether to use Azure OpenAI client (default: False)",
  # Accept template.* keys as valid config keys (for CLI validation, etc.)
  "template": "Template context dictionary for Agent Profile prompt rendering (nested)",
- "interaction_style": "Interaction style for the Agent Profile (e.g., 'default' or 'technical')",
+ "profile": "Agent Profile name (only 'base' is supported)",
  # Note: template.* keys are validated dynamically, not statically here
  }
janito/agent/config_defaults.py CHANGED
@@ -3,11 +3,11 @@ CONFIG_DEFAULTS = {
  "api_key": None, # Must be set by user
  "model": "openai/gpt-4.1", # Default model
  "base_url": "https://openrouter.ai/api/v1",
- "role": "software engineer", # Part of the Agent Profile
+ "role": "software developer", # Part of the Agent Profile
  "system_prompt_template": None, # None means auto-generate from Agent Profile role
  "temperature": 0.2,
  "max_tokens": 200000,
  "use_azure_openai": False,
  "azure_openai_api_version": "2023-05-15",
- "interaction_style": "default",
+ "profile": "base",
  }
janito/agent/conversation.py CHANGED
@@ -8,8 +8,9 @@ from janito.agent.conversation_ui import show_spinner, print_verbose_event
  from janito.agent.conversation_exceptions import (
  MaxRoundsExceededError,
  EmptyResponseError,
+ NoToolSupportError,
  )
- from janito.agent.runtime_config import unified_config
+ from janito.agent.runtime_config import unified_config, runtime_config
  import pprint
@@ -19,6 +20,13 @@ class ConversationHandler:
  self.model = model
  self.usage_history = []

+ @staticmethod
+ def remove_system_prompt(messages):
+ """
+ Return a new messages list with all system prompts removed.
+ """
+ return [msg for msg in messages if msg.get("role") != "system"]
+
  def handle_conversation(
  self,
  messages,
@@ -41,40 +49,75 @@ class ConversationHandler:
  resolved_max_tokens = int(resolved_max_tokens)
  except (TypeError, ValueError):
  raise ValueError(
- f"max_tokens must be an integer, got: {resolved_max_tokens!r}"
+ "max_tokens must be an integer, got: {resolved_max_tokens!r}".format(
+ resolved_max_tokens=resolved_max_tokens
+ )
  )

+ # If vanilla mode is set and max_tokens was not provided, default to 8000
+ if runtime_config.get("vanilla_mode", False) and max_tokens is None:
+ resolved_max_tokens = 8000
+
  for _ in range(max_rounds):
- if stream:
- # Streaming mode
- def get_stream():
- return get_openai_stream_response(
- self.client,
- self.model,
- messages,
- resolved_max_tokens,
- verbose_stream=verbose_stream,
- message_handler=message_handler,
- )
+ try:
+ if stream:
+ # Streaming mode
+ def get_stream():
+ return get_openai_stream_response(
+ self.client,
+ self.model,
+ messages,
+ resolved_max_tokens,
+ verbose_stream=runtime_config.get("verbose_stream", False),
+ message_handler=message_handler,
+ )
+
+ retry_api_call(get_stream)
+ return None
+ else:
+ # Non-streaming mode
+ def api_call():
+ return get_openai_response(
+ self.client, self.model, messages, resolved_max_tokens
+ )
+
+ if spinner:
+ response = show_spinner(
+ "Waiting for AI response...", retry_api_call, api_call
+ )
+ else:
+ response = retry_api_call(api_call)
+ print("[DEBUG] OpenAI API raw response:", repr(response))
+ except NoToolSupportError:
+ print(
+ "⚠️ Endpoint does not support tool use. Proceeding in vanilla mode (tools disabled)."
+ )
+ runtime_config.set("vanilla_mode", True)
+ if max_tokens is None:
+ runtime_config.set("max_tokens", 8000)
+ resolved_max_tokens = 8000

- retry_api_call(get_stream)
- return None
- else:
- # Non-streaming mode
- def api_call():
+ # Remove system prompt for vanilla mode if needed (call this externally when appropriate)
+ # messages = ConversationHandler.remove_system_prompt(messages)
+ # Retry once with tools disabled
+ def api_call_vanilla():
  return get_openai_response(
  self.client, self.model, messages, resolved_max_tokens
  )

  if spinner:
  response = show_spinner(
- "Waiting for AI response...", retry_api_call, api_call
+ "Waiting for AI response (tools disabled)...",
+ retry_api_call,
+ api_call_vanilla,
  )
  else:
- response = retry_api_call(api_call)
- print("[DEBUG] OpenAI API raw response:", repr(response))
-
- if verbose_response:
+ response = retry_api_call(api_call_vanilla)
+ print(
+ "[DEBUG] OpenAI API raw response (tools disabled):",
+ repr(response),
+ )
+ if runtime_config.get("verbose_response", False):
  pprint.pprint(response)
  if response is None or not getattr(response, "choices", None):
  raise EmptyResponseError(
@@ -94,7 +137,7 @@ class ConversationHandler:
  else None
  )
  event = {"type": "content", "message": choice.message.content}
- if verbose_events:
+ if runtime_config.get("verbose_events", False):
  print_verbose_event(event)
  if message_handler is not None and choice.message.content:
  message_handler.handle_message(event)
@@ -121,12 +164,12 @@ class ConversationHandler:
  "tool_calls": [tc.to_dict() for tc in choice.message.tool_calls],
  }
  )
- for tr in tool_responses:
+ for tool_response in tool_responses:
  messages.append(
  {
  "role": "tool",
- "tool_call_id": tr["tool_call_id"],
- "content": tr["content"],
+ "tool_call_id": tool_response["tool_call_id"],
+ "content": tool_response["content"],
  }
  )
  raise MaxRoundsExceededError(f"Max conversation rounds exceeded ({max_rounds})")
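The new `remove_system_prompt` helper added above is a pure function over the messages list. A minimal sketch of how it behaves, with an illustrative message list (only the "role" keys matter):

```python
from janito.agent.conversation import ConversationHandler

messages = [
    {"role": "system", "content": "You are a software developer."},
    {"role": "user", "content": "List the files in this project."},
]

# Returns a new list with every system message dropped; the input list is not mutated.
stripped = ConversationHandler.remove_system_prompt(messages)
print(stripped)
# [{'role': 'user', 'content': 'List the files in this project.'}]
```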
janito/agent/conversation_api.py CHANGED
@@ -3,9 +3,11 @@ Handles OpenAI API calls and retry logic for conversation.
  """

  import time
+ from janito.i18n import tr
  import json
  from janito.agent.runtime_config import runtime_config
  from janito.agent.tool_registry import get_tool_schemas
+ from janito.agent.conversation_exceptions import NoToolSupportError

  def get_openai_response(
@@ -79,20 +81,118 @@ def retry_api_call(api_func, max_retries=5, *args, **kwargs):
  if attempt < max_retries:
  wait_time = 2**attempt
  print(
- f"Invalid/malformed response from OpenAI (attempt {attempt}/{max_retries}). Retrying in {wait_time} seconds..."
+ tr(
+ "Invalid/malformed response from OpenAI (attempt {attempt}/{max_retries}). Retrying in {wait_time} seconds...",
+ attempt=attempt,
+ max_retries=max_retries,
+ wait_time=wait_time,
+ )
  )
  time.sleep(wait_time)
  else:
- print("Max retries for invalid response reached. Raising error.")
+ print(tr("Max retries for invalid response reached. Raising error."))
  raise last_exception
  except Exception as e:
  last_exception = e
+ status_code = None
+ error_message = str(e)
+ retry_after = None
+ # Detect specific tool support error
+ if "No endpoints found that support tool use" in error_message:
+ print(tr("API does not support tool use."))
+ raise NoToolSupportError(error_message)
+ # Try to extract status code and Retry-After from known exception types or message
+ if hasattr(e, "status_code"):
+ status_code = getattr(e, "status_code")
+ elif hasattr(e, "response") and hasattr(e.response, "status_code"):
+ status_code = getattr(e.response, "status_code")
+ # Check for Retry-After header
+ if hasattr(e.response, "headers") and e.response.headers:
+ retry_after = e.response.headers.get("Retry-After")
+ else:
+ # Try to parse from error message
+ import re
+
+ match = re.search(r"[Ee]rror code: (\d{3})", error_message)
+ if match:
+ status_code = int(match.group(1))
+ # Try to find Retry-After in message
+ retry_after_match = re.search(
+ r"Retry-After['\"]?:?\s*(\d+)", error_message
+ )
+ if retry_after_match:
+ retry_after = retry_after_match.group(1)
+ # Decide retry logic based on status code
+ if status_code is not None:
+ if status_code == 429:
+ # Use Retry-After if available, else exponential backoff
+ if retry_after is not None:
+ try:
+ wait_time = int(float(retry_after))
+ except Exception:
+ wait_time = 2**attempt
+ else:
+ wait_time = 2**attempt
+ if attempt < max_retries:
+ print(
+ tr(
+ "OpenAI API rate limit (429) (attempt {attempt}/{max_retries}): {e}. Retrying in {wait_time} seconds...",
+ attempt=attempt,
+ max_retries=max_retries,
+ e=e,
+ wait_time=wait_time,
+ )
+ )
+ time.sleep(wait_time)
+ continue
+ else:
+ print(
+ "Max retries for OpenAI API rate limit reached. Raising error."
+ )
+ raise last_exception
+ elif 500 <= status_code < 600:
+ # Retry on server errors
+ if attempt < max_retries:
+ wait_time = 2**attempt
+ print(
+ tr(
+ "OpenAI API server error (attempt {attempt}/{max_retries}): {e}. Retrying in {wait_time} seconds...",
+ attempt=attempt,
+ max_retries=max_retries,
+ e=e,
+ wait_time=wait_time,
+ )
+ )
+ time.sleep(wait_time)
+ continue
+ else:
+ print(
+ "Max retries for OpenAI API server error reached. Raising error."
+ )
+ raise last_exception
+ elif 400 <= status_code < 500:
+ # Do not retry on client errors (except 429)
+ print(
+ tr(
+ "OpenAI API client error {status_code}: {e}. Not retrying.",
+ status_code=status_code,
+ e=e,
+ )
+ )
+ raise last_exception
+ # If status code not detected, fallback to previous behavior
  if attempt < max_retries:
  wait_time = 2**attempt
  print(
- f"OpenAI API error (attempt {attempt}/{max_retries}): {e}. Retrying in {wait_time} seconds..."
+ tr(
+ "OpenAI API error (attempt {attempt}/{max_retries}): {e}. Retrying in {wait_time} seconds...",
+ attempt=attempt,
+ max_retries=max_retries,
+ e=e,
+ wait_time=wait_time,
+ )
  )
  time.sleep(wait_time)
  else:
- print("Max retries for OpenAI API error reached. Raising error.")
+ print(tr("Max retries for OpenAI API error reached. Raising error."))
  raise last_exception
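The reworked `retry_api_call` wraps any callable and chooses its wait time per attempt: the `Retry-After` value when a 429 response carries one, otherwise `2**attempt` seconds; 4xx errors other than 429 are raised immediately. A hedged usage sketch based on the signatures shown above (the `client`, `model`, `messages`, and `max_tokens` names are placeholders assumed to be defined by the caller):

```python
from janito.agent.conversation_api import get_openai_response, retry_api_call

def api_call():
    # Placeholder arguments; any zero-argument callable works here.
    return get_openai_response(client, model, messages, max_tokens)

# Retries up to 5 times on 429 and 5xx responses, backing off exponentially
# (or honouring a Retry-After header), and re-raises other client errors at once.
response = retry_api_call(api_call, max_retries=5)
```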
janito/agent/conversation_exceptions.py CHANGED
@@ -10,3 +10,9 @@ class ProviderError(Exception):
  def __init__(self, message, error_data):
  self.error_data = error_data
  super().__init__(message)
+
+
+ class NoToolSupportError(Exception):
+ """Raised when the API endpoint does not support tool use."""
+
+ pass
janito/agent/conversation_tool_calls.py CHANGED
@@ -2,7 +2,8 @@
  Helpers for handling tool calls in conversation.
  """

- from janito.agent.tool_registry import handle_tool_call
+ from janito.agent.tool_executor import ToolExecutor
+ from janito.agent import tool_registry
  from .conversation_exceptions import MaxRoundsExceededError
  from janito.agent.runtime_config import runtime_config

@@ -16,7 +17,20 @@ def handle_tool_calls(tool_calls, message_handler=None):
  raise MaxRoundsExceededError(
  f"Maximum number of tool calls ({max_tools}) reached in this chat session."
  )
- result = handle_tool_call(tool_call, message_handler=message_handler)
- tool_responses.append({"tool_call_id": tool_call.id, "content": result})
+ tool_entry = tool_registry._tool_registry[tool_call.function.name]
+ try:
+ result = ToolExecutor(message_handler=message_handler).execute(
+ tool_entry, tool_call
+ )
+ tool_responses.append({"tool_call_id": tool_call.id, "content": result})
+ except TypeError as e:
+ # Return the error as a tool result, asking to retry with correct params
+ error_msg = str(e)
+ tool_responses.append(
+ {
+ "tool_call_id": tool_call.id,
+ "content": f"Tool execution error: {error_msg}. Please retry with the correct parameters.",
+ }
+ )
  tool_calls_made += 1
  return tool_responses
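With this change, a `TypeError` from a mis-parameterised tool call no longer aborts the round; it is fed back to the model as an ordinary tool result so the model can retry. Roughly (the tool call id and error text below are hypothetical):

```python
# If ToolExecutor raises TypeError("create_file() got an unexpected keyword argument 'pathh'"),
# handle_tool_calls appends an entry like this instead of propagating the exception:
{
    "tool_call_id": "call_123",  # hypothetical id from the model's tool call
    "content": "Tool execution error: create_file() got an unexpected keyword argument 'pathh'. "
               "Please retry with the correct parameters.",
}
```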
janito/agent/event.py ADDED
@@ -0,0 +1,24 @@
+ from enum import Enum, auto
+ from typing import Any
+
+
+ class EventType(Enum):
+ CONTENT = auto()
+ INFO = auto()
+ SUCCESS = auto()
+ ERROR = auto()
+ PROGRESS = auto()
+ WARNING = auto()
+ STDOUT = auto()
+ STDERR = auto()
+ STREAM = auto()
+ STREAM_TOOL_CALL = auto()
+ STREAM_END = auto()
+ TOOL_CALL = auto()
+ TOOL_RESULT = auto()
+
+
+ class Event:
+ def __init__(self, type: EventType, payload: Any = None):
+ self.type = type
+ self.payload = payload
janito/agent/event_dispatcher.py ADDED
@@ -0,0 +1,24 @@
+ from typing import Callable, Dict, List
+ from janito.agent.event import Event, EventType
+
+
+ from janito.agent.event_handler_protocol import EventHandlerProtocol
+
+
+ class EventDispatcher:
+ def __init__(self):
+ self._handlers: Dict[EventType, List[Callable[[Event], None]]] = {}
+
+ def register(self, event_type: EventType, handler: EventHandlerProtocol):
+ if event_type not in self._handlers:
+ self._handlers[event_type] = []
+ self._handlers[event_type].append(handler)
+
+ def register_all(self, handler: EventHandlerProtocol):
+ # Register handler for all event types
+ for event_type in EventType:
+ self.register(event_type, handler)
+
+ def dispatch(self, event: Event):
+ for handler in self._handlers.get(event.type, []):
+ handler.handle_event(event)
janito/agent/event_handler_protocol.py ADDED
@@ -0,0 +1,5 @@
+ from typing import Protocol, Any
+
+
+ class EventHandlerProtocol(Protocol):
+ def handle_event(self, event: Any) -> None: ...
janito/agent/event_system.py ADDED
@@ -0,0 +1,15 @@
+ from janito.agent.event_dispatcher import EventDispatcher
+ from janito.agent.rich_message_handler import RichMessageHandler
+
+ # Singleton dispatcher
+ shared_event_dispatcher = EventDispatcher()
+
+ # Register handlers (example: RichMessageHandler for all events)
+ rich_handler = RichMessageHandler()
+ shared_event_dispatcher.register_all(rich_handler)
+
+ # You can register other handlers as needed, e.g.:
+ # queued_handler = QueuedMessageHandler(...)
+ # shared_event_dispatcher.register_all(queued_handler)
+ # queue_handler = QueueMessageHandler(...)
+ # shared_event_dispatcher.register_all(queue_handler)
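Taken together, these new modules form a small pub/sub layer: `Event`/`EventType` define the payload, `EventDispatcher` routes by event type, and `event_system` exposes a shared dispatcher pre-wired to `RichMessageHandler`. A minimal usage sketch with a custom handler (the `PrintHandler` class is illustrative, not part of the package):

```python
from janito.agent.event import Event, EventType
from janito.agent.event_dispatcher import EventDispatcher

class PrintHandler:
    """Illustrative handler; satisfies EventHandlerProtocol by having handle_event."""
    def handle_event(self, event):
        print(f"{event.type.name}: {event.payload}")

dispatcher = EventDispatcher()
dispatcher.register(EventType.ERROR, PrintHandler())  # only ERROR events
dispatcher.register_all(PrintHandler())                # every EventType

dispatcher.dispatch(Event(EventType.ERROR, payload="tool failed"))
# ERROR: tool failed   (printed twice, once per registered handler)
```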
janito/agent/message_handler.py CHANGED
@@ -1,4 +1,7 @@
- class QueueMessageHandler:
+ from janito.agent.message_handler_protocol import MessageHandlerProtocol
+
+
+ class QueueMessageHandler(MessageHandlerProtocol):
  def __init__(self, queue, *args, **kwargs):
  self._queue = queue
janito/agent/message_handler_protocol.py ADDED
@@ -0,0 +1,5 @@
+ from typing import Protocol
+
+
+ class MessageHandlerProtocol(Protocol):
+ def handle_message(self, msg: dict, msg_type: str = None) -> None: ...
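`MessageHandlerProtocol` is a structural (duck-typed) protocol, so any object with a matching `handle_message` method conforms; explicit subclassing, as `QueueMessageHandler` now does, is optional. A minimal sketch (the `LogHandler` name is illustrative):

```python
from janito.agent.message_handler_protocol import MessageHandlerProtocol

class LogHandler:
    """Conforms to MessageHandlerProtocol structurally; no inheritance needed."""
    def handle_message(self, msg: dict, msg_type: str = None) -> None:
        print(msg_type or msg.get("type"), msg.get("message"))

handler: MessageHandlerProtocol = LogHandler()
handler.handle_message({"type": "content", "message": "hello"})
```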
janito/agent/openai_client.py CHANGED
@@ -66,13 +66,10 @@ class Agent:
  self,
  messages,
  message_handler=None,
- verbose_response=False,
  spinner=False,
  max_tokens=None,
  max_rounds=50,
- verbose_events=False,
  stream=False,
- verbose_stream=False,
  ):
  """
  Start a chat conversation with the agent.
@@ -80,16 +77,16 @@ class Agent:
  Args:
  messages: List of message dicts.
  message_handler: Optional handler for streaming or event messages.
- verbose_response: Print full response for debugging.
  spinner: Show spinner during request.
  max_tokens: Max tokens for completion.
  max_rounds: Max conversation rounds.
- verbose_events: Print all events for debugging.
  stream: If True, enable OpenAI streaming mode (yields tokens incrementally).
  Returns:
  If stream=False: dict with 'content', 'usage', and 'usage_history'.
  If stream=True: generator yielding content chunks or events.
  """
+ from janito.agent.runtime_config import runtime_config
+
  max_retries = 5
  for attempt in range(1, max_retries + 1):
  try:
@@ -97,12 +94,12 @@ class Agent:
  messages,
  max_rounds=max_rounds,
  message_handler=message_handler,
- verbose_response=verbose_response,
+ verbose_response=runtime_config.get("verbose_response", False),
  spinner=spinner,
  max_tokens=max_tokens,
- verbose_events=verbose_events,
+ verbose_events=runtime_config.get("verbose_events", False),
  stream=stream,
- verbose_stream=verbose_stream,
+ verbose_stream=runtime_config.get("verbose_stream", False),
  )
  except ProviderError as e:
  error_data = getattr(e, "error_data", {}) or {}
janito/agent/openai_schema_generator.py CHANGED
@@ -8,6 +8,7 @@ MUST BE IMPLEMENTED:
  import inspect
  import re
  import typing
+ from collections import OrderedDict

  PYTHON_TYPE_TO_JSON = {
  str: "string",
@@ -85,7 +86,7 @@ def _parse_docstring(docstring: str):
  def generate_openai_function_schema(func, tool_name: str, tool_class=None):
  """
  Generates an OpenAI-compatible function schema for a callable.
- Raises ValueError if the return type is not explicitly str.
+ Raises ValueError if the return type is not explicitly str or if any parameter is missing a type hint.
  """
  sig = inspect.signature(func)
  # Enforce explicit str return type
@@ -93,6 +94,17 @@ def generate_openai_function_schema(func, tool_name: str, tool_class=None):
  raise ValueError(
  f"Tool '{tool_name}' must have an explicit return type of 'str'. Found: {sig.return_annotation}"
  )
+ # Enforce type hints for all parameters (except self)
+ missing_type_hints = [
+ name
+ for name, param in sig.parameters.items()
+ if name != "self" and param.annotation is inspect._empty
+ ]
+ if missing_type_hints:
+ raise ValueError(
+ f"Tool '{tool_name}' is missing type hints for parameter(s): {', '.join(missing_type_hints)}.\n"
+ f"All parameters must have explicit type hints for schema generation."
+ )
  # Only use the class docstring for schema generation
  class_doc = tool_class.__doc__.strip() if tool_class and tool_class.__doc__ else ""
  summary, param_descs, return_desc = _parse_docstring(class_doc)
@@ -107,14 +119,21 @@ def generate_openai_function_schema(func, tool_name: str, tool_class=None):
  ]
  if undocumented:
  raise ValueError(
- f"Tool '{tool_name}' is missing docstring documentation for parameter(s): {', '.join(undocumented)}"
+ f"Tool '{tool_name}' is missing docstring documentation for parameter(s): {', '.join(undocumented)}.\n"
+ f"Parameter documentation must be provided in the Tool class docstring, not the method docstring."
  )
- properties = {}
+ properties = OrderedDict()
  required = []
+ # Inject tool_call_reason as the first required parameter
+ properties["tool_call_reason"] = {
+ "type": "string",
+ "description": "The reason or context for why this tool is being called. This is required for traceability.",
+ }
+ required.append("tool_call_reason")
  for name, param in sig.parameters.items():
  if name == "self":
  continue
- annotation = param.annotation
+ annotation = param.annotation
  pdesc = param_descs.get(name, "")
  schema = _type_to_json_schema(annotation)
  schema["description"] = pdesc
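Under the new generator, every tool schema gains a synthetic first parameter, `tool_call_reason`, ahead of the tool's own (now mandatorily type-hinted) parameters. For a hypothetical tool method such as `def run(self, path: str) -> str:`, the emitted properties/required fragment would look roughly like this (illustrative only; the handling of the tool's own parameters and defaults follows code outside the hunk shown above):

```python
{
    "properties": {
        "tool_call_reason": {  # always injected first
            "type": "string",
            "description": "The reason or context for why this tool is being called. "
                           "This is required for traceability.",
        },
        "path": {"type": "string", "description": "Path to operate on."},
    },
    "required": ["tool_call_reason", "path"],
}
```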