janito 1.4.0__py3-none-any.whl → 1.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. janito/__init__.py +1 -1
  2. janito/agent/__init__.py +0 -1
  3. janito/agent/agent.py +7 -25
  4. janito/agent/config.py +4 -6
  5. janito/agent/config_defaults.py +2 -2
  6. janito/agent/content_handler.py +0 -0
  7. janito/agent/conversation.py +63 -37
  8. janito/agent/message_handler.py +18 -0
  9. janito/agent/openai_schema_generator.py +116 -0
  10. janito/agent/queued_message_handler.py +32 -0
  11. janito/agent/rich_tool_handler.py +43 -0
  12. janito/agent/runtime_config.py +1 -1
  13. janito/agent/templates/system_instructions.j2 +10 -4
  14. janito/agent/tool_registry.py +92 -0
  15. janito/agent/tools/append_text_to_file.py +41 -0
  16. janito/agent/tools/ask_user.py +16 -3
  17. janito/agent/tools/create_directory.py +31 -0
  18. janito/agent/tools/create_file.py +52 -0
  19. janito/agent/tools/fetch_url.py +23 -8
  20. janito/agent/tools/find_files.py +40 -21
  21. janito/agent/tools/get_file_outline.py +26 -8
  22. janito/agent/tools/get_lines.py +53 -19
  23. janito/agent/tools/move_file.py +50 -0
  24. janito/agent/tools/py_compile.py +27 -11
  25. janito/agent/tools/python_exec.py +43 -14
  26. janito/agent/tools/remove_directory.py +23 -7
  27. janito/agent/tools/remove_file.py +38 -0
  28. janito/agent/tools/replace_text_in_file.py +40 -17
  29. janito/agent/tools/run_bash_command.py +107 -80
  30. janito/agent/tools/search_files.py +38 -19
  31. janito/agent/tools/tool_base.py +30 -3
  32. janito/agent/tools/tools_utils.py +11 -0
  33. janito/agent/tools/utils.py +0 -1
  34. janito/cli/_print_config.py +1 -1
  35. janito/cli/arg_parser.py +2 -1
  36. janito/cli/config_commands.py +3 -6
  37. janito/cli/main.py +2 -2
  38. janito/cli/runner.py +18 -14
  39. janito/cli_chat_shell/chat_loop.py +10 -15
  40. janito/cli_chat_shell/commands.py +8 -3
  41. janito/cli_chat_shell/config_shell.py +0 -3
  42. janito/cli_chat_shell/session_manager.py +11 -0
  43. janito/cli_chat_shell/ui.py +12 -113
  44. janito/render_prompt.py +0 -1
  45. janito/rich_utils.py +30 -0
  46. janito/web/app.py +10 -12
  47. janito-1.5.0.dist-info/METADATA +176 -0
  48. janito-1.5.0.dist-info/RECORD +64 -0
  49. janito/agent/queued_tool_handler.py +0 -16
  50. janito/agent/tool_handler.py +0 -196
  51. janito/agent/tools/file_ops.py +0 -114
  52. janito/agent/tools/rich_utils.py +0 -31
  53. janito-1.4.0.dist-info/METADATA +0 -142
  54. janito-1.4.0.dist-info/RECORD +0 -55
  55. {janito-1.4.0.dist-info → janito-1.5.0.dist-info}/WHEEL +0 -0
  56. {janito-1.4.0.dist-info → janito-1.5.0.dist-info}/entry_points.txt +0 -0
  57. {janito-1.4.0.dist-info → janito-1.5.0.dist-info}/licenses/LICENSE +0 -0
  58. {janito-1.4.0.dist-info → janito-1.5.0.dist-info}/top_level.txt +0 -0
janito/__init__.py CHANGED
@@ -1 +1 @@
1
- __version__ = "1.4.0"
1
+ __version__ = "1.5.0"
janito/agent/__init__.py CHANGED
@@ -1 +0,0 @@
1
- from . import tools
janito/agent/agent.py CHANGED
@@ -1,10 +1,8 @@
1
1
  """Agent module: defines the core LLM agent with tool and conversation handling."""
2
2
 
3
- import os
4
- import json
3
+ import time
5
4
  from openai import OpenAI
6
- from janito.agent.conversation import ConversationHandler
7
- from janito.agent.tool_handler import ToolHandler
5
+ from janito.agent.conversation import ConversationHandler, ProviderError
8
6
 
9
7
  class Agent:
10
8
  """Agent capable of handling conversations and tool calls."""
@@ -18,36 +16,27 @@ class Agent:
18
16
  model: str = None,
19
17
  system_prompt: str | None = None,
20
18
  verbose_tools: bool = False,
21
- tool_handler = None,
22
19
  base_url: str = "https://openrouter.ai/api/v1",
23
20
  azure_openai_api_version: str = "2023-05-15",
24
21
  use_azure_openai: bool = False
25
22
  ):
26
23
  """
27
- Initialize Agent,
24
+ Initialize Agent.
28
25
 
29
26
  Args:
30
27
  api_key: API key for OpenAI-compatible service.
31
28
  model: Model name to use.
32
29
  system_prompt: Optional system prompt override.
33
30
  verbose_tools: Enable verbose tool call logging.
34
- tool_handler: Optional custom ToolHandler instance.
35
31
  base_url: API base URL.
36
32
  azure_openai_api_version: Azure OpenAI API version (default: "2023-05-15").
37
33
  use_azure_openai: Whether to use Azure OpenAI client (default: False).
38
-
39
- Args:
40
- api_key: API key for OpenAI-compatible service.
41
- model: Model name to use.
42
- system_prompt: Optional system prompt override.
43
- verbose_tools: Enable verbose tool call logging.
44
- tool_handler: Optional custom ToolHandler instance.
45
- base_url: API base URL.
46
34
  """
47
35
  self.api_key = api_key
48
36
  self.model = model
49
37
  self.system_prompt = system_prompt
50
38
  if use_azure_openai:
39
+ # Import inside conditional to avoid requiring AzureOpenAI unless needed
51
40
  from openai import AzureOpenAI
52
41
  self.client = AzureOpenAI(
53
42
  api_key=api_key,
@@ -63,22 +52,16 @@ class Agent:
63
52
  "X-Title": self.TITLE
64
53
  }
65
54
  )
66
- if tool_handler is not None:
67
- self.tool_handler = tool_handler
68
- else:
69
- self.tool_handler = ToolHandler(verbose=verbose_tools)
70
55
 
71
56
  self.conversation_handler = ConversationHandler(
72
- self.client, self.model, self.tool_handler
57
+ self.client, self.model,
73
58
  )
74
59
 
75
60
  @property
76
61
  def usage_history(self):
77
62
  return self.conversation_handler.usage_history
78
63
 
79
- def chat(self, messages, on_content=None, on_tool_progress=None, verbose_response=False, spinner=False, max_tokens=None, max_rounds=50):
80
- import time
81
- from janito.agent.conversation import ProviderError
64
+ def chat(self, messages, message_handler=None, verbose_response=False, spinner=False, max_tokens=None, max_rounds=50):
82
65
 
83
66
  max_retries = 5
84
67
  for attempt in range(1, max_retries + 1):
@@ -86,8 +69,7 @@ class Agent:
86
69
  return self.conversation_handler.handle_conversation(
87
70
  messages,
88
71
  max_rounds=max_rounds,
89
- on_content=on_content,
90
- on_tool_progress=on_tool_progress,
72
+ message_handler=message_handler,
91
73
  verbose_response=verbose_response,
92
74
  spinner=spinner,
93
75
  max_tokens=max_tokens
janito/agent/config.py CHANGED
@@ -1,7 +1,7 @@
1
1
  import json
2
- import os
3
2
  from pathlib import Path
4
3
  from threading import Lock
4
+ from .config_defaults import CONFIG_DEFAULTS
5
5
 
6
6
 
7
7
  class SingletonMeta(type):
@@ -60,13 +60,13 @@ CONFIG_OPTIONS = {
60
60
  "api_key": "API key for OpenAI-compatible service (required)",
61
61
  "model": "Model name to use (e.g., 'openai/gpt-4.1')",
62
62
  "base_url": "API base URL (OpenAI-compatible endpoint)",
63
- "role": "Role description for the system prompt (e.g., 'software engineer')",
64
- "system_prompt": "Override the entire system prompt text",
63
+ "role": "Role description for the Agent Profile (e.g., 'software engineer')",
64
+ "system_prompt": "Override the entire Agent Profile prompt text",
65
65
  "temperature": "Sampling temperature (float, e.g., 0.0 - 2.0)",
66
66
  "max_tokens": "Maximum tokens for model response (int)",
67
67
  "use_azure_openai": "Whether to use Azure OpenAI client (default: False)",
68
68
  # Accept template.* keys as valid config keys (for CLI validation, etc.)
69
- "template": "Template context dictionary for prompt rendering (nested)",
69
+ "template": "Template context dictionary for Agent Profile prompt rendering (nested)",
70
70
  # Note: template.* keys are validated dynamically, not statically here
71
71
  }
72
72
 
@@ -97,7 +97,6 @@ class BaseConfig:
97
97
 
98
98
 
99
99
  # Import defaults for reference
100
- from .config_defaults import CONFIG_DEFAULTS
101
100
 
102
101
  class EffectiveConfig:
103
102
  """Read-only merged view of local and global configs"""
@@ -106,7 +105,6 @@ class EffectiveConfig:
106
105
  self.global_cfg = global_cfg
107
106
 
108
107
  def get(self, key, default=None):
109
- from .config_defaults import CONFIG_DEFAULTS
110
108
  for cfg in (self.local_cfg, self.global_cfg):
111
109
  val = cfg.get(key)
112
110
  if val is not None:
@@ -3,8 +3,8 @@ CONFIG_DEFAULTS = {
3
3
  "api_key": None, # Must be set by user
4
4
  "model": "openai/gpt-4.1", # Default model
5
5
  "base_url": "https://openrouter.ai/api/v1",
6
- "role": "software engineer",
7
- "system_prompt": None, # None means auto-generate from role
6
+ "role": "software engineer", # Part of the Agent Profile
7
+ "system_prompt": None, # None means auto-generate from Agent Profile role
8
8
  "temperature": 0.2,
9
9
  "max_tokens": 200000,
10
10
  "use_azure_openai": False,
File without changes
@@ -1,4 +1,7 @@
1
- import json
1
+ from janito.agent.tool_registry import get_tool_schemas, handle_tool_call
2
+ from janito.agent.runtime_config import runtime_config, unified_config
3
+ from rich.console import Console
4
+ import pprint
2
5
 
3
6
  class MaxRoundsExceededError(Exception):
4
7
  pass
@@ -12,24 +15,17 @@ class ProviderError(Exception):
12
15
  super().__init__(message)
13
16
 
14
17
  class ConversationHandler:
15
- def __init__(self, client, model, tool_handler):
18
+ def __init__(self, client, model):
16
19
  self.client = client
17
20
  self.model = model
18
- self.tool_handler = tool_handler
19
21
  self.usage_history = []
20
22
 
21
- def handle_conversation(self, messages, max_rounds=50, on_content=None, on_tool_progress=None, verbose_response=False, spinner=False, max_tokens=None):
22
- from janito.agent.runtime_config import runtime_config
23
+ def handle_conversation(self, messages, max_rounds=50, message_handler=None, verbose_response=False, spinner=False, max_tokens=None):
23
24
  max_tools = runtime_config.get('max_tools', None)
24
25
  tool_calls_made = 0
25
26
  if not messages:
26
27
  raise ValueError("No prompt provided in messages")
27
28
 
28
- from rich.console import Console
29
- console = Console()
30
-
31
- from janito.agent.runtime_config import unified_config
32
-
33
29
  # Resolve max_tokens priority: runtime param > config > default
34
30
  resolved_max_tokens = max_tokens
35
31
  if resolved_max_tokens is None:
@@ -43,32 +39,63 @@ class ConversationHandler:
43
39
 
44
40
  for _ in range(max_rounds):
45
41
  if spinner:
46
- # Calculate word count for all messages
42
+ console = Console()
43
+ # Calculate word count for all messages
47
44
  word_count = sum(len(str(m.get('content', '')).split()) for m in messages if 'content' in m)
48
- spinner_msg = f"[bold green]Waiting for AI response... ({word_count} words in conversation)"
45
+ def format_count(n):
46
+ if n >= 1_000_000:
47
+ return f"{n/1_000_000:.1f}m"
48
+ elif n >= 1_000:
49
+ return f"{n/1_000:.1f}k"
50
+ return str(n)
51
+ # Count message types
52
+ user_msgs = sum(1 for m in messages if m.get('role') == 'user')
53
+ agent_msgs = sum(1 for m in messages if m.get('role') == 'assistant')
54
+ tool_msgs = sum(1 for m in messages if m.get('role') == 'tool')
55
+ # Tool uses: count tool_calls in all agent messages
56
+ tool_uses = sum(len(m.get('tool_calls', [])) for m in messages if m.get('role') == 'assistant')
57
+ # Tool responses: tool_msgs
58
+ spinner_msg = (
59
+ f"[bold green]Waiting for AI response... ("
60
+ f"{format_count(word_count)} words, "
61
+ f"{user_msgs} user, {agent_msgs} agent, "
62
+ f"{tool_uses} tool uses, {tool_msgs} tool responses)"
63
+ )
49
64
  with console.status(spinner_msg, spinner="dots") as status:
65
+ if runtime_config.get('vanilla_mode', False):
66
+ response = self.client.chat.completions.create(
67
+ model=self.model,
68
+ messages=messages,
69
+ max_tokens=resolved_max_tokens
70
+ )
71
+ else:
72
+ response = self.client.chat.completions.create(
73
+ model=self.model,
74
+ messages=messages,
75
+ tools=get_tool_schemas(),
76
+ tool_choice="auto",
77
+ temperature=0.2,
78
+ max_tokens=resolved_max_tokens
79
+ )
80
+ status.stop()
81
+ else:
82
+ if runtime_config.get('vanilla_mode', False):
50
83
  response = self.client.chat.completions.create(
51
84
  model=self.model,
52
85
  messages=messages,
53
- tools=self.tool_handler.get_tool_schemas(),
86
+ max_tokens=resolved_max_tokens
87
+ )
88
+ else:
89
+ response = self.client.chat.completions.create(
90
+ model=self.model,
91
+ messages=messages,
92
+ tools=get_tool_schemas(),
54
93
  tool_choice="auto",
55
94
  temperature=0.2,
56
95
  max_tokens=resolved_max_tokens
57
96
  )
58
- status.stop()
59
- # console.print("\r\033[2K", end="") # Clear the spinner line removed
60
- else:
61
- response = self.client.chat.completions.create(
62
- model=self.model,
63
- messages=messages,
64
- tools=self.tool_handler.get_tool_schemas(),
65
- tool_choice="auto",
66
- temperature=0.2,
67
- max_tokens=resolved_max_tokens
68
- )
69
97
 
70
98
  if verbose_response:
71
- import pprint
72
99
  pprint.pprint(response)
73
100
 
74
101
  # Check for provider errors
@@ -93,34 +120,33 @@ class ConversationHandler:
93
120
  else:
94
121
  usage_info = None
95
122
 
96
- # Call the on_content callback if provided and content is not None
97
- if on_content is not None and choice.message.content is not None:
98
- on_content({"content": choice.message.content})
123
+ # Route content through the unified message handler if provided
124
+ if message_handler is not None and choice.message.content:
125
+ message_handler.handle_message(choice.message.content, msg_type="content")
99
126
 
100
- # If no tool calls, return the assistant's message and usage info
127
+ # If no tool calls, return the agent's message and usage info
101
128
  if not choice.message.tool_calls:
102
- # Store usage info in usage_history, linked to the next assistant message index
103
- assistant_idx = len([m for m in messages if m.get('role') == 'assistant'])
104
- self.usage_history.append({"assistant_index": assistant_idx, "usage": usage_info})
129
+ # Store usage info in usage_history, linked to the next agent message index
130
+ agent_idx = len([m for m in messages if m.get('role') == 'agent'])
131
+ self.usage_history.append({"agent_index": agent_idx, "usage": usage_info})
105
132
  return {
106
133
  "content": choice.message.content,
107
134
  "usage": usage_info,
108
135
  "usage_history": self.usage_history
109
136
  }
110
137
 
111
- from janito.agent.runtime_config import runtime_config
112
138
  tool_responses = []
113
139
  # Sequential tool execution (default, only mode)
114
140
  for tool_call in choice.message.tool_calls:
115
141
  if max_tools is not None and tool_calls_made >= max_tools:
116
142
  raise MaxRoundsExceededError(f"Maximum number of tool calls ({max_tools}) reached in this chat session.")
117
- result = self.tool_handler.handle_tool_call(tool_call, on_progress=on_tool_progress)
143
+ result = handle_tool_call(tool_call, message_handler=message_handler)
118
144
  tool_responses.append({"tool_call_id": tool_call.id, "content": result})
119
145
  tool_calls_made += 1
120
146
 
121
- # Store usage info in usage_history, linked to the next assistant message index
122
- assistant_idx = len([m for m in messages if m.get('role') == 'assistant'])
123
- self.usage_history.append({"assistant_index": assistant_idx, "usage": usage_info})
147
+ # Store usage info in usage_history, linked to the next agent message index
148
+ agent_idx = len([m for m in messages if m.get('role') == 'agent'])
149
+ self.usage_history.append({"agent_index": agent_idx, "usage": usage_info})
124
150
  messages.append({"role": "assistant", "content": choice.message.content, "tool_calls": [tc.to_dict() for tc in choice.message.tool_calls]})
125
151
 
126
152
  for tr in tool_responses:
@@ -0,0 +1,18 @@
1
+
2
+ class MessageHandler:
3
+ def __init__(self, queue, *args, **kwargs):
4
+ self._queue = queue
5
+
6
+ def handle_tool_call(self, tool_call):
7
+ # All output is routed through the unified message handler and queue
8
+ return super().handle_tool_call(tool_call)
9
+
10
+ def handle_message(self, msg, msg_type=None):
11
+ # Unified: send content (agent/LLM) messages to the frontend
12
+ if isinstance(msg, dict):
13
+ msg_type = msg.get('type', 'info')
14
+ message = msg.get('message', '')
15
+ else:
16
+ message = msg
17
+ msg_type = msg_type or 'info'
18
+ self._queue.put(('message', message, msg_type))
@@ -0,0 +1,116 @@
1
+ """
2
+ MUST BE IMPLEMENTED:
3
+ - check that all params found in the signature have documentation in the docstring which is provided in the parameter schema doc
4
+ - the Return must be documented and integrated in the sechema description
5
+ - backward compatibility is not required
6
+ """
7
+
8
+
9
+ import inspect
10
+ import re
11
+ import typing
12
+
13
+ PYTHON_TYPE_TO_JSON = {
14
+ str: "string",
15
+ int: "integer",
16
+ float: "number",
17
+ bool: "boolean",
18
+ list: "array",
19
+ dict: "object",
20
+ }
21
+
22
+ def _parse_docstring(docstring: str):
23
+ """
24
+ Parses a docstring to extract summary, parameter descriptions, and return description.
25
+ Expects Google or NumPy style docstrings.
26
+ Returns: summary, {param: description}, return_description
27
+ """
28
+ if not docstring:
29
+ return "", {}, ""
30
+ lines = docstring.strip().split("\n")
31
+ summary = lines[0].strip()
32
+ param_descs = {}
33
+ return_desc = ""
34
+ in_params = False
35
+ in_returns = False
36
+ for line in lines[1:]:
37
+ l = line.strip()
38
+ if l.lower().startswith(("args:", "parameters:")):
39
+ in_params = True
40
+ in_returns = False
41
+ continue
42
+ if l.lower().startswith("returns:"):
43
+ in_returns = True
44
+ in_params = False
45
+ continue
46
+ if in_params:
47
+ m = re.match(r"([a-zA-Z_][a-zA-Z0-9_]*)(?: \(([^)]+)\))?: (.+)", l)
48
+ if m:
49
+ param, _, desc = m.groups()
50
+ param_descs[param] = desc.strip()
51
+ elif l and l[0] != "-":
52
+ # Continuation of previous param
53
+ if param_descs:
54
+ last = list(param_descs)[-1]
55
+ param_descs[last] += " " + l
56
+ elif in_returns:
57
+ if l:
58
+ return_desc += (" " if return_desc else "") + l
59
+ return summary, param_descs, return_desc
60
+
61
+ def _type_to_json_schema(tp):
62
+ # Handle typing.Optional, typing.Union, typing.List, etc.
63
+ origin = typing.get_origin(tp)
64
+ args = typing.get_args(tp)
65
+ if origin is None:
66
+ return {"type": PYTHON_TYPE_TO_JSON.get(tp, "string")}
67
+ if origin is list or origin is typing.List:
68
+ item_type = args[0] if args else str
69
+ return {"type": "array", "items": _type_to_json_schema(item_type)}
70
+ if origin is dict or origin is typing.Dict:
71
+ return {"type": "object"}
72
+ if origin is typing.Union:
73
+ # Optional[...] is Union[..., NoneType]
74
+ non_none = [a for a in args if a is not type(None)]
75
+ if len(non_none) == 1:
76
+ return _type_to_json_schema(non_none[0])
77
+ # Otherwise, fallback
78
+ return {"type": "string"}
79
+ return {"type": "string"}
80
+
81
+ def generate_openai_function_schema(func, tool_name: str):
82
+ """
83
+ Generates an OpenAI-compatible function schema for a callable.
84
+ Raises ValueError if the return type is not explicitly str.
85
+ """
86
+ sig = inspect.signature(func)
87
+ # Enforce explicit str return type
88
+ if sig.return_annotation is inspect._empty or sig.return_annotation is not str:
89
+ raise ValueError(f"Tool '{tool_name}' must have an explicit return type of 'str'. Found: {sig.return_annotation}")
90
+ docstring = func.__doc__
91
+ summary, param_descs, _ = _parse_docstring(docstring)
92
+ # Check that all parameters in the signature have documentation
93
+ undocumented = [name for name, param in sig.parameters.items() if name != "self" and name not in param_descs]
94
+ if undocumented:
95
+ raise ValueError(f"Tool '{tool_name}' is missing docstring documentation for parameter(s): {', '.join(undocumented)}")
96
+ properties = {}
97
+ required = []
98
+ for name, param in sig.parameters.items():
99
+ if name == "self":
100
+ continue
101
+ annotation = param.annotation if param.annotation != inspect._empty else str
102
+ pdesc = param_descs.get(name, "")
103
+ schema = _type_to_json_schema(annotation)
104
+ schema["description"] = pdesc
105
+ properties[name] = schema
106
+ if param.default == inspect._empty:
107
+ required.append(name)
108
+ return {
109
+ "name": tool_name,
110
+ "description": summary,
111
+ "parameters": {
112
+ "type": "object",
113
+ "properties": properties,
114
+ "required": required,
115
+ }
116
+ }
@@ -0,0 +1,32 @@
1
+ class QueuedMessageHandler:
2
+ def __init__(self, queue, *args, **kwargs):
3
+ self._queue = queue
4
+
5
+ def handle_message(self, msg, msg_type=None):
6
+ # Unified: send content (agent/LLM) messages to the frontend via queue
7
+ if isinstance(msg, dict):
8
+ msg_type = msg.get('type', 'info')
9
+ # For tool_call and tool_result, print and forward the full dict
10
+ if msg_type in ("tool_call", "tool_result"):
11
+ print(f"[QueuedMessageHandler] {msg_type}: {msg}")
12
+ self._queue.put(msg)
13
+ return
14
+ message = msg.get('message', '')
15
+ else:
16
+ message = msg
17
+ msg_type = msg_type or 'info'
18
+ # For normal agent/user/info messages, emit type 'content' for frontend compatibility
19
+ print(f"[QueuedMessageHandler] {msg_type}: {message}")
20
+ if msg_type == "content":
21
+ self._queue.put({"type": "content", "content": message})
22
+ elif msg_type == "info":
23
+ out = {"type": "info", "message": message}
24
+ if 'tool' in msg:
25
+ out["tool"] = msg["tool"]
26
+ self._queue.put(out)
27
+ else:
28
+ out = {"type": msg_type, "message": message}
29
+ if 'tool' in msg:
30
+ out["tool"] = msg["tool"]
31
+ self._queue.put(out)
32
+
@@ -0,0 +1,43 @@
1
+ from rich.console import Console
2
+ console = Console()
3
+
4
+ class MessageHandler:
5
+ """
6
+ Unified message handler for all output (tool, agent, system) using Rich for styled output.
7
+ """
8
+ def __init__(self):
9
+ self.console = console
10
+
11
+ def handle_message(self, msg, msg_type=None):
12
+ """
13
+ Handles either a dict (with 'type' and 'message') or a plain string.
14
+ If dict: uses type/message. If str: uses msg_type or defaults to 'info'.
15
+ """
16
+ from rich.markdown import Markdown
17
+ if isinstance(msg, dict):
18
+ msg_type = msg.get("type", "info")
19
+ message = msg.get("message", "")
20
+ if msg_type == "content":
21
+ self.console.print(Markdown(message))
22
+ elif msg_type == "info":
23
+ self.console.print(message, style="cyan", end="")
24
+ elif msg_type == "success":
25
+ self.console.print(message, style="bold green", end="\n")
26
+ elif msg_type == "error":
27
+ self.console.print(message, style="bold red", end="\n")
28
+ elif msg_type == "progress":
29
+ self._handle_progress(message)
30
+ elif msg_type == "warning":
31
+ self.console.print(message, style="bold yellow", end="\n")
32
+ elif msg_type == "stdout":
33
+ from rich.text import Text
34
+ self.console.print(Text(message, style="on #003300", no_wrap=True, overflow=None), end="")
35
+ elif msg_type == "stderr":
36
+ from rich.text import Text
37
+ self.console.print(Text(message, style="on #330000", no_wrap=True, overflow=None), end="")
38
+ else:
39
+ # Ignore unsupported message types silently
40
+ return
41
+ else:
42
+ # Print plain strings as markdown/markup
43
+ self.console.print(Markdown(str(msg)))
@@ -1,4 +1,4 @@
1
- from .config import BaseConfig, EffectiveConfig, effective_config
1
+ from .config import BaseConfig, effective_config
2
2
 
3
3
  class RuntimeConfig(BaseConfig):
4
4
  """In-memory only config, reset on restart"""
@@ -1,9 +1,14 @@
1
- Your main role is {{ role }} .
1
+ Your Agent Profile:
2
+ - Role: {{ role }}
2
3
 
3
- You are an assistant for a analysis and development tool that operates on files and
4
- directories using text-based operations.
4
+ You are an Agent for an analysis and development tool that operates on files and directories using text-based operations.
5
+
6
+ <agent_profile>
7
+ This Agent operates according to its Agent Profile, which includes system settings, role, and tools.
8
+ </agent_profile>
5
9
 
6
10
  Provide a concise plan before calling any tool.
11
+ Plan changes only after gathering all the necessary information.
7
12
  Always execute the plan immediately after presenting it, unless the user requests otherwise.
8
13
 
9
14
  <context>
@@ -23,10 +28,11 @@ When you need to make changes to a file, consider the following:
23
28
  - It is preferred to replace exact text occurrences over file overwriting.
24
29
  - When replacing files, review their current content before requesting the update.
25
30
  - When reorganizing, moving files, or functions, search for references in other files that might need to be updated accordingly.
31
+ - After making changes to files, use available tools (such as syntax checkers, linters, or test runners) to validate the files and ensure correctness before proceeding.
26
32
  </editing>
27
33
 
28
34
  <finishing>
29
- - When asked to commit and no message is provided, check the git diff and summarize the changes in the commit message.
35
+ - When asked to commit, check the git diff and summarize the changes in the commit message.
30
36
  - Review the README content if there are user-exposed or public API changes.
31
37
  - Update `README_structure.txt` considering discovered, created, or modified files.
32
38
  </finishing>
@@ -0,0 +1,92 @@
1
+ # janito/agent/tool_registry.py
2
+ import json
3
+ from janito.agent.tools.tool_base import ToolBase
4
+
5
+ from janito.agent.openai_schema_generator import generate_openai_function_schema
6
+
7
+ _tool_registry = {}
8
+
9
+ def register_tool(tool=None, *, name: str = None):
10
+ if tool is None:
11
+ return lambda t: register_tool(t, name=name)
12
+ override_name = name
13
+ if not (isinstance(tool, type) and issubclass(tool, ToolBase)):
14
+ raise TypeError("Tool must be a class derived from ToolBase.")
15
+ instance = tool()
16
+ func = instance.call
17
+ default_name = tool.__name__
18
+ tool_name = override_name or default_name
19
+ schema = generate_openai_function_schema(func, tool_name)
20
+
21
+
22
+ _tool_registry[tool_name] = {
23
+ "function": func,
24
+ "description": schema["description"],
25
+ "parameters": schema["parameters"]
26
+ }
27
+ return tool
28
+
29
+ def get_tool_schemas():
30
+ schemas = []
31
+ for name, entry in _tool_registry.items():
32
+ schemas.append({
33
+ "type": "function",
34
+ "function": {
35
+ "name": name,
36
+ "description": entry["description"],
37
+ "parameters": entry["parameters"]
38
+ }
39
+ })
40
+ return schemas
41
+
42
+ def handle_tool_call(tool_call, message_handler=None, verbose=False):
43
+ import uuid
44
+ call_id = getattr(tool_call, 'id', None) or str(uuid.uuid4())
45
+ tool_entry = _tool_registry.get(tool_call.function.name)
46
+ if not tool_entry:
47
+ return f"Unknown tool: {tool_call.function.name}"
48
+ func = tool_entry["function"]
49
+ args = json.loads(tool_call.function.arguments)
50
+ if verbose:
51
+ print(f"[Tool Call] {tool_call.function.name} called with arguments: {args}")
52
+ instance = None
53
+ if hasattr(func, '__self__') and isinstance(func.__self__, ToolBase):
54
+ instance = func.__self__
55
+ if message_handler:
56
+ instance._progress_callback = message_handler.handle_message
57
+ # Emit tool_call event before calling the tool
58
+ if message_handler:
59
+ message_handler.handle_message({
60
+ 'type': 'tool_call',
61
+ 'tool': tool_call.function.name,
62
+ 'args': args,
63
+ 'call_id': call_id
64
+ })
65
+ try:
66
+ result = func(**args)
67
+ except Exception as e:
68
+ import traceback # Kept here: only needed on error
69
+ error_message = f"[Tool Error] {type(e).__name__}: {e}\n" + traceback.format_exc()
70
+ if message_handler:
71
+ message_handler.handle_message({'type': 'error', 'message': error_message})
72
+ result = error_message
73
+ # Emit tool_result event after tool execution
74
+ if message_handler:
75
+ message_handler.handle_message({
76
+ 'type': 'tool_result',
77
+ 'tool': tool_call.function.name,
78
+ 'call_id': call_id,
79
+ 'result': result
80
+ })
81
+ if verbose:
82
+ preview = result
83
+ if isinstance(result, str):
84
+ lines = result.splitlines()
85
+ if len(lines) > 10:
86
+ preview = "\n".join(lines[:10]) + "\n... (truncated)"
87
+ elif len(result) > 500:
88
+ preview = result[:500] + "... (truncated)"
89
+ print(f"[Tool Result] {tool_call.function.name} returned:\n{preview}")
90
+ if instance is not None:
91
+ instance._progress_callback = None
92
+ return result