janito-1.5.2-py3-none-any.whl → janito-1.6.0-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only and reflects the changes between those published versions.
- janito/__init__.py +1 -1
- janito/__main__.py +0 -1
- janito/agent/config.py +11 -10
- janito/agent/config_defaults.py +3 -2
- janito/agent/conversation.py +93 -119
- janito/agent/conversation_api.py +98 -0
- janito/agent/conversation_exceptions.py +12 -0
- janito/agent/conversation_tool_calls.py +22 -0
- janito/agent/conversation_ui.py +17 -0
- janito/agent/message_handler.py +8 -9
- janito/agent/{agent.py → openai_client.py} +48 -16
- janito/agent/openai_schema_generator.py +53 -37
- janito/agent/profile_manager.py +172 -0
- janito/agent/queued_message_handler.py +13 -14
- janito/agent/rich_live.py +32 -0
- janito/agent/rich_message_handler.py +64 -0
- janito/agent/runtime_config.py +6 -1
- janito/agent/{tools/tool_base.py → tool_base.py} +15 -8
- janito/agent/tool_registry.py +118 -132
- janito/agent/tools/__init__.py +41 -2
- janito/agent/tools/ask_user.py +43 -33
- janito/agent/tools/create_directory.py +18 -16
- janito/agent/tools/create_file.py +31 -36
- janito/agent/tools/fetch_url.py +23 -19
- janito/agent/tools/find_files.py +40 -36
- janito/agent/tools/get_file_outline.py +100 -22
- janito/agent/tools/get_lines.py +40 -32
- janito/agent/tools/gitignore_utils.py +9 -6
- janito/agent/tools/move_file.py +22 -13
- janito/agent/tools/py_compile_file.py +40 -0
- janito/agent/tools/remove_directory.py +34 -24
- janito/agent/tools/remove_file.py +22 -20
- janito/agent/tools/replace_file.py +51 -0
- janito/agent/tools/replace_text_in_file.py +69 -42
- janito/agent/tools/rich_live.py +9 -2
- janito/agent/tools/run_bash_command.py +155 -107
- janito/agent/tools/run_python_command.py +139 -0
- janito/agent/tools/search_files.py +51 -34
- janito/agent/tools/tools_utils.py +4 -2
- janito/agent/tools/utils.py +6 -2
- janito/cli/_print_config.py +42 -16
- janito/cli/_utils.py +1 -0
- janito/cli/arg_parser.py +182 -29
- janito/cli/config_commands.py +54 -22
- janito/cli/logging_setup.py +9 -3
- janito/cli/main.py +11 -10
- janito/cli/runner/__init__.py +2 -0
- janito/cli/runner/cli_main.py +148 -0
- janito/cli/runner/config.py +33 -0
- janito/cli/runner/formatting.py +12 -0
- janito/cli/runner/scan.py +44 -0
- janito/cli_chat_shell/__init__.py +0 -1
- janito/cli_chat_shell/chat_loop.py +71 -92
- janito/cli_chat_shell/chat_state.py +38 -0
- janito/cli_chat_shell/chat_ui.py +43 -0
- janito/cli_chat_shell/commands/__init__.py +45 -0
- janito/cli_chat_shell/commands/config.py +22 -0
- janito/cli_chat_shell/commands/history_reset.py +29 -0
- janito/cli_chat_shell/commands/session.py +48 -0
- janito/cli_chat_shell/commands/session_control.py +12 -0
- janito/cli_chat_shell/commands/system.py +73 -0
- janito/cli_chat_shell/commands/utility.py +29 -0
- janito/cli_chat_shell/config_shell.py +39 -10
- janito/cli_chat_shell/load_prompt.py +5 -2
- janito/cli_chat_shell/session_manager.py +24 -27
- janito/cli_chat_shell/ui.py +75 -40
- janito/rich_utils.py +15 -2
- janito/web/__main__.py +10 -2
- janito/web/app.py +88 -52
- {janito-1.5.2.dist-info → janito-1.6.0.dist-info}/METADATA +76 -11
- janito-1.6.0.dist-info/RECORD +81 -0
- {janito-1.5.2.dist-info → janito-1.6.0.dist-info}/WHEEL +1 -1
- janito/agent/rich_tool_handler.py +0 -43
- janito/agent/templates/system_instructions.j2 +0 -38
- janito/agent/tool_auto_imports.py +0 -5
- janito/agent/tools/append_text_to_file.py +0 -41
- janito/agent/tools/py_compile.py +0 -39
- janito/agent/tools/python_exec.py +0 -83
- janito/cli/runner.py +0 -137
- janito/cli_chat_shell/commands.py +0 -204
- janito/render_prompt.py +0 -13
- janito-1.5.2.dist-info/RECORD +0 -66
- {janito-1.5.2.dist-info → janito-1.6.0.dist-info}/entry_points.txt +0 -0
- {janito-1.5.2.dist-info → janito-1.6.0.dist-info}/licenses/LICENSE +0 -0
- {janito-1.5.2.dist-info → janito-1.6.0.dist-info}/top_level.txt +0 -0

janito/agent/{agent.py → openai_client.py}

@@ -2,7 +2,9 @@
 
 import time
 from openai import OpenAI
-from janito.agent.conversation import ConversationHandler
+from janito.agent.conversation import ConversationHandler
+from janito.agent.conversation_exceptions import ProviderError
+
 
 class Agent:
     """Agent capable of handling conversations and tool calls."""
@@ -14,11 +16,11 @@ class Agent:
         self,
         api_key: str,
         model: str = None,
-
+        system_prompt_template: str | None = None,
         verbose_tools: bool = False,
         base_url: str = "https://openrouter.ai/api/v1",
         azure_openai_api_version: str = "2023-05-15",
-        use_azure_openai: bool = False
+        use_azure_openai: bool = False,
     ):
         """
         Initialize Agent.
@@ -26,7 +28,7 @@ class Agent:
         Args:
             api_key: API key for OpenAI-compatible service.
             model: Model name to use.
-
+            system_prompt_template: Optional system prompt override.
             verbose_tools: Enable verbose tool call logging.
             base_url: API base URL.
             azure_openai_api_version: Azure OpenAI API version (default: "2023-05-15").
@@ -34,10 +36,11 @@ class Agent:
         """
         self.api_key = api_key
         self.model = model
-        self.
+        self.system_prompt_template = system_prompt_template
         if use_azure_openai:
             # Import inside conditional to avoid requiring AzureOpenAI unless needed
             from openai import AzureOpenAI
+
             self.client = AzureOpenAI(
                 api_key=api_key,
                 azure_endpoint=base_url,
@@ -47,22 +50,45 @@ class Agent:
             self.client = OpenAI(
                 base_url=base_url,
                 api_key=api_key,
-                default_headers={
-                    "HTTP-Referer": self.REFERER,
-                    "X-Title": self.TITLE
-                }
+                default_headers={"HTTP-Referer": self.REFERER, "X-Title": self.TITLE},
             )
 
         self.conversation_handler = ConversationHandler(
-            self.client,
+            self.client,
+            self.model,
         )
 
     @property
     def usage_history(self):
         return self.conversation_handler.usage_history
 
-    def chat(
+    def chat(
+        self,
+        messages,
+        message_handler=None,
+        verbose_response=False,
+        spinner=False,
+        max_tokens=None,
+        max_rounds=50,
+        verbose_events=False,
+        stream=False,
+    ):
+        """
+        Start a chat conversation with the agent.
 
+        Args:
+            messages: List of message dicts.
+            message_handler: Optional handler for streaming or event messages.
+            verbose_response: Print full response for debugging.
+            spinner: Show spinner during request.
+            max_tokens: Max tokens for completion.
+            max_rounds: Max conversation rounds.
+            verbose_events: Print all events for debugging.
+            stream: If True, enable OpenAI streaming mode (yields tokens incrementally).
+        Returns:
+            If stream=False: dict with 'content', 'usage', and 'usage_history'.
+            If stream=True: generator yielding content chunks or events.
+        """
         max_retries = 5
         for attempt in range(1, max_retries + 1):
             try:
@@ -72,21 +98,27 @@ class Agent:
                     message_handler=message_handler,
                     verbose_response=verbose_response,
                     spinner=spinner,
-                    max_tokens=max_tokens
+                    max_tokens=max_tokens,
+                    verbose_events=verbose_events,
+                    stream=stream,
                 )
             except ProviderError as e:
-                error_data = getattr(e,
-                code = error_data.get(
+                error_data = getattr(e, "error_data", {}) or {}
+                code = error_data.get("code", "")
                 # Retry only on 5xx errors
                 if isinstance(code, int) and 500 <= code < 600:
                     pass
-                elif
+                elif (
+                    isinstance(code, str) and code.isdigit() and 500 <= int(code) < 600
+                ):
                     code = int(code)
                 else:
                     raise
 
                 if attempt < max_retries:
-                    print(
+                    print(
+                        f"ProviderError with 5xx code encountered (attempt {attempt}/{max_retries}). Retrying in 5 seconds..."
+                    )
                     time.sleep(5)
                 else:
                     print("Max retries reached. Raising error.")
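
For orientation, a minimal sketch of how the reworked constructor and chat() signature above might be driven. It is not taken from the package: the import path follows the agent.py → openai_client.py rename, and the API key and model name are placeholders.

from janito.agent.openai_client import Agent

agent = Agent(
    api_key="sk-...",                      # placeholder key
    model="openrouter/auto",               # placeholder model name
    system_prompt_template="You are a helpful assistant.",
)
messages = [{"role": "user", "content": "Summarize this repository."}]
result = agent.chat(messages, max_rounds=10, verbose_events=False, stream=False)
print(result["content"])                   # stream=False returns a dict per the docstring
# With stream=True the call instead yields content chunks incrementally.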

janito/agent/openai_schema_generator.py

@@ -5,7 +5,6 @@ MUST BE IMPLEMENTED:
 - backward compatibility is not required
 """
 
-
 import inspect
 import re
 import typing
@@ -19,10 +18,23 @@ PYTHON_TYPE_TO_JSON = {
     dict: "object",
 }
 
+
+def _type_to_json_schema(annotation):
+    if hasattr(annotation, "__origin__"):
+        if annotation.__origin__ is list or annotation.__origin__ is typing.List:
+            return {
+                "type": "array",
+                "items": _type_to_json_schema(annotation.__args__[0]),
+            }
+        if annotation.__origin__ is dict or annotation.__origin__ is typing.Dict:
+            return {"type": "object"}
+    return {"type": PYTHON_TYPE_TO_JSON.get(annotation, "string")}
+
+
 def _parse_docstring(docstring: str):
     """
     Parses a docstring to extract summary, parameter descriptions, and return description.
-
+    Accepts Google, NumPy, and relaxed formats.
     Returns: summary, {param: description}, return_description
     """
     if not docstring:
@@ -33,52 +45,44 @@ def _parse_docstring(docstring: str):
     return_desc = ""
     in_params = False
     in_returns = False
+    param_section_headers = ("args", "arguments", "params", "parameters")
     for line in lines[1:]:
-
-        if
+        stripped_line = line.strip()
+        if any(
+            stripped_line.lower().startswith(h + ":") or stripped_line.lower() == h
+            for h in param_section_headers
+        ):
            in_params = True
            in_returns = False
            continue
-        if
+        if (
+            stripped_line.lower().startswith("returns:")
+            or stripped_line.lower() == "returns"
+        ):
            in_returns = True
            in_params = False
            continue
        if in_params:
-
+            # Accept: name: desc, name (type): desc, name - desc, name desc
+            m = re.match(
+                r"([a-zA-Z_][a-zA-Z0-9_]*)\s*(?:\(([^)]+)\))?\s*[:\-]?\s*(.+)",
+                stripped_line,
+            )
            if m:
                param, _, desc = m.groups()
                param_descs[param] = desc.strip()
-            elif
+            elif stripped_line and stripped_line[0] != "-":
                # Continuation of previous param
                if param_descs:
                    last = list(param_descs)[-1]
-                    param_descs[last] += " " +
+                    param_descs[last] += " " + stripped_line
        elif in_returns:
-            if
-                return_desc += (" " if return_desc else "") +
+            if stripped_line:
+                return_desc += (" " if return_desc else "") + stripped_line
    return summary, param_descs, return_desc
 
-def _type_to_json_schema(tp):
-    # Handle typing.Optional, typing.Union, typing.List, etc.
-    origin = typing.get_origin(tp)
-    args = typing.get_args(tp)
-    if origin is None:
-        return {"type": PYTHON_TYPE_TO_JSON.get(tp, "string")}
-    if origin is list or origin is typing.List:
-        item_type = args[0] if args else str
-        return {"type": "array", "items": _type_to_json_schema(item_type)}
-    if origin is dict or origin is typing.Dict:
-        return {"type": "object"}
-    if origin is typing.Union:
-        # Optional[...] is Union[..., NoneType]
-        non_none = [a for a in args if a is not type(None)]
-        if len(non_none) == 1:
-            return _type_to_json_schema(non_none[0])
-        # Otherwise, fallback
-        return {"type": "string"}
-    return {"type": "string"}
 
-def generate_openai_function_schema(func, tool_name: str):
+def generate_openai_function_schema(func, tool_name: str, tool_class=None):
     """
     Generates an OpenAI-compatible function schema for a callable.
     Raises ValueError if the return type is not explicitly str.
@@ -86,13 +90,25 @@ def generate_openai_function_schema(func, tool_name: str):
     sig = inspect.signature(func)
     # Enforce explicit str return type
     if sig.return_annotation is inspect._empty or sig.return_annotation is not str:
-        raise ValueError(
-
-
+        raise ValueError(
+            f"Tool '{tool_name}' must have an explicit return type of 'str'. Found: {sig.return_annotation}"
+        )
+    # Only use the class docstring for schema generation
+    class_doc = tool_class.__doc__.strip() if tool_class and tool_class.__doc__ else ""
+    summary, param_descs, return_desc = _parse_docstring(class_doc)
+    description = summary
+    if return_desc:
+        description += f"\n\nReturns: {return_desc}"
     # Check that all parameters in the signature have documentation
-    undocumented = [
+    undocumented = [
+        name
+        for name, param in sig.parameters.items()
+        if name != "self" and name not in param_descs
+    ]
     if undocumented:
-        raise ValueError(
+        raise ValueError(
+            f"Tool '{tool_name}' is missing docstring documentation for parameter(s): {', '.join(undocumented)}"
+        )
     properties = {}
     required = []
     for name, param in sig.parameters.items():
@@ -107,10 +123,10 @@ def generate_openai_function_schema(func, tool_name: str):
             required.append(name)
     return {
         "name": tool_name,
-        "description":
+        "description": description,
         "parameters": {
             "type": "object",
             "properties": properties,
             "required": required,
-        }
+        },
     }
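
A small illustrative sketch (not part of the package) of what the stricter generator now expects: the schema is built from the tool class docstring supplied via the new tool_class argument, every signature parameter must be documented there, and the callable must be annotated to return str. The tool class and names below are invented for the example.

from janito.agent.openai_schema_generator import generate_openai_function_schema


class HelloTool:
    """
    Greets a person by name.

    Args:
        name: Name of the person to greet.

    Returns:
        A greeting string.
    """

    def run(self, name: str) -> str:
        return f"Hello, {name}!"


schema = generate_openai_function_schema(HelloTool.run, "hello", tool_class=HelloTool)
# schema["description"] combines the docstring summary with the Returns text, and
# schema["parameters"] is built from the signature ("name" here). Omitting the Args
# entry or the -> str annotation raises ValueError.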

janito/agent/profile_manager.py

@@ -0,0 +1,172 @@
+from janito.agent.conversation import ConversationHandler
+from openai import OpenAI
+import jinja2
+from pathlib import Path
+import platform
+import os
+
+
+class AgentProfileManager:
+    REFERER = "www.janito.dev"
+    TITLE = "Janito"
+
+    def parse_style_string(self, style: str):
+        if "-" in style:
+            parts = style.split("-")
+            return parts[0], parts[1:]
+        return style, []
+
+    def get_platform_name(self):
+        sys_platform = platform.system().lower()
+        if sys_platform.startswith("win"):
+            return "windows"
+        elif sys_platform.startswith("linux"):
+            return "linux"
+        elif sys_platform.startswith("darwin"):
+            return "darwin"
+        return sys_platform
+
+    def get_python_version(self):
+        return platform.python_version()
+
+    def get_shell_info(self):
+        shell = os.environ.get("SHELL")
+        term = os.environ.get("TERM")
+        term_program = os.environ.get("TERM_PROGRAM")
+        if shell:
+            info = shell
+        elif os.environ.get("MSYSTEM"):
+            info = f"Git Bash ({os.environ.get('MSYSTEM')})"
+        elif os.environ.get("WSL_DISTRO_NAME"):
+            info = f"WSL ({os.environ.get('WSL_DISTRO_NAME')})"
+        else:
+            comspec = os.environ.get("COMSPEC")
+            if comspec:
+                if "powershell" in comspec.lower():
+                    info = "PowerShell"
+                elif "cmd" in comspec.lower():
+                    info = "cmd.exe"
+                else:
+                    info = "Unknown shell"
+            else:
+                info = "Unknown shell"
+        if term:
+            info += f", TERM={term}"
+        if term_program and term_program.lower() == "vscode":
+            info += ", running in VSCode"
+        home_dir = os.path.expanduser("~")
+        if home_dir:
+            info += f", HOME={home_dir}"
+        return info
+        return "unknown"
+
+    def render_prompt(self):
+        main_style, features = self.parse_style_string(self.interaction_style)
+        base_dir = Path(__file__).parent / "templates"
+        profiles_dir = base_dir / "profiles"
+        features_dir = base_dir / "features"
+        loader = jinja2.ChoiceLoader(
+            [
+                jinja2.FileSystemLoader(str(profiles_dir)),
+                jinja2.FileSystemLoader(str(features_dir)),
+            ]
+        )
+        env = jinja2.Environment(loader=loader)
+        if main_style == "technical":
+            main_template = "system_prompt_template_technical.j2"
+        else:
+            main_template = "system_prompt_template.j2"
+        platform_name = self.get_platform_name()
+        python_version = self.get_python_version()
+        shell_info = self.get_shell_info()
+        if not features:
+            # Inject tech.txt existence and content
+            tech_txt_path = Path(".janito") / "tech.txt"
+            tech_txt_exists = tech_txt_path.exists()
+            tech_txt_content = ""
+            if tech_txt_exists:
+                try:
+                    tech_txt_content = tech_txt_path.read_text(encoding="utf-8")
+                except Exception:
+                    tech_txt_content = "⚠️ Error reading janito/tech.txt."
+            template = env.get_template(main_template)
+            return template.render(
+                role=self.role,
+                interaction_mode=self.interaction_mode,
+                platform=platform_name,
+                python_version=python_version,
+                shell_info=shell_info,
+                tech_txt_exists=tech_txt_exists,
+                tech_txt_content=tech_txt_content,
+            )
+        parent_template = main_template
+        # Inject tech.txt existence and content for feature templates as well
+        tech_txt_path = Path(".janito") / "tech.txt"
+        tech_txt_exists = tech_txt_path.exists()
+        tech_txt_content = ""
+        if tech_txt_exists:
+            try:
+                tech_txt_content = tech_txt_path.read_text(encoding="utf-8")
+            except Exception:
+                tech_txt_content = "⚠️ Error reading janito/tech.txt."
+        context = {
+            "role": self.role,
+            "interaction_mode": self.interaction_mode,
+            "platform": platform_name,
+            "python_version": python_version,
+            "shell_info": shell_info,
+            "tech_txt_exists": tech_txt_exists,
+            "tech_txt_content": tech_txt_content,
+        }
+        for feature in features:
+            feature_template = f"system_prompt_template_{feature}.j2"
+            template = env.get_template(feature_template)
+            context["parent_template"] = parent_template
+            rendered = template.render(**context)
+            parent_template = feature_template
+        return rendered
+
+    def __init__(
+        self,
+        api_key,
+        model,
+        role,
+        interaction_style,
+        interaction_mode,
+        verbose_tools,
+        base_url,
+        azure_openai_api_version,
+        use_azure_openai,
+    ):
+        self.api_key = api_key
+        self.model = model
+        self.role = role
+        self.interaction_style = interaction_style
+        self.interaction_mode = interaction_mode
+        self.verbose_tools = verbose_tools
+        self.base_url = base_url
+        self.azure_openai_api_version = azure_openai_api_version
+        self.use_azure_openai = use_azure_openai
+        if use_azure_openai:
+            from openai import AzureOpenAI
+
+            self.client = AzureOpenAI(
+                api_key=api_key,
+                azure_endpoint=base_url,
+                api_version=azure_openai_api_version,
+            )
+        else:
+            self.client = OpenAI(
+                base_url=base_url,
+                api_key=api_key,
+                default_headers={"HTTP-Referer": self.REFERER, "X-Title": self.TITLE},
+            )
+        self.agent = ConversationHandler(self.client, model)
+        self.system_prompt_template = None
+
+    def refresh_prompt(self):
+        self.system_prompt_template = self.render_prompt()
+        self.agent.system_prompt_template = self.system_prompt_template
+
+
+# All prompt rendering is now handled by AgentProfileManager.
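
A hypothetical wiring example for the new profile manager, based only on the constructor and methods shown above. The key, model, role, and interaction values are placeholders, and render_prompt() assumes the packaged Jinja2 profile templates are present alongside the module.

from janito.agent.profile_manager import AgentProfileManager

manager = AgentProfileManager(
    api_key="sk-...",                        # placeholder
    model="openrouter/auto",                 # placeholder
    role="software engineer",                # placeholder
    interaction_style="technical",           # "technical" selects the technical template
    interaction_mode="chat",                 # placeholder
    verbose_tools=False,
    base_url="https://openrouter.ai/api/v1",
    azure_openai_api_version="2023-05-15",
    use_azure_openai=False,
)
manager.refresh_prompt()                     # renders the template and stores it on the handler
print(manager.system_prompt_template)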

janito/agent/queued_message_handler.py

@@ -4,29 +4,28 @@ class QueuedMessageHandler:
 
     def handle_message(self, msg, msg_type=None):
         # Unified: send content (agent/LLM) messages to the frontend via queue
-        if isinstance(msg, dict):
-
-
-
-
-
-
-
-
-
-
+        if not isinstance(msg, dict):
+            raise TypeError(
+                f"QueuedMessageHandler.handle_message expects a dict with 'type' and 'message', got {type(msg)}: {msg!r}"
+            )
+        msg_type = msg.get("type", "info")
+        # For tool_call and tool_result, print and forward the full dict
+        if msg_type in ("tool_call", "tool_result"):
+            print(f"[QueuedMessageHandler] {msg_type}: {msg}")
+            self._queue.put(msg)
+            return
+        message = msg.get("message", "")
         # For normal agent/user/info messages, emit type 'content' for frontend compatibility
         print(f"[QueuedMessageHandler] {msg_type}: {message}")
         if msg_type == "content":
             self._queue.put({"type": "content", "content": message})
         elif msg_type == "info":
             out = {"type": "info", "message": message}
-            if
+            if "tool" in msg:
                 out["tool"] = msg["tool"]
             self._queue.put(out)
         else:
             out = {"type": msg_type, "message": message}
-            if
+            if "tool" in msg:
                 out["tool"] = msg["tool"]
             self._queue.put(out)
-
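
For context, a sketch of the handler's contract as implied by the hunk above. The constructor is outside this hunk, so passing a queue.Queue to it is an assumption inferred from the self._queue.put() calls.

import queue

from janito.agent.queued_message_handler import QueuedMessageHandler

q = queue.Queue()
handler = QueuedMessageHandler(q)           # assumed constructor signature
handler.handle_message({"type": "content", "message": "Hello from the agent"})
print(q.get_nowait())                       # -> {'type': 'content', 'content': 'Hello from the agent'}
# handler.handle_message("not a dict")      # would raise TypeError per the new check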

janito/agent/rich_live.py

@@ -0,0 +1,32 @@
+from rich.live import Live
+from rich.markdown import Markdown
+from rich.console import Console
+
+
+class LiveMarkdownDisplay:
+    def __init__(self, console=None):
+        self.console = console or Console()
+        self._accumulated = ""
+        self._live = None
+
+    def start(self):
+        self._live = Live(
+            Markdown(self._accumulated), console=self.console, refresh_per_second=8
+        )
+        self._live.__enter__()
+
+    def update(self, part):
+        self._accumulated += part
+        # Only re-render on newlines for efficiency
+        if "\n" in part:
+            self._live.update(Markdown(self._accumulated))
+
+    def stop(self):
+        if self._live:
+            self._live.__exit__(None, None, None)
+            self._live = None
+
+    def reset(self):
+        self._accumulated = ""
+        if self._live:
+            self._live.update(Markdown(self._accumulated))
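
The new LiveMarkdownDisplay can be exercised on its own; a short usage sketch (the streamed strings are arbitrary):

from janito.agent.rich_live import LiveMarkdownDisplay

display = LiveMarkdownDisplay()
display.start()
for chunk in ["# Streaming demo\n", "- first ", "line\n", "- second line\n"]:
    display.update(chunk)   # the Markdown view refreshes only when a chunk contains a newline
display.stop()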

janito/agent/rich_message_handler.py

@@ -0,0 +1,64 @@
+from rich.console import Console
+from janito.agent.runtime_config import runtime_config, unified_config
+
+console = Console()
+
+
+class RichMessageHandler:
+    """
+    Unified message handler for all output (tool, agent, system) using Rich for styled output.
+    """
+
+    def __init__(self):
+        self.console = console
+
+    def handle_message(self, msg, msg_type=None):
+        """
+        Handles a dict with 'type' and 'message'.
+        All messages must be dicts. Raises if not.
+        """
+        # Check trust config: suppress all output except 'content' if enabled
+        trust = runtime_config.get("trust")
+        if trust is None:
+            trust = unified_config.get("trust", False)
+
+        from rich.markdown import Markdown
+
+        if not isinstance(msg, dict):
+            raise TypeError(
+                f"RichMessageHandler.handle_message expects a dict with 'type' and 'message', got {type(msg)}: {msg!r}"
+            )
+
+        msg_type = msg.get("type", "info")
+        message = msg.get("message", "")
+        if trust and msg_type != "content":
+            return  # Suppress all except content
+        if msg_type == "content":
+            self.console.print(Markdown(message))
+        elif msg_type == "info":
+            self.console.print(message, style="cyan", end="")
+        elif msg_type == "success":
+            self.console.print(message, style="bold green", end="\n")
+        elif msg_type == "error":
+            self.console.print(message, style="bold red", end="\n")
+        elif msg_type == "progress":
+            self._handle_progress(message)
+        elif msg_type == "warning":
+            self.console.print(message, style="bold yellow", end="\n")
+        elif msg_type == "stdout":
+            from rich.text import Text
+
+            self.console.print(
+                Text(message, style="on #003300", no_wrap=True, overflow=None),
+                end="",
+            )
+        elif msg_type == "stderr":
+            from rich.text import Text
+
+            self.console.print(
+                Text(message, style="on #330000", no_wrap=True, overflow=None),
+                end="",
+            )
+        else:
+            # Ignore unsupported message types silently
+            return
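
A minimal sketch of driving the new unified handler with the message dicts it expects (the message text here is arbitrary; with the trust setting enabled, everything except 'content' is suppressed):

from janito.agent.rich_message_handler import RichMessageHandler

handler = RichMessageHandler()
handler.handle_message({"type": "info", "message": "Running tool... "})
handler.handle_message({"type": "success", "message": "Done"})
handler.handle_message({"type": "content", "message": "**Result:** all checks passed"})
# Non-dict payloads raise TypeError.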

janito/agent/runtime_config.py CHANGED

@@ -1,22 +1,26 @@
 from .config import BaseConfig, effective_config
 
+
 class RuntimeConfig(BaseConfig):
     """In-memory only config, reset on restart"""
+
     pass
 
+
 runtime_config = RuntimeConfig()
 
+
 class UnifiedConfig:
     """
     Config lookup order:
     1. runtime_config (in-memory, highest priority)
     2. effective_config (local/global, read-only)
     """
+
     def __init__(self, runtime_cfg, effective_cfg):
         self.runtime_cfg = runtime_cfg
         self.effective_cfg = effective_cfg
 
-
     def get(self, key, default=None):
         val = self.runtime_cfg.get(key)
         if val is not None:
@@ -28,4 +32,5 @@ class UnifiedConfig:
         merged.update(self.runtime_cfg.all())
         return merged
 
+
 unified_config = UnifiedConfig(runtime_config, effective_config)
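
UnifiedConfig's lookup order (in-memory runtime config first, then the persisted effective config) is unchanged by the whitespace-only edits above. For illustration, the 'trust' key read in rich_message_handler.py resolves like this:

from janito.agent.runtime_config import runtime_config, unified_config

# unified_config consults runtime_config first and falls back to effective_config,
# so an in-memory override wins for the current session only.
trust = unified_config.get("trust", False)
print(trust)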

janito/agent/{tools/tool_base.py → tool_base.py}

@@ -1,9 +1,11 @@
 from abc import ABC, abstractmethod
 
+
 class ToolBase(ABC):
     """
     Base class for all tools. Inherit from this class to implement a new tool.
     """
+
     def __init__(self):
         self.progress_messages = []
         self._progress_callback = None  # Will be set by ToolHandler if available
@@ -25,9 +27,6 @@ class ToolBase(ABC):
         Returns:
             Any: The result of the tool execution.
         """
-        """
-        Trigger the tool's action. Must be implemented by subclasses.
-        """
         pass
 
     def update_progress(self, progress: dict):
@@ -35,17 +34,25 @@ class ToolBase(ABC):
         Report progress. Subclasses can override this to customize progress reporting.
         """
         self.progress_messages.append(progress)
-        if hasattr(self,
+        if hasattr(self, "_progress_callback") and self._progress_callback:
             self._progress_callback(progress)
 
     def report_info(self, message: str):
-        self.update_progress(
+        self.update_progress(
+            {"type": "info", "tool": self.__class__.__name__, "message": message}
+        )
 
     def report_success(self, message: str):
-        self.update_progress(
+        self.update_progress(
+            {"type": "success", "tool": self.__class__.__name__, "message": message}
+        )
 
     def report_error(self, message: str):
-        self.update_progress(
+        self.update_progress(
+            {"type": "error", "tool": self.__class__.__name__, "message": message}
+        )
 
     def report_warning(self, message: str):
-        self.update_progress(
+        self.update_progress(
+            {"type": "warning", "tool": self.__class__.__name__, "message": message}
+        )
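
Finally, a sketch of the progress-reporting contract that the report_* helpers above implement. The name of the abstract action method is not visible in this hunk, so `call` below is only an assumed placeholder; the dicts passed to the callback match the shapes shown above.

from janito.agent.tool_base import ToolBase


class EchoTool(ToolBase):
    """Illustrative only; not a tool shipped with the package."""

    def call(self, text: str) -> str:   # assumed name for the abstract action method
        self.report_info(f"echoing {text!r}")
        self.report_success("done")
        return text


tool = EchoTool()
tool._progress_callback = print          # normally wired up by the tool handler
tool.call("hello")
# Each report_* call funnels through update_progress() with a dict such as
# {"type": "info", "tool": "EchoTool", "message": "echoing 'hello'"}.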