janito 1.6.0__py3-none-any.whl → 1.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- janito/__init__.py +1 -1
- janito/agent/config.py +2 -2
- janito/agent/config_defaults.py +1 -0
- janito/agent/conversation.py +4 -1
- janito/agent/openai_client.py +2 -0
- janito/agent/platform_discovery.py +90 -0
- janito/agent/profile_manager.py +83 -91
- janito/agent/rich_message_handler.py +72 -64
- janito/agent/templates/profiles/system_prompt_template_base.toml +76 -0
- janito/agent/templates/profiles/system_prompt_template_default.toml +3 -0
- janito/agent/templates/profiles/system_prompt_template_technical.toml +13 -0
- janito/agent/tests/test_prompt_toml.py +61 -0
- janito/agent/tools/__init__.py +4 -0
- janito/agent/tools/ask_user.py +8 -2
- janito/agent/tools/create_directory.py +27 -10
- janito/agent/tools/find_files.py +2 -10
- janito/agent/tools/get_file_outline.py +29 -0
- janito/agent/tools/get_lines.py +9 -10
- janito/agent/tools/memory.py +68 -0
- janito/agent/tools/run_bash_command.py +79 -60
- janito/agent/tools/run_powershell_command.py +153 -0
- janito/agent/tools/run_python_command.py +4 -0
- janito/agent/tools/search_files.py +0 -6
- janito/cli/_print_config.py +1 -1
- janito/cli/config_commands.py +1 -1
- janito/cli/main.py +1 -1
- janito/cli/runner/__init__.py +0 -2
- janito/cli/runner/cli_main.py +3 -13
- janito/cli/runner/config.py +4 -2
- janito/cli/runner/scan.py +22 -9
- janito/cli_chat_shell/chat_loop.py +13 -9
- janito/cli_chat_shell/chat_ui.py +2 -2
- janito/cli_chat_shell/commands/__init__.py +2 -0
- janito/cli_chat_shell/commands/sum.py +49 -0
- janito/cli_chat_shell/load_prompt.py +47 -8
- janito/cli_chat_shell/ui.py +8 -2
- janito/web/app.py +6 -9
- {janito-1.6.0.dist-info → janito-1.7.0.dist-info}/METADATA +17 -9
- {janito-1.6.0.dist-info → janito-1.7.0.dist-info}/RECORD +43 -35
- {janito-1.6.0.dist-info → janito-1.7.0.dist-info}/WHEEL +0 -0
- {janito-1.6.0.dist-info → janito-1.7.0.dist-info}/entry_points.txt +0 -0
- {janito-1.6.0.dist-info → janito-1.7.0.dist-info}/licenses/LICENSE +0 -0
- {janito-1.6.0.dist-info → janito-1.7.0.dist-info}/top_level.txt +0 -0
janito/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "1.6.0"
+__version__ = "1.7.0"
janito/agent/config.py
CHANGED
@@ -38,7 +38,7 @@ class FileConfig(BaseConfig):
 
     def load(self):
         if self.path.exists():
-            with open(self.path, "r") as f:
+            with open(self.path, "r", encoding="utf-8") as f:
                 self._data = json.load(f)
             # Remove keys with value None (null in JSON)
             self._data = {k: v for k, v in self._data.items() if v is not None}
@@ -48,7 +48,7 @@ class FileConfig(BaseConfig):
 
     def save(self):
         self.path.parent.mkdir(parents=True, exist_ok=True)
-        with open(self.path, "w") as f:
+        with open(self.path, "w", encoding="utf-8") as f:
            json.dump(self._data, f, indent=2)
 
 
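The only change to FileConfig is the explicit encoding argument, which pins the config file to UTF-8 instead of whatever locale.getpreferredencoding() returns (often cp1252 on Windows). A minimal, self-contained illustration of the same pattern, not taken from the package:

import json
import os
import tempfile

# Round-trip a config dict through a UTF-8 encoded JSON file, as FileConfig now does.
data = {"provider": "openai", "note": "café"}
path = os.path.join(tempfile.mkdtemp(), "config.json")

with open(path, "w", encoding="utf-8") as f:
    json.dump(data, f, indent=2)

with open(path, "r", encoding="utf-8") as f:
    assert json.load(f) == data
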
janito/agent/config_defaults.py
CHANGED
janito/agent/conversation.py
CHANGED
@@ -72,6 +72,7 @@ class ConversationHandler:
             )
         else:
             response = retry_api_call(api_call)
+        print("[DEBUG] OpenAI API raw response:", repr(response))
 
         if verbose_response:
             pprint.pprint(response)
@@ -83,6 +84,8 @@ class ConversationHandler:
         usage = getattr(response, "usage", None)
         usage_info = (
             {
+                # DEBUG: Show usage extraction
+                "_debug_raw_usage": getattr(response, "usage", None),
                 "prompt_tokens": getattr(usage, "prompt_tokens", None),
                 "completion_tokens": getattr(usage, "completion_tokens", None),
                 "total_tokens": getattr(usage, "total_tokens", None),
@@ -126,4 +129,4 @@ class ConversationHandler:
                     "content": tr["content"],
                 }
             )
-        raise MaxRoundsExceededError("Max conversation rounds exceeded")
+        raise MaxRoundsExceededError(f"Max conversation rounds exceeded ({max_rounds})")
janito/agent/openai_client.py
CHANGED
@@ -72,6 +72,7 @@ class Agent:
         max_rounds=50,
         verbose_events=False,
         stream=False,
+        verbose_stream=False,
     ):
         """
         Start a chat conversation with the agent.
@@ -101,6 +102,7 @@ class Agent:
                 max_tokens=max_tokens,
                 verbose_events=verbose_events,
                 stream=stream,
+                verbose_stream=verbose_stream,
             )
         except ProviderError as e:
             error_data = getattr(e, "error_data", {}) or {}
janito/agent/platform_discovery.py
ADDED
@@ -0,0 +1,90 @@
+import platform
+import sys
+
+
+def detect_shell():
+    import os
+    import subprocess
+
+    shell_info = None
+
+    # 1. Detect shell (prefer Git Bash if detected)
+    if os.environ.get("MSYSTEM"):
+        shell_info = f"Git Bash ({os.environ.get('MSYSTEM')})"
+    # 2. Detect WSL (before PowerShell)
+    elif os.environ.get("WSL_DISTRO_NAME"):
+        shell = os.environ.get("SHELL")
+        shell_name = shell.split("/")[-1] if shell else "unknown"
+        distro = os.environ.get("WSL_DISTRO_NAME")
+        shell_info = f"{shell_name} (WSL: {distro})"
+    else:
+        # 3. Try to detect PowerShell by running $host.Name
+        try:
+            result = subprocess.run(
+                ["powershell.exe", "-NoProfile", "-Command", "$host.Name"],
+                capture_output=True,
+                text=True,
+                timeout=2,
+            )
+            if result.returncode == 0 and "ConsoleHost" in result.stdout:
+                shell_info = "PowerShell"
+            else:
+                shell_info = None
+        except Exception:
+            shell_info = None
+
+    # 4. If not PowerShell, check SHELL
+    if not shell_info:
+        shell = os.environ.get("SHELL")
+        if shell:
+            shell_info = shell
+        else:
+            # 5. If not, check COMSPEC for PowerShell or cmd.exe
+            comspec = os.environ.get("COMSPEC")
+            if comspec:
+                if "powershell" in comspec.lower():
+                    shell_info = "PowerShell"
+                elif "cmd" in comspec.lower():
+                    shell_info = "cmd.exe"
+                else:
+                    shell_info = "Unknown shell"
+            else:
+                shell_info = "Unknown shell"
+
+    # 6. Always append TERM and TERM_PROGRAM if present
+    term_env = os.environ.get("TERM")
+    if term_env:
+        shell_info += f" [TERM={term_env}]"
+
+    term_program = os.environ.get("TERM_PROGRAM")
+    if term_program:
+        shell_info += f" [TERM_PROGRAM={term_program}]"
+
+    return shell_info
+
+
+def get_platform_name():
+    sys_platform = platform.system().lower()
+    if sys_platform.startswith("win"):
+        return "windows"
+    elif sys_platform.startswith("linux"):
+        return "linux"
+    elif sys_platform.startswith("darwin"):
+        return "darwin"
+    return sys_platform
+
+
+def get_python_version():
+    return platform.python_version()
+
+
+def is_windows():
+    return sys.platform.startswith("win")
+
+
+def is_linux():
+    return sys.platform.startswith("linux")
+
+
+def is_mac():
+    return sys.platform.startswith("darwin")
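The new module keeps only plain functions and no package-level state, so the helpers can be exercised directly. A small usage sketch, not part of the package:

from janito.agent.platform_discovery import (
    detect_shell,
    get_platform_name,
    get_python_version,
    is_windows,
)

# Print the same environment facts that the system prompt will embed.
print("platform:", get_platform_name())   # "windows", "linux", "darwin", or the raw value
print("python:  ", get_python_version())  # e.g. "3.11.9"
print("shell:   ", detect_shell())        # e.g. "/bin/bash [TERM=xterm-256color]"
if is_windows():
    print("PowerShell/cmd.exe detection applies on this platform")
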
janito/agent/profile_manager.py
CHANGED
@@ -1,106 +1,48 @@
-from janito.agent.conversation import ConversationHandler
 from openai import OpenAI
-import …
+import toml
+from string import Template
 from pathlib import Path
-import …
-import …
+from janito.agent.platform_discovery import get_platform_name, get_python_version
+from janito.agent.platform_discovery import detect_shell
 
 
 class AgentProfileManager:
+    def _report_template_not_found(self, template_name, search_dirs):
+        import sys
+
+        search_dirs_str = ", ".join(str(d) for d in search_dirs)
+        print(
+            f"❗ TemplateNotFound: '{template_name}'\n  Searched paths: {search_dirs_str}",
+            file=sys.stderr,
+        )
+
     REFERER = "www.janito.dev"
     TITLE = "Janito"
 
+    def set_role(self, new_role):
+        """Set the agent's role and force prompt re-rendering."""
+        self.role = new_role
+        self.refresh_prompt()
+
     def parse_style_string(self, style: str):
         if "-" in style:
             parts = style.split("-")
             return parts[0], parts[1:]
         return style, []
 
-    def get_platform_name(self):
-        sys_platform = platform.system().lower()
-        if sys_platform.startswith("win"):
-            return "windows"
-        elif sys_platform.startswith("linux"):
-            return "linux"
-        elif sys_platform.startswith("darwin"):
-            return "darwin"
-        return sys_platform
-
-    def get_python_version(self):
-        return platform.python_version()
-
-    def get_shell_info(self):
-        shell = os.environ.get("SHELL")
-        term = os.environ.get("TERM")
-        term_program = os.environ.get("TERM_PROGRAM")
-        if shell:
-            info = shell
-        elif os.environ.get("MSYSTEM"):
-            info = f"Git Bash ({os.environ.get('MSYSTEM')})"
-        elif os.environ.get("WSL_DISTRO_NAME"):
-            info = f"WSL ({os.environ.get('WSL_DISTRO_NAME')})"
-        else:
-            comspec = os.environ.get("COMSPEC")
-            if comspec:
-                if "powershell" in comspec.lower():
-                    info = "PowerShell"
-                elif "cmd" in comspec.lower():
-                    info = "cmd.exe"
-                else:
-                    info = "Unknown shell"
-            else:
-                info = "Unknown shell"
-        if term:
-            info += f", TERM={term}"
-        if term_program and term_program.lower() == "vscode":
-            info += ", running in VSCode"
-        home_dir = os.path.expanduser("~")
-        if home_dir:
-            info += f", HOME={home_dir}"
-        return info
-        return "unknown"
-
     def render_prompt(self):
         main_style, features = self.parse_style_string(self.interaction_style)
         base_dir = Path(__file__).parent / "templates"
         profiles_dir = base_dir / "profiles"
-
-        loader = jinja2.ChoiceLoader(
-            [
-                jinja2.FileSystemLoader(str(profiles_dir)),
-                jinja2.FileSystemLoader(str(features_dir)),
-            ]
-        )
-        env = jinja2.Environment(loader=loader)
+        # Determine which TOML profile to use
         if main_style == "technical":
-            main_template = "system_prompt_template_technical.…
+            main_template = profiles_dir / "system_prompt_template_technical.toml"
         else:
-            main_template = "…
-… (4 removed lines truncated)
-        # Inject tech.txt existence and content
-        tech_txt_path = Path(".janito") / "tech.txt"
-        tech_txt_exists = tech_txt_path.exists()
-        tech_txt_content = ""
-        if tech_txt_exists:
-            try:
-                tech_txt_content = tech_txt_path.read_text(encoding="utf-8")
-            except Exception:
-                tech_txt_content = "⚠️ Error reading janito/tech.txt."
-        template = env.get_template(main_template)
-        return template.render(
-            role=self.role,
-            interaction_mode=self.interaction_mode,
-            platform=platform_name,
-            python_version=python_version,
-            shell_info=shell_info,
-            tech_txt_exists=tech_txt_exists,
-            tech_txt_content=tech_txt_content,
-        )
-        parent_template = main_template
-        # Inject tech.txt existence and content for feature templates as well
+            main_template = profiles_dir / "system_prompt_template_default.toml"
+        # Gather context variables
+        platform_name = get_platform_name()
+        python_version = get_python_version()
+        shell_info = detect_shell()
         tech_txt_path = Path(".janito") / "tech.txt"
         tech_txt_exists = tech_txt_path.exists()
         tech_txt_content = ""
@@ -115,16 +57,58 @@ class AgentProfileManager:
             "platform": platform_name,
             "python_version": python_version,
             "shell_info": shell_info,
-            "tech_txt_exists": tech_txt_exists,
+            "tech_txt_exists": str(tech_txt_exists),
             "tech_txt_content": tech_txt_content,
         }
+
+        # Load and merge TOML templates (handle inheritance)
+        def load_toml_with_inheritance(path):
+            data = toml.load(path)
+            if "extends" in data:
+                base_path = profiles_dir / data["extends"]
+                base_data = toml.load(base_path)
+                base_data.update({k: v for k, v in data.items() if k != "extends"})
+                return base_data
+            return data
+
+        toml_data = load_toml_with_inheritance(main_template)
+        # Merge in feature-specific TOML if any
         for feature in features:
-            feature_template = f"system_prompt_template_{feature}.…
-… (5 removed lines truncated)
+            feature_template = profiles_dir / f"system_prompt_template_{feature}.toml"
+            if feature_template.exists():
+                feature_data = toml.load(feature_template)
+                toml_data.update(
+                    {k: v for k, v in feature_data.items() if k != "extends"}
+                )
+
+        # Render the TOML structure as a prompt string
+        def render_section(section):
+            if isinstance(section, dict):
+                out = []
+                for k, v in section.items():
+                    if isinstance(v, list):
+                        out.append(f"{k}:")
+                        for item in v:
+                            out.append(f"  - {item}")
+                    else:
+                        out.append(f"{k}: {v}")
+                return "\n".join(out)
+            elif isinstance(section, list):
+                return "\n".join(f"- {item}" for item in section)
+            else:
+                return str(section)
+
+        prompt_sections = []
+        for section, value in toml_data.items():
+            if section == "extends":
+                continue
+            prompt_sections.append(f"[{section}]")
+            prompt_sections.append(render_section(value))
+            prompt_sections.append("")
+        prompt_template = "\n".join(prompt_sections)
+        # Substitute variables
+        prompt = Template(prompt_template).safe_substitute(context)
+        return prompt
 
     def __init__(
         self,
@@ -161,7 +145,15 @@ class AgentProfileManager:
             api_key=api_key,
             default_headers={"HTTP-Referer": self.REFERER, "X-Title": self.TITLE},
         )
-
+        from janito.agent.openai_client import Agent
+
+        self.agent = Agent(
+            api_key=api_key,
+            model=model,
+            base_url=base_url,
+            use_azure_openai=use_azure_openai,
+            azure_openai_api_version=azure_openai_api_version,
+        )
         self.system_prompt_template = None
 
     def refresh_prompt(self):
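render_prompt now assembles the system prompt from TOML profiles instead of Jinja2 templates: the selected profile is loaded, shallow-merged over the file named in its extends key, flattened section by section into text, and finally passed through string.Template.safe_substitute. The merge helper above is a closure inside render_prompt; the standalone sketch below only mirrors its semantics and is not importable from the package:

import toml
from pathlib import Path


def load_with_inheritance(profile_path: Path, profiles_dir: Path) -> dict:
    # Mirrors render_prompt's rule: top-level keys of the child profile
    # override the base, and the "extends" key itself is dropped.
    data = toml.load(profile_path)
    if "extends" in data:
        base = toml.load(profiles_dir / data["extends"])
        base.update({k: v for k, v in data.items() if k != "extends"})
        return base
    return data

Because the merge is shallow, a child section such as [agent_profile] in the technical profile replaces the whole base section rather than being merged key by key.
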
janito/agent/rich_message_handler.py
CHANGED
@@ -1,64 +1,72 @@
-from rich.console import Console
-from janito.agent.runtime_config import runtime_config, unified_config
-
-console = Console()
-
-
-class RichMessageHandler:
-    """
-    Unified message handler for all output (tool, agent, system) using Rich for styled output.
-    """
-
-    def __init__(self):
-        self.console = console
-
-    def handle_message(self, msg, msg_type=None):
-        """
-        Handles a dict with 'type' and 'message'.
-        All messages must be dicts. Raises if not.
-        """
-        # Check trust config: suppress all output except 'content' if enabled
-        trust = runtime_config.get("trust")
-        if trust is None:
-            trust = unified_config.get("trust", False)
-
-        from rich.markdown import Markdown
-
-        if not isinstance(msg, dict):
-            raise TypeError(
-                f"RichMessageHandler.handle_message expects a dict with 'type' and 'message', got {type(msg)}: {msg!r}"
-            )
-
-        msg_type = msg.get("type", "info")
-        message = msg.get("message", "")
-… (30 removed lines truncated)
+from rich.console import Console
+from janito.agent.runtime_config import runtime_config, unified_config
+
+console = Console()
+
+
+class RichMessageHandler:
+    """
+    Unified message handler for all output (tool, agent, system) using Rich for styled output.
+    """
+
+    def __init__(self):
+        self.console = console
+
+    def handle_message(self, msg, msg_type=None):
+        """
+        Handles a dict with 'type' and 'message'.
+        All messages must be dicts. Raises if not.
+        """
+        # Check trust config: suppress all output except 'content' if enabled
+        trust = runtime_config.get("trust")
+        if trust is None:
+            trust = unified_config.get("trust", False)
+
+        from rich.markdown import Markdown
+
+        if not isinstance(msg, dict):
+            raise TypeError(
+                f"RichMessageHandler.handle_message expects a dict with 'type' and 'message', got {type(msg)}: {msg!r}"
+            )
+
+        msg_type = msg.get("type", "info")
+        message = msg.get("message", "")
+
+        def _remove_surrogates(text):
+            return "".join(c for c in text if not 0xD800 <= ord(c) <= 0xDFFF)
+
+        safe_message = (
+            _remove_surrogates(message) if isinstance(message, str) else message
+        )
+
+        if trust and msg_type != "content":
+            return  # Suppress all except content
+        if msg_type == "content":
+            self.console.print(Markdown(safe_message))
+        elif msg_type == "info":
+            self.console.print(safe_message, style="cyan", end="")
+        elif msg_type == "success":
+            self.console.print(safe_message, style="bold green", end="\n")
+        elif msg_type == "error":
+            self.console.print(safe_message, style="bold red", end="\n")
+        elif msg_type == "progress":
+            self._handle_progress(safe_message)
+        elif msg_type == "warning":
+            self.console.print(safe_message, style="bold yellow", end="\n")
+        elif msg_type == "stdout":
+            from rich.text import Text
+
+            self.console.print(
+                Text(safe_message, style="on #003300", no_wrap=True, overflow=None),
+                end="",
+            )
+        elif msg_type == "stderr":
+            from rich.text import Text
+
+            self.console.print(
+                Text(safe_message, style="on #330000", no_wrap=True, overflow=None),
+                end="",
+            )
+        else:
+            # Ignore unsupported message types silently
+            return
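The rewritten handler keeps the dict-based message contract: every message is a dict with "type" and "message" keys, dispatch happens on the type, and non-dict input raises TypeError. A short usage sketch with hypothetical messages, not from the package:

from janito.agent.rich_message_handler import RichMessageHandler

handler = RichMessageHandler()
handler.handle_message({"type": "info", "message": "Scanning workspace... "})
handler.handle_message({"type": "success", "message": "Scan complete"})
handler.handle_message({"type": "content", "message": "# Result\n\n- 3 files inspected"})

try:
    handler.handle_message("plain string")  # non-dict messages are rejected
except TypeError as exc:
    print(exc)
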
janito/agent/templates/profiles/system_prompt_template_base.toml
ADDED
@@ -0,0 +1,76 @@
+# Base system prompt template for Janito agent styles
+[agent_profile]
+role = "${role}"
+description = "Agent for analysis and development tool operating on files and directories using text-based operations."
+
+[platform]
+platform = "${platform}"
+python_version = "${python_version}"
+shell_info = "${shell_info}"
+notes = """
+- Always pay careful attention to platform-specific path conventions (e.g., path separator, drive letters, case sensitivity), Python version differences, and shell/command syntax when reading, writing, or executing files and commands.
+- If running in a non-standard shell (such as Git Bash or WSL on Windows), be extra cautious about path and command compatibility.
+"""
+
+[tech]
+exists = "${tech_txt_exists}"
+content = """
+${tech_txt_content}
+"""
+
+[operational_workflow]
+steps = """
+- Provide a concise plan before calling any tool.
+- Plan changes only after gathering all the necessary information.
+- Always execute the plan immediately after presenting it, unless the user requests otherwise.
+"""
+
+[safety_guidelines]
+steps = """
+- Always update all references and validate the system before removing or renaming files.
+- Only remove or rename files after all dependent code has been updated and validated.
+- Prefer the following order for destructive operations:
+  1. Update all imports/references.
+  2. Validate the changes using the tools available to test the type of file changed.
+  3. Remove or rename the file.
+  4. Re-validate the system.
+- Roll back changes if any validation step fails.
+"""
+
+[context]
+steps = """
+- Before answering, always examine the contents of files that are directly related to the user's question or request, and explore the project structure to understand existing directories, files, and their purposes.
+- Always review `README_structure.txt` before conducting file-specific searches.
+- Unless specified otherwise, look for the files that match the questions context.
+- Explore files that might be relevant to the current task.
+- Before providing a detailed plan, always examine the contents of files that are directly related to the user's question or request.
+"""
+
+[analysis]
+steps = """
+- In case of missing code or functions, look into the .bak files and check git diff/history for recent changes.
+"""
+
+[decision_policy]
+steps = """
+- Whenever there is uncertainty, ambiguity, missing information, or multiple valid options, request clarification or input from the user. Otherwise, proceed and inform the user of the decision_policy made.
+- When making changes, prefer optimal, effective, and natural edits. Perform larger refactors, reorganizations, or multi-region edits when they lead to better results, rather than restricting to minimal or single-region changes. Avoid unnecessary splitting of text ranges. Validate changes using available tools before proceeding.
+"""
+
+[finalization]
+steps = """
+- Review the README content if there are user-exposed or public API changes.
+- Update documentation and metadata (e.g., README_structure.txt) to reflect new or modified files.
+- When updating documentation, recommend (when appropriate) adding a footer or note such as: _'generated by janito.dev'_
+"""
+
+[interaction_mode]
+current = "${interaction_mode}"
+notes = """
+- Always adapt your clarification and interaction strategy to the current mode.
+"""
+
+[function_call_summary]
+steps = """
+- Before executing any function calls, emit a concise summary message describing the planned actions, reasoning, and expected outcomes.
+"""
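The ${...} placeholders above are not a TOML feature; render_prompt flattens the merged sections to plain text and then applies string.Template.safe_substitute with the gathered context. A minimal sketch of that substitution step, with made-up values:

from string import Template

section_text = 'platform = "${platform}"\npython_version = "${python_version}"\nshell_info = "${shell_info}"'
context = {"platform": "linux", "python_version": "3.11.9", "shell_info": "/bin/bash [TERM=xterm]"}

# safe_substitute fills known placeholders and leaves unknown ${...} markers untouched instead of raising.
print(Template(section_text).safe_substitute(context))
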
janito/agent/templates/profiles/system_prompt_template_technical.toml
ADDED
@@ -0,0 +1,13 @@
+# Inherits from system_prompt_template_base.toml
+extends = "system_prompt_template_base.toml"
+
+[agent_profile]
+description = "This Agent follows its Agent Profile, including system settings, role, and toolchain constraints."
+
+[technical_workflow]
+steps = """
+- Enumerate and validate the current file system state before any operation.
+- Use the tools available to test and validate the type of file changed, inferring dependencies, side effects, and potential conflicts.
+- Explicitly handle errors, edge cases, and race conditions. Log all exceptions and recovery actions.
+- Maintain traceability: document all actions, decision_policys, and state transitions.
+"""
janito/agent/tests/test_prompt_toml.py
ADDED
@@ -0,0 +1,61 @@
+from janito.agent.profile_manager import AgentProfileManager
+
+
+def test_prompt_default(monkeypatch):
+    mgr = AgentProfileManager(
+        api_key="sk-test",
+        model="gpt-test",
+        role="software engineer",
+        interaction_style="default",
+        interaction_mode="chat",
+        verbose_tools=False,
+        base_url="https://test",
+        azure_openai_api_version="2023-05-15",
+        use_azure_openai=False,
+    )
+    prompt = mgr.render_prompt()
+    assert "[agent_profile]" in prompt
+    assert "software engineer" in prompt
+    assert "[platform]" in prompt
+    assert "Always pay careful attention" in prompt
+    assert "[function_call_summary]" in prompt
+
+
+def test_prompt_technical(monkeypatch):
+    mgr = AgentProfileManager(
+        api_key="sk-test",
+        model="gpt-test",
+        role="software engineer",
+        interaction_style="technical",
+        interaction_mode="chat",
+        verbose_tools=False,
+        base_url="https://test",
+        azure_openai_api_version="2023-05-15",
+        use_azure_openai=False,
+    )
+    prompt = mgr.render_prompt()
+    assert "[agent_profile]" in prompt
+    assert "strict adherence" in prompt
+    assert "[technical_workflow]" in prompt
+    assert "Enumerate and validate the current file system state" in prompt
+    assert "[function_call_summary]" in prompt
+
+
+def test_prompt_inheritance(monkeypatch):
+    mgr = AgentProfileManager(
+        api_key="sk-test",
+        model="gpt-test",
+        role="software engineer",
+        interaction_style="technical",
+        interaction_mode="chat",
+        verbose_tools=False,
+        base_url="https://test",
+        azure_openai_api_version="2023-05-15",
+        use_azure_openai=False,
+    )
+    prompt = mgr.render_prompt()
+    # Should inherit context, analysis, etc. from base
+    assert "[context]" in prompt
+    assert "[analysis]" in prompt
+    assert "[decision_policy]" in prompt
+    assert "[finalization]" in prompt