janito 0.10.0__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.
janito/chat_history.py ADDED
@@ -0,0 +1,117 @@
+ """
+ Chat history module for Janito.
+ Handles storing and loading chat history.
+ """
+ import os
+ import json
+ import datetime
+ from pathlib import Path
+ from typing import List, Dict, Any, Optional
+ from janito.config import get_config
+
+ def ensure_chat_history_dir() -> Path:
+     """
+     Ensure the chat history directory exists.
+
+     Returns:
+         Path: Path to the chat history directory
+     """
+     workspace_dir = get_config().workspace_dir
+     chat_history_dir = Path(workspace_dir) / ".janito" / "chat_history"
+     chat_history_dir.mkdir(parents=True, exist_ok=True)
+     return chat_history_dir
+
+
+
+ def store_conversation(query: str, response: str, agent=None) -> None:
+     """
+     Store a conversation in the chat history.
+
+     Args:
+         query: The user's query
+         response: The agent's response
+         agent: Optional agent instance for using get_messages method
+     """
+     chat_history_dir = ensure_chat_history_dir()
+     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+     filename = f"{timestamp}.json"
+
+     # Create the conversation data
+     conversation = {
+         "timestamp": timestamp,
+         "query": query,
+         "response": response
+     }
+
+     # Write to file
+     with open(chat_history_dir / filename, "w", encoding="utf-8") as f:
+         json.dump(conversation, f, ensure_ascii=False, indent=2)
+
+ def load_recent_conversations(count: int = 5) -> List[Dict[str, str]]:
+     """
+     Load the most recent conversations from the chat history.
+
+     Args:
+         count: Number of conversations to load
+
+     Returns:
+         List[Dict[str, str]]: List of conversations
+     """
+     chat_history_dir = ensure_chat_history_dir()
+
+     # Get all JSON files in the chat history directory
+     history_files = list(chat_history_dir.glob("*.json"))
+
+     # Sort by filename (which includes timestamp)
+     history_files.sort(reverse=True)
+
+     # Load the most recent conversations
+     conversations = []
+     for file_path in history_files[:count]:
+         try:
+             with open(file_path, "r", encoding="utf-8") as f:
+                 conversation = json.load(f)
+                 conversations.append(conversation)
+         except Exception as e:
+             print(f"Error loading chat history file {file_path}: {e}")
+
+     return conversations
+
+ def format_conversation_for_context(conversation: Dict[str, str]) -> str:
+     """
+     Format a conversation for inclusion in the context.
+
+     Args:
+         conversation: The conversation to format
+
+     Returns:
+         str: The formatted conversation
+     """
+     timestamp = conversation.get("timestamp", "Unknown time")
+     query = conversation.get("query", "")
+     response = conversation.get("response", "")
+
+     formatted_time = datetime.datetime.strptime(timestamp, "%Y%m%d_%H%M%S").strftime("%Y-%m-%d %H:%M:%S")
+
+     return f"--- Conversation from {formatted_time} ---\nUser: {query}\n\nAssistant: {response}\n\n"
+
+ def get_chat_history_context(count: int = 5) -> str:
+     """
+     Get the chat history formatted for inclusion in the agent's context.
+
+     Args:
+         count: Number of conversations to include
+
+     Returns:
+         str: The formatted chat history
+     """
+     conversations = load_recent_conversations(count)
+
+     if not conversations:
+         return ""
+
+     context = "# Previous conversations:\n\n"
+     for conversation in conversations:
+         context += format_conversation_for_context(conversation)
+
+     return context
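For orientation, here is a minimal usage sketch of the new module; it is not part of the package, and it assumes janito 0.11.0 is installed and run from the workspace root (Config defaults workspace_dir to the current directory).

```python
# Hypothetical usage sketch of janito.chat_history (illustration only).
from janito.chat_history import store_conversation, get_chat_history_context

# Writes <workspace>/.janito/chat_history/<timestamp>.json
store_conversation("How do I run the tests?", "Use pytest from the project root.")

# Rebuilds a "# Previous conversations:" block from the 5 most recent files
context = get_chat_history_context(count=5)
print(context)
```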
janito/config.py CHANGED
@@ -3,8 +3,9 @@ Configuration module for Janito.
  Provides a singleton Config class to access configuration values.
  """
  import os
+ import json
  from pathlib import Path
- from typing import Optional
+ from typing import Optional, Any, Dict
  import typer

  class Config:
@@ -15,8 +16,41 @@ class Config:
          if cls._instance is None:
              cls._instance = super(Config, cls).__new__(cls)
              cls._instance._workspace_dir = os.getcwd()
-             cls._instance._debug_mode = False
+             cls._instance._verbose = False
+             cls._instance._history_context_count = 5
+             cls._instance._load_config()
          return cls._instance
+
+     def _load_config(self) -> None:
+         """Load configuration from file."""
+         config_path = Path(self._workspace_dir) / ".janito" / "config.json"
+         if config_path.exists():
+             try:
+                 with open(config_path, "r", encoding="utf-8") as f:
+                     config_data = json.load(f)
+                     if "history_context_count" in config_data:
+                         self._history_context_count = config_data["history_context_count"]
+                     if "debug_mode" in config_data:
+                         self._verbose = config_data["debug_mode"]
+             except Exception as e:
+                 print(f"Warning: Failed to load configuration: {str(e)}")
+
+     def _save_config(self) -> None:
+         """Save configuration to file."""
+         config_dir = Path(self._workspace_dir) / ".janito"
+         config_dir.mkdir(parents=True, exist_ok=True)
+         config_path = config_dir / "config.json"
+
+         config_data = {
+             "history_context_count": self._history_context_count,
+             "verbose": self._verbose
+         }
+
+         try:
+             with open(config_path, "w", encoding="utf-8") as f:
+                 json.dump(config_data, f, indent=2)
+         except Exception as e:
+             print(f"Warning: Failed to save configuration: {str(e)}")

      @property
      def workspace_dir(self) -> str:
@@ -47,15 +81,39 @@ class Config:

          self._workspace_dir = path

+     @property
+     def verbose(self) -> bool:
+         """Get the verbose mode status."""
+         return self._verbose
+
+     @verbose.setter
+     def verbose(self, value: bool) -> None:
+         """Set the verbose mode status."""
+         self._verbose = value
+
+     # For backward compatibility
      @property
      def debug_mode(self) -> bool:
-         """Get the debug mode status."""
-         return self._debug_mode
+         """Get the debug mode status (alias for verbose)."""
+         return self._verbose

      @debug_mode.setter
      def debug_mode(self, value: bool) -> None:
-         """Set the debug mode status."""
-         self._debug_mode = value
+         """Set the debug mode status (alias for verbose)."""
+         self._verbose = value
+
+     @property
+     def history_context_count(self) -> int:
+         """Get the number of previous conversations to include in context."""
+         return self._history_context_count
+
+     @history_context_count.setter
+     def history_context_count(self, count: int) -> None:
+         """Set the number of previous conversations to include in context."""
+         if count < 0:
+             raise ValueError("History context count must be a non-negative integer")
+         self._history_context_count = count
+         self._save_config()

  # Convenience function to get the config instance
  def get_config() -> Config:
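A short sketch of how the new settings behave, assuming this version is installed: the history_context_count setter validates the value and persists it via _save_config(), while debug_mode remains a backward-compatible alias for verbose.

```python
# Hypothetical usage sketch of the Config additions above (illustration only).
from janito.config import get_config

config = get_config()
config.history_context_count = 3   # must be >= 0; saved to .janito/config.json
config.verbose = True              # new canonical flag
print(config.debug_mode)           # True -- alias for verbose
```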
@@ -1,6 +1,4 @@
- You are a helpful AI assistant, working in a repository.
- Answer the user's questions accurately and concisely.
-
- When using str_replace_editor be aware that our files starting path is "." .
-
- Before performing any action, always check the structure of the project for paths that might be related to the request.
+ You are an expert software engineer, working in a project.
+ When using str_replace_editor be aware that our files starting path is "." .
+
+ If creating or editing files with a large number of lines, organize them into smaller files.
janito/token_report.py CHANGED
@@ -1,73 +1,145 @@
- """
- Module for generating token usage reports.
- """
-
- from rich.console import Console
- from claudine.token_tracking import MODEL_PRICING, DEFAULT_MODEL
-
- def generate_token_report(agent, verbose=False):
-     """
-     Generate a token usage report.
-
-     Args:
-         agent: The Claude agent instance
-         verbose: Whether to show detailed token usage information
-
-     Returns:
-         None - prints the report to the console
-     """
-     console = Console()
-     usage = agent.get_token_usage()
-     text_usage = usage.text_usage
-     tools_usage = usage.tools_usage
-
-     if verbose:
-         total_usage = usage.total_usage
-
-         # Get the pricing model
-         pricing = MODEL_PRICING.get(DEFAULT_MODEL)
-
-         # Calculate costs manually
-         text_input_cost = pricing.input_tokens.calculate_cost(text_usage.input_tokens)
-         text_output_cost = pricing.output_tokens.calculate_cost(text_usage.output_tokens)
-         tools_input_cost = pricing.input_tokens.calculate_cost(tools_usage.input_tokens)
-         tools_output_cost = pricing.output_tokens.calculate_cost(tools_usage.output_tokens)
-
-         # Format costs
-         format_cost = lambda cost: f"{cost * 100:.2f}¢" if cost < 1.0 else f"${cost:.6f}"
-
-         console.print("\n[bold blue]Detailed Token Usage:[/bold blue]")
-         console.print(f"Text Input tokens: {text_usage.input_tokens}")
-         console.print(f"Text Output tokens: {text_usage.output_tokens}")
-         console.print(f"Text Total tokens: {text_usage.input_tokens + text_usage.output_tokens}")
-         console.print(f"Tool Input tokens: {tools_usage.input_tokens}")
-         console.print(f"Tool Output tokens: {tools_usage.output_tokens}")
-         console.print(f"Tool Total tokens: {tools_usage.input_tokens + tools_usage.output_tokens}")
-         console.print(f"Total tokens: {total_usage.input_tokens + total_usage.output_tokens}")
-
-         console.print("\n[bold blue]Pricing Information:[/bold blue]")
-         console.print(f"Input pricing: ${pricing.input_tokens.cost_per_million_tokens}/million tokens")
-         console.print(f"Output pricing: ${pricing.output_tokens.cost_per_million_tokens}/million tokens")
-         console.print(f"Text Input cost: {format_cost(text_input_cost)}")
-         console.print(f"Text Output cost: {format_cost(text_output_cost)}")
-         console.print(f"Text Total cost: {format_cost(text_input_cost + text_output_cost)}")
-         console.print(f"Tool Input cost: {format_cost(tools_input_cost)}")
-         console.print(f"Tool Output cost: {format_cost(tools_output_cost)}")
-         console.print(f"Tool Total cost: {format_cost(tools_input_cost + tools_output_cost)}")
-         console.print(f"Total cost: {format_cost(text_input_cost + text_output_cost + tools_input_cost + tools_output_cost)}")
-
-         # Display per-tool breakdown if available
-         if usage.by_tool:
-             console.print("\n[bold blue]Per-Tool Breakdown:[/bold blue]")
-             for tool_name, tool_usage in usage.by_tool.items():
-                 tool_input_cost = pricing.input_tokens.calculate_cost(tool_usage.input_tokens)
-                 tool_output_cost = pricing.output_tokens.calculate_cost(tool_usage.output_tokens)
-                 console.print(f"  Tool: {tool_name}")
-                 console.print(f"    Input tokens: {tool_usage.input_tokens}")
-                 console.print(f"    Output tokens: {tool_usage.output_tokens}")
-                 console.print(f"    Total tokens: {tool_usage.input_tokens + tool_usage.output_tokens}")
-                 console.print(f"    Total cost: {format_cost(tool_input_cost + tool_output_cost)}")
-     else:
-         total_tokens = text_usage.input_tokens + text_usage.output_tokens + tools_usage.input_tokens + tools_usage.output_tokens
-         cost_info = agent.get_cost()
-         console.rule(f"[bold blue]Total tokens: {total_tokens} | Cost: {cost_info.format_total_cost()}[/bold blue]")
+ """
+ Module for generating token usage reports.
+ """
+
+ from rich.console import Console
+ from rich.panel import Panel
+
+ def generate_token_report(agent, verbose=False):
+     """
+     Generate a token usage report.
+
+     Args:
+         agent: The Claude agent instance
+         verbose: Whether to show detailed token usage information
+
+     Returns:
+         None - prints the report to the console
+     """
+     console = Console()
+     usage = agent.get_tokens()
+     cost = agent.get_token_cost()
+
+     text_usage = usage.text_usage
+     tools_usage = usage.tools_usage
+
+     if verbose:
+         total_usage = usage.total_usage
+
+         # Get costs from the cost object
+         text_input_cost = cost.input_cost
+         text_output_cost = cost.output_cost
+         text_cache_creation_cost = cost.cache_creation_cost
+         text_cache_read_cost = cost.cache_read_cost
+
+         tools_input_cost = cost.input_cost
+         tools_output_cost = cost.output_cost
+         tools_cache_creation_cost = cost.cache_creation_cost
+         tools_cache_read_cost = cost.cache_read_cost
+
+         # Format costs
+         format_cost = lambda cost: f"{cost * 100:.2f}¢" if cost < 1.0 else f"${cost:.6f}"
+
+         console.print("\n[bold blue]Detailed Token Usage:[/bold blue]")
+         console.print(f"Text Input tokens: {text_usage.input_tokens}")
+         console.print(f"Text Output tokens: {text_usage.output_tokens}")
+         console.print(f"Text Cache Creation tokens: {text_usage.cache_creation_input_tokens}")
+         console.print(f"Text Cache Read tokens: {text_usage.cache_read_input_tokens}")
+         console.print(f"Text Total tokens: {text_usage.input_tokens + text_usage.output_tokens + text_usage.cache_creation_input_tokens + text_usage.cache_read_input_tokens}")
+
+         console.print(f"Tool Input tokens: {tools_usage.input_tokens}")
+         console.print(f"Tool Output tokens: {tools_usage.output_tokens}")
+         console.print(f"Tool Cache Creation tokens: {tools_usage.cache_creation_input_tokens}")
+         console.print(f"Tool Cache Read tokens: {tools_usage.cache_read_input_tokens}")
+         console.print(f"Tool Total tokens: {tools_usage.input_tokens + tools_usage.output_tokens + tools_usage.cache_creation_input_tokens + tools_usage.cache_read_input_tokens}")
+
+         console.print(f"Total tokens: {total_usage.input_tokens + total_usage.output_tokens + total_usage.cache_creation_input_tokens + total_usage.cache_read_input_tokens}")
+
+         console.print("\n[bold blue]Pricing Information:[/bold blue]")
+         console.print(f"Text Input cost: {format_cost(text_input_cost)}")
+         console.print(f"Text Output cost: {format_cost(text_output_cost)}")
+         console.print(f"Text Cache Creation cost: {format_cost(text_cache_creation_cost)}")
+         console.print(f"Text Cache Read cost: {format_cost(text_cache_read_cost)}")
+         console.print(f"Text Total cost: {format_cost(text_input_cost + text_output_cost + text_cache_creation_cost + text_cache_read_cost)}")
+
+         console.print(f"Tool Input cost: {format_cost(tools_input_cost)}")
+         console.print(f"Tool Output cost: {format_cost(tools_output_cost)}")
+         console.print(f"Tool Cache Creation cost: {format_cost(tools_cache_creation_cost)}")
+         console.print(f"Tool Cache Read cost: {format_cost(tools_cache_read_cost)}")
+         console.print(f"Tool Total cost: {format_cost(tools_input_cost + tools_output_cost + tools_cache_creation_cost + tools_cache_read_cost)}")
+
+         console.print(f"Total cost: {format_cost(text_input_cost + text_output_cost + text_cache_creation_cost + text_cache_read_cost + tools_input_cost + tools_output_cost + tools_cache_creation_cost + tools_cache_read_cost)}")
+
+         # Show cache delta if available
+         if hasattr(cost, 'cache_delta') and cost.cache_delta:
+             cache_delta = cost.cache_delta
+             console.print(f"\n[bold green]Cache Savings:[/bold green] {format_cost(cache_delta)}")
+
+             # Calculate percentage savings
+             total_cost_without_cache = cost.total_cost + cache_delta
+             if total_cost_without_cache > 0:
+                 savings_percentage = (cache_delta / total_cost_without_cache) * 100
+                 console.print(f"[bold green]Cache Savings Percentage:[/bold green] {savings_percentage:.2f}%")
+                 console.print(f"[bold green]Cost without cache:[/bold green] {format_cost(total_cost_without_cache)}")
+                 console.print(f"[bold green]Cost with cache:[/bold green] {format_cost(cost.total_cost)}")
+
+         # Per-tool breakdown
+         if usage.by_tool:
+             console.print("\n[bold blue]Per-Tool Breakdown:[/bold blue]")
+             try:
+                 if hasattr(cost, 'by_tool') and cost.by_tool:
+                     for tool_name, tool_usage in usage.by_tool.items():
+                         tool_input_cost = cost.by_tool[tool_name].input_cost
+                         tool_output_cost = cost.by_tool[tool_name].output_cost
+                         tool_cache_creation_cost = cost.by_tool[tool_name].cache_creation_cost
+                         tool_cache_read_cost = cost.by_tool[tool_name].cache_read_cost
+                         tool_total_cost = tool_input_cost + tool_output_cost + tool_cache_creation_cost + tool_cache_read_cost
+
+                         console.print(f"  Tool: {tool_name}")
+                         console.print(f"    Input tokens: {tool_usage.input_tokens}")
+                         console.print(f"    Output tokens: {tool_usage.output_tokens}")
+                         console.print(f"    Cache Creation tokens: {tool_usage.cache_creation_input_tokens}")
+                         console.print(f"    Cache Read tokens: {tool_usage.cache_read_input_tokens}")
+                         console.print(f"    Total tokens: {tool_usage.input_tokens + tool_usage.output_tokens + tool_usage.cache_creation_input_tokens + tool_usage.cache_read_input_tokens}")
+                         console.print(f"    Total cost: {format_cost(tool_total_cost)}")
+                 else:
+                     # Calculate costs manually for each tool if cost.by_tool is not available
+                     for tool_name, tool_usage in usage.by_tool.items():
+                         # Estimate costs based on overall pricing
+                         total_tokens = tool_usage.input_tokens + tool_usage.output_tokens + tool_usage.cache_creation_input_tokens + tool_usage.cache_read_input_tokens
+                         estimated_cost = (total_tokens / (usage.total_usage.total_tokens + usage.total_usage.total_cache_tokens)) * cost.total_cost if usage.total_usage.total_tokens > 0 else 0
+
+                         console.print(f"  Tool: {tool_name}")
+                         console.print(f"    Input tokens: {tool_usage.input_tokens}")
+                         console.print(f"    Output tokens: {tool_usage.output_tokens}")
+                         console.print(f"    Cache Creation tokens: {tool_usage.cache_creation_input_tokens}")
+                         console.print(f"    Cache Read tokens: {tool_usage.cache_read_input_tokens}")
+                         console.print(f"    Total tokens: {tool_usage.input_tokens + tool_usage.output_tokens + tool_usage.cache_creation_input_tokens + tool_usage.cache_read_input_tokens}")
+                         console.print(f"    Total cost: {format_cost(estimated_cost)}")
+             except Exception as e:
+                 console.print(f"Error: {str(e)}")
+     else:
+         total_tokens = (text_usage.input_tokens + text_usage.output_tokens +
+                         text_usage.cache_creation_input_tokens + text_usage.cache_read_input_tokens +
+                         tools_usage.input_tokens + tools_usage.output_tokens +
+                         tools_usage.cache_creation_input_tokens + tools_usage.cache_read_input_tokens)
+
+         # Format costs
+         format_cost = lambda cost: f"{cost * 100:.2f}¢" if cost < 1.0 else f"${cost:.6f}"
+
+         # Prepare summary message
+         summary = f"Total tokens: {total_tokens} | Cost: {format_cost(cost.total_cost)}"
+
+         # Add cache savings if available
+         if hasattr(cost, 'cache_delta') and cost.cache_delta != 0:
+             cache_delta = cost.cache_delta
+             total_cost_without_cache = cost.total_cost + cache_delta
+             savings_percentage = 0
+             if total_cost_without_cache > 0:
+                 savings_percentage = (cache_delta / total_cost_without_cache) * 100
+
+             summary += f" | Cache savings: {format_cost(cache_delta)} ({savings_percentage:.1f}%)"
+
+         # Display with a rule
+         console.rule("[blue]Token Usage[/blue]")
+         console.print(f"[blue]{summary}[/blue]", justify="center")
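To show the agent interface the rewritten report now relies on, here is a self-contained sketch driven by stand-in objects. janito itself passes a real claudine agent; the SimpleNamespace objects and the numbers below are purely illustrative assumptions mirroring the attributes read above.

```python
# Hypothetical sketch: any object exposing get_tokens() and get_token_cost()
# with the attributes used above can drive generate_token_report.
from types import SimpleNamespace
from janito.token_report import generate_token_report

usage = SimpleNamespace(input_tokens=1200, output_tokens=450,
                        cache_creation_input_tokens=0, cache_read_input_tokens=800)
tokens = SimpleNamespace(text_usage=usage, tools_usage=usage, total_usage=usage, by_tool={})
cost = SimpleNamespace(input_cost=0.0036, output_cost=0.0068, cache_creation_cost=0.0,
                       cache_read_cost=0.0003, cache_delta=0.0021, total_cost=0.0107)
agent = SimpleNamespace(get_tokens=lambda: tokens, get_token_cost=lambda: cost)

generate_token_report(agent, verbose=True)   # detailed per-category breakdown
generate_token_report(agent, verbose=False)  # one-line summary under a rule
```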
janito/tools/__init__.py CHANGED
@@ -1,10 +1,21 @@
- """
- Janito tools package.
- """
-
- from .str_replace_editor import str_replace_editor
- from .find_files import find_files
- from .delete_file import delete_file
- from .search_text import search_text
-
- __all__ = ["str_replace_editor", "find_files", "delete_file", "search_text"]
+ """
+ Janito tools package.
+ """
+
+ from .str_replace_editor import str_replace_editor
+ from .find_files import find_files
+ from .delete_file import delete_file
+ from .search_text import search_text
+ from .replace_file import replace_file
+ from .prompt_user import prompt_user
+
+ __all__ = ["str_replace_editor", "find_files", "delete_file", "search_text", "replace_file", "prompt_user", "get_tools"]
+
+ def get_tools():
+     """
+     Get a list of all available tools.
+
+     Returns:
+         List of tool functions (excluding str_replace_editor which is passed separately)
+     """
+     return [find_files, delete_file, search_text, replace_file, prompt_user]
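A small sketch, assuming this release is installed, of what the new get_tools() helper returns; as its docstring notes, str_replace_editor is deliberately excluded because it is handed to the agent separately.

```python
# Hypothetical sketch: enumerating the tools janito now exposes for agent registration.
from janito.tools import get_tools, str_replace_editor

tools = get_tools()
print(len(tools))                   # 5: find_files, delete_file, search_text, replace_file, prompt_user
print(str_replace_editor in tools)  # False -- passed to the agent separately
```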
janito/tools/bash.py ADDED
@@ -0,0 +1,22 @@
+ from typing import Optional
+ from typing import Tuple
+
+
+ def bash_tool(command: str, restart: Optional[bool] = False) -> Tuple[str, bool]:
+     """
+     A simple bash tool implementation that just prints the command and restart flag.
+
+     Args:
+         command: The bash command to execute
+         restart: Whether to restart the process
+
+     Returns:
+         A tuple containing (output message, is_error flag)
+     """
+     # In a real implementation, this would execute the command
+     # Here we just print what would be executed
+     output = f"Would execute bash command: '{command}'\n"
+     output += f"Restart flag is set to: {restart}"
+
+     # Return the output with is_error=False
+     return output, False
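A quick sketch of the placeholder tool's contract: in this version it only echoes the command and restart flag, so the is_error flag it returns is always False.

```python
# Hypothetical usage sketch of the stub bash tool added in 0.11.0.
from janito.tools.bash import bash_tool

output, is_error = bash_tool("ls -la", restart=False)
print(is_error)   # False -- nothing is actually executed
print(output)     # "Would execute bash command: 'ls -la'" plus the restart flag line
```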