janito 0.15.0__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (109)
  1. janito/__init__.py +1 -5
  2. janito/__main__.py +3 -5
  3. janito/agent/__init__.py +1 -0
  4. janito/agent/agent.py +96 -0
  5. janito/agent/config.py +113 -0
  6. janito/agent/config_defaults.py +10 -0
  7. janito/agent/conversation.py +107 -0
  8. janito/agent/queued_tool_handler.py +16 -0
  9. janito/agent/runtime_config.py +30 -0
  10. janito/agent/tool_handler.py +124 -0
  11. janito/agent/tools/__init__.py +11 -0
  12. janito/agent/tools/ask_user.py +63 -0
  13. janito/agent/tools/bash_exec.py +58 -0
  14. janito/agent/tools/create_directory.py +19 -0
  15. janito/agent/tools/create_file.py +43 -0
  16. janito/agent/tools/fetch_url.py +48 -0
  17. janito/agent/tools/file_str_replace.py +48 -0
  18. janito/agent/tools/find_files.py +37 -0
  19. janito/agent/tools/gitignore_utils.py +40 -0
  20. janito/agent/tools/move_file.py +37 -0
  21. janito/agent/tools/remove_file.py +19 -0
  22. janito/agent/tools/rich_live.py +37 -0
  23. janito/agent/tools/rich_utils.py +31 -0
  24. janito/agent/tools/search_text.py +41 -0
  25. janito/agent/tools/view_file.py +34 -0
  26. janito/cli/__init__.py +0 -6
  27. janito/cli/_print_config.py +68 -0
  28. janito/cli/_utils.py +8 -0
  29. janito/cli/arg_parser.py +26 -0
  30. janito/cli/config_commands.py +131 -0
  31. janito/cli/logging_setup.py +27 -0
  32. janito/cli/main.py +39 -0
  33. janito/cli/runner.py +138 -0
  34. janito/cli_chat_shell/__init__.py +1 -0
  35. janito/cli_chat_shell/chat_loop.py +148 -0
  36. janito/cli_chat_shell/commands.py +202 -0
  37. janito/cli_chat_shell/config_shell.py +75 -0
  38. janito/cli_chat_shell/load_prompt.py +15 -0
  39. janito/cli_chat_shell/session_manager.py +60 -0
  40. janito/cli_chat_shell/ui.py +136 -0
  41. janito/render_prompt.py +12 -0
  42. janito/templates/system_instructions.j2 +38 -0
  43. janito/web/__init__.py +0 -0
  44. janito/web/__main__.py +17 -0
  45. janito/web/app.py +132 -0
  46. janito-1.0.1.dist-info/METADATA +144 -0
  47. janito-1.0.1.dist-info/RECORD +51 -0
  48. {janito-0.15.0.dist-info → janito-1.0.1.dist-info}/WHEEL +2 -1
  49. janito-1.0.1.dist-info/entry_points.txt +2 -0
  50. {janito-0.15.0.dist-info → janito-1.0.1.dist-info}/licenses/LICENSE +2 -2
  51. janito-1.0.1.dist-info/top_level.txt +1 -0
  52. janito/callbacks.py +0 -34
  53. janito/cli/agent/__init__.py +0 -7
  54. janito/cli/agent/conversation.py +0 -149
  55. janito/cli/agent/initialization.py +0 -168
  56. janito/cli/agent/query.py +0 -112
  57. janito/cli/agent.py +0 -12
  58. janito/cli/app.py +0 -178
  59. janito/cli/commands/__init__.py +0 -12
  60. janito/cli/commands/config.py +0 -30
  61. janito/cli/commands/history.py +0 -119
  62. janito/cli/commands/profile.py +0 -93
  63. janito/cli/commands/validation.py +0 -24
  64. janito/cli/commands/workspace.py +0 -31
  65. janito/cli/commands.py +0 -12
  66. janito/cli/output.py +0 -29
  67. janito/cli/utils.py +0 -22
  68. janito/config/README.md +0 -104
  69. janito/config/__init__.py +0 -16
  70. janito/config/cli/__init__.py +0 -28
  71. janito/config/cli/commands.py +0 -397
  72. janito/config/cli/validators.py +0 -77
  73. janito/config/core/__init__.py +0 -23
  74. janito/config/core/file_operations.py +0 -90
  75. janito/config/core/properties.py +0 -316
  76. janito/config/core/singleton.py +0 -282
  77. janito/config/profiles/__init__.py +0 -8
  78. janito/config/profiles/definitions.py +0 -38
  79. janito/config/profiles/manager.py +0 -80
  80. janito/data/instructions_template.txt +0 -34
  81. janito/token_report.py +0 -154
  82. janito/tools/__init__.py +0 -44
  83. janito/tools/bash/bash.py +0 -157
  84. janito/tools/bash/unix_persistent_bash.py +0 -215
  85. janito/tools/bash/win_persistent_bash.py +0 -341
  86. janito/tools/decorators.py +0 -90
  87. janito/tools/delete_file.py +0 -65
  88. janito/tools/fetch_webpage/__init__.py +0 -23
  89. janito/tools/fetch_webpage/core.py +0 -182
  90. janito/tools/find_files.py +0 -220
  91. janito/tools/move_file.py +0 -72
  92. janito/tools/prompt_user.py +0 -57
  93. janito/tools/replace_file.py +0 -63
  94. janito/tools/rich_console.py +0 -176
  95. janito/tools/search_text.py +0 -226
  96. janito/tools/str_replace_editor/__init__.py +0 -6
  97. janito/tools/str_replace_editor/editor.py +0 -55
  98. janito/tools/str_replace_editor/handlers/__init__.py +0 -16
  99. janito/tools/str_replace_editor/handlers/create.py +0 -60
  100. janito/tools/str_replace_editor/handlers/insert.py +0 -100
  101. janito/tools/str_replace_editor/handlers/str_replace.py +0 -94
  102. janito/tools/str_replace_editor/handlers/undo.py +0 -64
  103. janito/tools/str_replace_editor/handlers/view.py +0 -165
  104. janito/tools/str_replace_editor/utils.py +0 -33
  105. janito/tools/think.py +0 -37
  106. janito/tools/usage_tracker.py +0 -137
  107. janito-0.15.0.dist-info/METADATA +0 -481
  108. janito-0.15.0.dist-info/RECORD +0 -64
  109. janito-0.15.0.dist-info/entry_points.txt +0 -2
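
The deleted janito/tools modules shown in the hunks below share one calling convention: each tool returns a (message, is_error) tuple and reports progress through the rich_console helpers. A minimal sketch of that shape, for orientation only; the check_file name is illustrative and appears in neither release:

from pathlib import Path
from typing import Tuple


def check_file(file_path: str) -> Tuple[str, bool]:
    """Illustrative 0.15.0-style tool: return (message, is_error)."""
    if not Path(file_path).exists():
        return (f"File {file_path} does not exist.", True)  # error case
    return (f"File {file_path} exists.", False)  # success case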
janito/tools/delete_file.py DELETED
@@ -1,65 +0,0 @@
- """
- Tool for deleting files through the claudine agent.
- """
- from pathlib import Path
- from typing import Tuple
- from janito.tools.str_replace_editor.utils import normalize_path
- from janito.tools.rich_console import print_info, print_success, print_error
- from janito.tools.usage_tracker import track_usage, get_tracker
-
-
- @track_usage('files_deleted')
- def delete_file(
-     file_path: str,
- ) -> Tuple[str, bool]:
-     """
-     Delete an existing file.
-
-     Args:
-         file_path: Path to the file to delete, relative to the workspace directory
-
-     Returns:
-         A tuple containing (message, is_error)
-     """
-     print_info(f"Deleting file {file_path}", "Delete Operation")
-     # Store the original path for display purposes
-     original_path = file_path
-
-     # Normalize the file path (converts to absolute path)
-     path = normalize_path(file_path)
-
-     # Convert to Path object for better path handling
-     path_obj = Path(path)
-
-     # Check if the file exists
-     if not path_obj.exists():
-         error_msg = f"File {original_path} does not exist."
-         print_error(error_msg, "Error")
-         return (error_msg, True)
-
-     # Check if it's a directory
-     if path_obj.is_dir():
-         error_msg = f"{original_path} is a directory, not a file. Use delete_directory for directories."
-         print_error(error_msg, "Error")
-         return (error_msg, True)
-
-     # Delete the file
-     try:
-         # Count the number of lines in the file before deleting
-         try:
-             with open(path_obj, 'r', encoding='utf-8') as f:
-                 line_count = len(f.readlines())
-             # Track negative line delta for deleted file
-             get_tracker().increment('lines_delta', -line_count)
-         except Exception:
-             # If we can't read the file, we can't count lines
-             pass
-
-         path_obj.unlink()
-         success_msg = f"Successfully deleted file {original_path}"
-         print_success("", "Success")
-         return (success_msg, False)
-     except Exception as e:
-         error_msg = f"Error deleting file {original_path}: {str(e)}"
-         print_error(error_msg, "Error")
-         return (error_msg, True)
janito/tools/fetch_webpage/__init__.py DELETED
@@ -1,23 +0,0 @@
- """
- Webpage Content Extractor Package
-
- A simplified tool for extracting clean, relevant content from web pages
- for processing with LLMs. Features include:
- - Streamlined content extraction using BeautifulSoup
- - Clean HTML text extraction
- - Efficient content chunking
-
- Dependencies:
- - requests
- - beautifulsoup4
-
- Author: Claude (Anthropic)
- """
-
- from janito.tools.fetch_webpage.core import fetch_webpage, fetch_and_extract, chunk_content
-
- __all__ = [
-     'fetch_webpage',
-     'fetch_and_extract',
-     'chunk_content'
- ]
janito/tools/fetch_webpage/core.py DELETED
@@ -1,182 +0,0 @@
- """
- Core functionality for fetching web pages and extracting content.
- """
-
- import requests
- from typing import Tuple, List, Optional
- from urllib.parse import urlparse, unquote
- from janito.tools.rich_console import print_info, print_success, print_error, print_warning
- from janito.tools.usage_tracker import track_usage
- from bs4 import BeautifulSoup
-
- @track_usage('web_requests')
- def fetch_webpage(url: str, headers: dict = None, timeout: int = 30, max_size: int = 5000000) -> Tuple[str, bool]:
-     """
-     Fetch the content of a web page from a given URL.
-
-     Args:
-         url: The URL of the web page to fetch
-         headers: Optional HTTP headers to include in the request (default: None)
-         timeout: Request timeout in seconds (default: 30)
-         max_size: Maximum size in bytes to download (default: 5MB)
-
-     Returns:
-         A tuple containing (message, is_error)
-     """
-     print_info(f"Fetching content from URL: {url}", "Web Fetch")
-
-     try:
-         # Set default headers if none provided
-         if headers is None:
-             headers = {
-                 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
-                 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
-                 'Accept-Language': 'en-US,en;q=0.9',
-                 'Accept-Encoding': 'gzip, deflate, br',
-                 'Referer': 'https://www.google.com/',
-                 'Sec-Ch-Ua': '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
-                 'Sec-Ch-Ua-Mobile': '?0',
-                 'Sec-Ch-Ua-Platform': '"Windows"',
-                 'Sec-Fetch-Dest': 'document',
-                 'Sec-Fetch-Mode': 'navigate',
-                 'Sec-Fetch-Site': 'cross-site',
-                 'Sec-Fetch-User': '?1',
-                 'Upgrade-Insecure-Requests': '1'
-             }
-
-         # Make the HTTP request with streaming enabled
-         response = requests.get(url, headers=headers, timeout=timeout, stream=True)
-
-         # Raise an exception for HTTP errors
-         response.raise_for_status()
-
-         # Check content length before downloading fully
-         content_length = response.headers.get('Content-Length')
-         if content_length and int(content_length) > max_size:
-             warning_msg = f"Web Fetch: Content size ({int(content_length)/1000000:.1f}MB) exceeds max size ({max_size/1000000:.1f}MB). Aborting download."
-             print_warning(warning_msg)
-             return warning_msg, True
-
-         # Download content with size limit
-         content_bytes = b''
-         for chunk in response.iter_content(chunk_size=1024 * 1024): # 1MB chunks
-             content_bytes += chunk
-             if len(content_bytes) > max_size:
-                 warning_msg = f"Web Fetch: Download exceeded max size ({max_size/1000000:.1f}MB). Truncating."
-                 print_warning(warning_msg)
-                 break
-
-         # Get the content
-         content = content_bytes.decode('utf-8', errors='replace')
-
-         print_success(f"Successfully fetched content ({len(content)} bytes)", "Web Fetch")
-
-         # Return the full content
-         return content, False
-
-     except requests.exceptions.RequestException as e:
-         error_msg = f"Error fetching web page: {str(e)}"
-         print_error(error_msg, "Web Fetch Error")
-         return error_msg, True
-
-
- @track_usage('web_content')
- def fetch_and_extract(url: str, max_length: int = 10000, keywords: List[str] = None) -> Tuple[str, bool]:
-     """
-     Fetch a webpage and extract its main content using BeautifulSoup.
-
-     Args:
-         url: The URL to fetch
-         max_length: Maximum length of text to return
-         keywords: Optional list of URL-encoded keywords to prioritize content containing these terms
-
-     Returns:
-         A tuple containing (extracted_content, is_error)
-     """
-     html_content, is_error = fetch_webpage(url)
-
-     if is_error:
-         return html_content, True
-
-     try:
-         # Use BeautifulSoup to parse and extract content
-         soup = BeautifulSoup(html_content, 'html.parser')
-
-         # Remove script, style, and other non-content elements
-         for element in soup(['script', 'style', 'header', 'footer', 'nav', 'aside']):
-             element.decompose()
-
-         # URL-decode keywords if provided
-         decoded_keywords = []
-         if keywords:
-             decoded_keywords = [unquote(keyword).lower() for keyword in keywords]
-             print_info(f"Prioritizing content with keywords: {', '.join(decoded_keywords)}", "Content Extraction")
-
-         # Extract text from main content elements
-         paragraphs = []
-         keyword_paragraphs = []
-
-         for tag in soup.find_all(['p', 'h1', 'h2', 'h3', 'h4', 'article', 'section', 'div']):
-             text = tag.get_text(strip=True)
-             if text and len(text) > 20: # Skip very short pieces that might be UI elements
-                 # Check if the paragraph contains any of the keywords
-                 if decoded_keywords and any(keyword in text.lower() for keyword in decoded_keywords):
-                     keyword_paragraphs.append(text)
-                 else:
-                     paragraphs.append(text)
-
-         # Join paragraphs, prioritizing those with keywords
-         if keyword_paragraphs:
-             print_info(f"Found {len(keyword_paragraphs)} paragraphs containing keywords", "Content Extraction")
-             extracted_text = "\n\n".join(keyword_paragraphs + paragraphs)
-         else:
-             extracted_text = "\n\n".join(paragraphs)
-
-         # If no paragraphs found, fall back to all text
-         if not extracted_text or len(extracted_text) < 100:
-             extracted_text = soup.get_text(separator='\n\n')
-
-         # Clean up extra whitespace
-         extracted_text = ' '.join(extracted_text.split())
-         extracted_text = extracted_text.replace('. ', '.\n\n')
-
-         # Truncate if needed
-         if len(extracted_text) > max_length:
-             print_info(f"Truncating content from {len(extracted_text)} to {max_length} characters", "Content Extraction")
-             extracted_text = extracted_text[:max_length] + "..."
-
-         print_success(f"Successfully extracted {len(extracted_text)} characters of content", "Content Extraction")
-         return extracted_text, False
-
-     except Exception as e:
-         error_msg = f"Error extracting content: {str(e)}"
-         print_error(error_msg, "Content Extraction Error")
-         return error_msg, True
-
-
- def chunk_content(content: str, chunk_size: int = 2000, overlap: int = 200) -> List[str]:
-     """
-     Split content into overlapping chunks of a specified size.
-
-     Args:
-         content: The text content to chunk
-         chunk_size: Maximum size of each chunk
-         overlap: Number of characters to overlap between chunks
-
-     Returns:
-         List of text chunks
-     """
-     if not content:
-         return []
-
-     chunks = []
-
-     # Simple chunking with overlap
-     for i in range(0, len(content), chunk_size - overlap):
-         chunk_end = min(i + chunk_size, len(content))
-         chunks.append(content[i:chunk_end])
-         if chunk_end == len(content):
-             break
-
-     print_success(f"Content successfully chunked into {len(chunks)} parts", "Content Chunking")
-     return chunks
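
For reference, the chunking arithmetic in the removed chunk_content above: with the defaults chunk_size=2000 and overlap=200, the window advances 1800 characters per step and stops once the end of the input is reached. A small sketch reproducing that loop on a 5000-character string:

content = "x" * 5000
chunk_size, overlap = 2000, 200

chunks = []
for i in range(0, len(content), chunk_size - overlap):  # step of 1800 characters
    chunk_end = min(i + chunk_size, len(content))
    chunks.append(content[i:chunk_end])
    if chunk_end == len(content):  # stop once the final chunk reaches the end
        break

print([len(c) for c in chunks])  # [2000, 2000, 1400]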
janito/tools/find_files.py DELETED
@@ -1,220 +0,0 @@
- import os
- import glob
- import fnmatch # Still needed for gitignore pattern matching
- from typing import List, Tuple
- from janito.tools.rich_console import print_info, print_success, print_error, print_warning
-
-
- def find_files(pattern: str, root_dir: str = ".", recursive: bool = True) -> Tuple[str, bool]:
-     """
-     Find files whose path matches a glob pattern.
-     Files in .gitignore are always ignored.
-
-     Args:
-         pattern: pattern to match file paths against (e.g., "*.py", "*/tools/*.py")
-         root_dir: root directory to start search from (default: current directory)
-         recursive: Whether to search recursively in subdirectories (default: True)
-
-     Returns:
-         A tuple containing (message, is_error)
-     """
-     # Print start message without newline
-     print_info(
-         f"Finding files matching path pattern {pattern}, on {root_dir} " +
-         f"({'recursive' if recursive else 'non-recursive'})",
-         title="Text Search"
-     )
-     try:
-         # Convert to absolute path if relative
-         abs_root = os.path.abspath(root_dir)
-
-         if not os.path.isdir(abs_root):
-             error_msg = f"Error: Directory '{root_dir}' does not exist"
-             print_error(error_msg, title="File Operation")
-             return error_msg, True
-
-         matching_files = []
-
-         # Get gitignore patterns
-         ignored_patterns = _get_gitignore_patterns(abs_root)
-
-         # Check if the search pattern itself is in the gitignore
-         if _is_pattern_ignored(pattern, ignored_patterns):
-             warning_msg = f"Warning: The search pattern '{pattern}' matches patterns in .gitignore. Search may not yield expected results."
-             print_error(warning_msg, title="Text Search")
-             return warning_msg, True
-
-         # Use glob for pattern matching
-         # Construct the glob pattern with the root directory
-         glob_pattern = os.path.join(abs_root, pattern) if not pattern.startswith(os.path.sep) else pattern
-
-         # Use recursive glob if needed
-         if recursive:
-             # Use ** pattern for recursive search if not already in the pattern
-             if '**' not in glob_pattern:
-                 # Check if the pattern already has a directory component
-                 if os.path.sep in pattern or '/' in pattern:
-                     # Pattern already has directory component, keep as is
-                     pass
-                 else:
-                     # Add ** to search in all subdirectories
-                     glob_pattern = os.path.join(abs_root, '**', pattern)
-
-             # Use recursive=True for Python 3.5+ glob
-             glob_files = glob.glob(glob_pattern, recursive=True)
-         else:
-             # Non-recursive mode - only search in the specified directory
-             glob_files = glob.glob(glob_pattern)
-
-         # Process the glob results
-         for file_path in glob_files:
-             # Skip directories
-             if not os.path.isfile(file_path):
-                 continue
-
-             # Skip ignored files
-             if _is_ignored(file_path, ignored_patterns, abs_root):
-                 continue
-
-             # Convert to relative path from root_dir
-             rel_path = os.path.relpath(file_path, abs_root)
-             matching_files.append(rel_path)
-
-         # Sort the files for consistent output
-         matching_files.sort()
-
-         if matching_files:
-             file_list = "\n- ".join(matching_files)
-             result_msg = f"{len(matching_files)} files found"
-             print_success(result_msg, title="Search Results")
-             return file_list, False
-         else:
-             result_msg = "No files found"
-             print_success(result_msg, title="Search Results")
-             return result_msg, False
-
-     except Exception as e:
-         error_msg = f"Error finding files: {str(e)}"
-         print_error(error_msg, title="Text Search")
-         return error_msg, True
-
-
- def _get_gitignore_patterns(root_dir: str) -> List[str]:
-     """
-     Get patterns from .gitignore files.
-
-     Args:
-         root_dir: Root directory to start from
-
-     Returns:
-         List of gitignore patterns
-     """
-     patterns = []
-
-     # Check for .gitignore in the root directory
-     gitignore_path = os.path.join(root_dir, '.gitignore')
-     if os.path.isfile(gitignore_path):
-         try:
-             with open(gitignore_path, 'r', encoding='utf-8') as f:
-                 for line in f:
-                     line = line.strip()
-                     # Skip empty lines and comments
-                     if line and not line.startswith('#'):
-                         patterns.append(line)
-         except Exception:
-             pass
-
-     # Add common patterns that are always ignored
-     common_patterns = [
-         '.git/', '.venv/', 'venv/', '__pycache__/', '*.pyc',
-         '*.pyo', '*.pyd', '.DS_Store', '*.so', '*.egg-info/'
-     ]
-     patterns.extend(common_patterns)
-
-     return patterns
-
-
- def _is_pattern_ignored(search_pattern: str, gitignore_patterns: List[str]) -> bool:
-     """
-     Check if a search pattern conflicts with gitignore patterns.
-
-     Args:
-         search_pattern: The search pattern to check
-         gitignore_patterns: List of gitignore patterns
-
-     Returns:
-         True if the search pattern conflicts with gitignore patterns, False otherwise
-     """
-     # Remove any directory part from the search pattern
-     pattern_only = search_pattern.split('/')[-1]
-
-     for git_pattern in gitignore_patterns:
-         # Skip negation patterns
-         if git_pattern.startswith('!'):
-             continue
-
-         # Remove trailing slash for directory patterns
-         if git_pattern.endswith('/'):
-             git_pattern = git_pattern[:-1]
-
-         # Direct match
-         if git_pattern == search_pattern or git_pattern == pattern_only:
-             return True
-
-         # Check if the gitignore pattern is a prefix of the search pattern
-         if search_pattern.startswith(git_pattern) and (
-             len(git_pattern) == len(search_pattern) or
-             search_pattern[len(git_pattern)] in ['/', '\\']
-         ):
-             return True
-
-         # Check for wildcard matches
-         if '*' in git_pattern or '?' in git_pattern:
-             # Check if the search pattern would be caught by this gitignore pattern
-             if fnmatch.fnmatch(search_pattern, git_pattern) or fnmatch.fnmatch(pattern_only, git_pattern):
-                 return True
-
-     return False
-
-
- def _is_ignored(path: str, patterns: List[str], root_dir: str) -> bool:
-     """
-     Check if a path should be ignored based on gitignore patterns.
-
-     Args:
-         path: Path to check
-         patterns: List of gitignore patterns
-         root_dir: Root directory for relative paths
-
-     Returns:
-         True if the path should be ignored, False otherwise
-     """
-     # Get the relative path from the root directory
-     rel_path = os.path.relpath(path, root_dir)
-
-     # Convert to forward slashes for consistency with gitignore patterns
-     rel_path = rel_path.replace(os.sep, '/')
-
-     # Add trailing slash for directories
-     if os.path.isdir(path) and not rel_path.endswith('/'):
-         rel_path += '/'
-
-     for pattern in patterns:
-         # Handle negation patterns (those starting with !)
-         if pattern.startswith('!'):
-             continue # Skip negation patterns for simplicity
-
-         # Handle directory-specific patterns (those ending with /)
-         if pattern.endswith('/'):
-             if os.path.isdir(path) and fnmatch.fnmatch(rel_path, pattern + '*'):
-                 return True
-
-         # Handle file patterns
-         if fnmatch.fnmatch(rel_path, pattern):
-             return True
-
-         # Handle patterns without wildcards as path prefixes
-         if '*' not in pattern and '?' not in pattern and rel_path.startswith(pattern):
-             return True
-
-     return False
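
The removed find_files helpers above match forward-slash relative paths against gitignore-style patterns with fnmatch, including a built-in list of always-ignored patterns such as '*.pyc' and '.git/'. A short sketch of that matching step; the paths are made up for illustration:

import fnmatch

# Subset of the always-ignored patterns from the removed _get_gitignore_patterns.
always_ignored = ['.git/', '__pycache__/', '*.pyc']

def is_ignored(rel_path: str) -> bool:
    # Mirrors only the plain-file branch of the removed _is_ignored helper.
    return any(fnmatch.fnmatch(rel_path, p) for p in always_ignored)

print(is_ignored('janito/cli/app.cpython-311.pyc'))  # True, caught by '*.pyc'
print(is_ignored('janito/cli/app.py'))               # False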
janito/tools/move_file.py DELETED
@@ -1,72 +0,0 @@
- """
- Tool for moving files through the claudine agent.
- """
- import shutil
- from pathlib import Path
- from typing import Tuple
- from janito.tools.str_replace_editor.utils import normalize_path
- from janito.tools.rich_console import print_info, print_success, print_error
- from janito.tools.usage_tracker import track_usage
-
-
- @track_usage('files_moved')
- def move_file(
-     source_path: str,
-     destination_path: str,
- ) -> Tuple[str, bool]:
-     """
-     Move a file from source path to destination path.
-
-     Args:
-         source_path: Path to the file to move, relative to the workspace directory
-         destination_path: Destination path where the file should be moved, relative to the workspace directory
-
-     Returns:
-         A tuple containing (message, is_error)
-     """
-     print_info(f"Moving file from {source_path} to {destination_path}", "Move Operation")
-
-     # Store the original paths for display purposes
-     original_source = source_path
-     original_destination = destination_path
-
-     # Normalize the file paths (converts to absolute paths)
-     source = normalize_path(source_path)
-     destination = normalize_path(destination_path)
-
-     # Convert to Path objects for better path handling
-     source_obj = Path(source)
-     destination_obj = Path(destination)
-
-     # Check if the source file exists
-     if not source_obj.exists():
-         error_msg = f"Source file {original_source} does not exist."
-         print_error(error_msg, "Error")
-         return (error_msg, True)
-
-     # Check if source is a directory
-     if source_obj.is_dir():
-         error_msg = f"{original_source} is a directory, not a file. Use move_directory for directories."
-         print_error(error_msg, "Error")
-         return (error_msg, True)
-
-     # Check if destination directory exists
-     if not destination_obj.parent.exists():
-         try:
-             destination_obj.parent.mkdir(parents=True, exist_ok=True)
-             print_info(f"Created directory: {destination_obj.parent}", "Info")
-         except Exception as e:
-             error_msg = f"Error creating destination directory: {str(e)}"
-             print_error(error_msg, "Error")
-             return (error_msg, True)
-
-     # Move the file
-     try:
-         shutil.move(str(source_obj), str(destination_obj))
-         success_msg = f"Successfully moved file from {original_source} to {original_destination}"
-         print_success("", "Success")
-         return (success_msg, False)
-     except Exception as e:
-         error_msg = f"Error moving file from {original_source} to {original_destination}: {str(e)}"
-         print_error(error_msg, "Error")
-         return (error_msg, True)
janito/tools/prompt_user.py DELETED
@@ -1,57 +0,0 @@
- """
- Tool for prompting the user for input through the claudine agent.
- """
- from typing import Tuple, List
- import sys
- import textwrap
- from rich.console import Console
- from janito.tools.rich_console import print_info, print_error, print_warning
- from janito.tools.usage_tracker import track_usage
- from janito.cli.utils import get_stdin_termination_hint
-
-
- console = Console()
-
- @track_usage('user_prompts')
- def prompt_user(
-     prompt_text: str,
- ) -> Tuple[str, bool]:
-     """
-     Prompt the user for input and return their response.
-     Displays the prompt in a panel and uses stdin for input.
-
-     Args:
-         prompt_text: Text to display to the user as a prompt
-
-     Returns:
-         A tuple containing (user_response, is_error)
-     """
-     try:
-         # Display the prompt with ASCII header
-         console.print("\n" + "="*50)
-         console.print("USER PROMPT")
-         console.print("="*50)
-         console.print(prompt_text)
-
-         # Show input instructions with stdin termination hint
-         termination_hint = get_stdin_termination_hint().replace("[bold yellow]", "").replace("[/bold yellow]", "")
-         print_info(f"Enter your response below. {termination_hint}\n", "Input Instructions")
-
-         # Read input from stdin
-         lines = []
-         for line in sys.stdin:
-             lines.append(line.rstrip('\n'))
-
-         # Join the lines with newlines to preserve the multiline format
-         user_response = "\n".join(lines)
-
-         # If no input was provided, return a message
-         if not user_response.strip():
-             print_warning("No input was provided. Empty Input.")
-             return ("", False)
-
-         return (user_response, False)
-     except Exception as e:
-         error_msg = f"Error prompting user: {str(e)}"
-         print_error(error_msg, "Prompt Error")
-         return (error_msg, True)
janito/tools/replace_file.py DELETED
@@ -1,63 +0,0 @@
- """
- Replace file tool that overwrites a file with new content.
- """
- import os
- from typing import Tuple
-
- from janito.tools.decorators import tool
- from janito.tools.rich_console import print_info, print_success, print_error
- from janito.tools.usage_tracker import track_usage, get_tracker
-
-
- @tool
- @track_usage('files_modified')
- def replace_file(file_path: str, new_content: str) -> Tuple[str, bool]:
-     """
-     Replace an existing file with new content.
-
-     Args:
-         file_path: Path to the file to replace, relative to the workspace directory
-         new_content: New content to write to the file
-
-     Returns:
-         A tuple containing (message, is_error)
-     """
-     try:
-         print_info(f"Replacing file '{file_path}'", "File Operation")
-
-         # Convert relative path to absolute path
-         abs_path = os.path.abspath(file_path)
-
-         # Check if file exists
-         if not os.path.isfile(abs_path):
-             error_msg = f"Error: File '{file_path}' does not exist"
-             print_error(error_msg, "File Error")
-             return error_msg, True
-
-         # Read the original content to calculate line delta
-         try:
-             with open(abs_path, 'r', encoding='utf-8') as f:
-                 old_content = f.read()
-
-             # Calculate line delta
-             old_lines_count = len(old_content.splitlines()) if old_content else 0
-             new_lines_count = len(new_content.splitlines()) if new_content else 0
-             line_delta = new_lines_count - old_lines_count
-
-             # Track line delta
-             get_tracker().increment('lines_delta', line_delta)
-         except Exception:
-             # If we can't read the file, we can't calculate line delta
-             pass
-
-         # Write new content to the file
-         with open(abs_path, 'w', encoding='utf-8') as f:
-             f.write(new_content)
-
-         success_msg = f"Successfully replaced file '{file_path}'"
-         print_success(success_msg, "Success")
-         return success_msg, False
-     except Exception as e:
-         error_msg = f"Error replacing file '{file_path}': {str(e)}"
-         print_error(error_msg, "Error")
-         return error_msg, True
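
The lines_delta bookkeeping in the removed replace_file above is plain arithmetic: the new content's line count minus the old one, passed to the usage tracker. A worked example with made-up file contents:

old_content = "a\nb\nc\n"
new_content = "a\nb\nc\nd\ne\n"

old_lines_count = len(old_content.splitlines()) if old_content else 0  # 3
new_lines_count = len(new_content.splitlines()) if new_content else 0  # 5
line_delta = new_lines_count - old_lines_count
print(line_delta)  # 2, so the replacement grew the file by two lines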