janito 3.5.0__py3-none-any.whl → 3.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their respective public registries.
Files changed (78)
  1. janito/README.md +4 -4
  2. janito/cli/cli_commands/list_tools.py +6 -1
  3. janito/cli/main_cli.py +1 -1
  4. janito/cli/rich_terminal_reporter.py +2 -2
  5. janito/data/blocked.txt +96 -0
  6. janito/docs/GETTING_STARTED.md +8 -9
  7. janito/plugins/core/system/tools/run_bash_command.py +23 -2
  8. janito/plugins/core/system/tools/run_powershell_command.py +16 -0
  9. janito/plugins/tools/__init__.py +34 -5
  10. janito/plugins/tools/core/__init__.py +65 -0
  11. janito/plugins/tools/{ask_user.py → core/ask_user.py} +1 -1
  12. janito/plugins/tools/{copy_file.py → core/copy_file.py} +1 -1
  13. janito/plugins/tools/{create_directory.py → core/create_directory.py} +1 -1
  14. janito/plugins/tools/{create_file.py → core/create_file.py} +2 -2
  15. janito/plugins/tools/{delete_text_in_file.py → core/delete_text_in_file.py} +2 -2
  16. janito/plugins/tools/{fetch_url.py → core/fetch_url.py} +18 -21
  17. janito/plugins/tools/{find_files.py → core/find_files.py} +1 -1
  18. janito/plugins/tools/{get_file_outline → core/get_file_outline}/core.py +1 -1
  19. janito/plugins/tools/{move_file.py → core/move_file.py} +1 -1
  20. janito/plugins/tools/{open_html_in_browser.py → core/open_html_in_browser.py} +1 -1
  21. janito/plugins/tools/{open_url.py → core/open_url.py} +1 -1
  22. janito/plugins/tools/{python_code_run.py → core/python_code_run.py} +21 -5
  23. janito/plugins/tools/{python_command_run.py → core/python_command_run.py} +19 -3
  24. janito/plugins/tools/{python_file_run.py → core/python_file_run.py} +19 -3
  25. janito/plugins/tools/{read_chart.py → core/read_chart.py} +1 -1
  26. janito/plugins/tools/{read_files.py → core/read_files.py} +1 -1
  27. janito/plugins/tools/{remove_directory.py → core/remove_directory.py} +1 -1
  28. janito/plugins/tools/{remove_file.py → core/remove_file.py} +1 -1
  29. janito/plugins/tools/{replace_text_in_file.py → core/replace_text_in_file.py} +2 -2
  30. janito/plugins/tools/{run_bash_command.py → core/run_bash_command.py} +1 -1
  31. janito/plugins/tools/{run_powershell_command.py → core/run_powershell_command.py} +1 -1
  32. janito/plugins/tools/{search_text → core/search_text}/core.py +1 -1
  33. janito/plugins/tools/{show_image.py → core/show_image.py} +1 -1
  34. janito/plugins/tools/{show_image_grid.py → core/show_image_grid.py} +1 -1
  35. janito/plugins/tools/{validate_file_syntax → core/validate_file_syntax}/core.py +1 -1
  36. janito/plugins/tools/{view_file.py → core/view_file.py} +1 -1
  37. janito/plugins/web/webtools/__init__.py +1 -11
  38. janito/providers/moonshot/model_info.py +15 -4
  39. janito/providers/moonshot/provider.py +1 -1
  40. janito/tools/__init__.py +14 -4
  41. janito/tools/adapters/local/copy_file.py +0 -0
  42. janito/tools/adapters/local/create_file.py +0 -0
  43. janito/tools/adapters/local/move_file.py +0 -0
  44. janito/tools/adapters/local/remove_file.py +0 -0
  45. janito/tools/adapters/local/view_file.py +0 -0
  46. janito/tools/blocked_sites.py +74 -0
  47. {janito-3.5.0.dist-info → janito-3.6.1.dist-info}/METADATA +7 -8
  48. {janito-3.5.0.dist-info → janito-3.6.1.dist-info}/RECORD +73 -70
  49. janito/plugins/dev/pythondev/tools/python_code_run.py +0 -172
  50. janito/plugins/dev/pythondev/tools/python_command_run.py +0 -171
  51. janito/plugins/dev/pythondev/tools/python_file_run.py +0 -172
  52. janito/plugins/tools/core_tools_plugin.py +0 -87
  53. janito/plugins/web/webtools/tools/fetch_url.py +0 -458
  54. /janito/plugins/tools/{decorators.py → core/decorators.py} +0 -0
  55. /janito/plugins/tools/{get_file_outline → core/get_file_outline}/__init__.py +0 -0
  56. /janito/plugins/tools/{get_file_outline → core/get_file_outline}/java_outline.py +0 -0
  57. /janito/plugins/tools/{get_file_outline → core/get_file_outline}/markdown_outline.py +0 -0
  58. /janito/plugins/tools/{get_file_outline → core/get_file_outline}/python_outline.py +0 -0
  59. /janito/plugins/tools/{get_file_outline → core/get_file_outline}/search_outline.py +0 -0
  60. /janito/plugins/tools/{search_text → core/search_text}/__init__.py +0 -0
  61. /janito/plugins/tools/{search_text → core/search_text}/match_lines.py +0 -0
  62. /janito/plugins/tools/{search_text → core/search_text}/pattern_utils.py +0 -0
  63. /janito/plugins/tools/{search_text → core/search_text}/traverse_directory.py +0 -0
  64. /janito/plugins/tools/{validate_file_syntax → core/validate_file_syntax}/__init__.py +0 -0
  65. /janito/plugins/tools/{validate_file_syntax → core/validate_file_syntax}/css_validator.py +0 -0
  66. /janito/plugins/tools/{validate_file_syntax → core/validate_file_syntax}/html_validator.py +0 -0
  67. /janito/plugins/tools/{validate_file_syntax → core/validate_file_syntax}/jinja2_validator.py +0 -0
  68. /janito/plugins/tools/{validate_file_syntax → core/validate_file_syntax}/js_validator.py +0 -0
  69. /janito/plugins/tools/{validate_file_syntax → core/validate_file_syntax}/json_validator.py +0 -0
  70. /janito/plugins/tools/{validate_file_syntax → core/validate_file_syntax}/markdown_validator.py +0 -0
  71. /janito/plugins/tools/{validate_file_syntax → core/validate_file_syntax}/ps1_validator.py +0 -0
  72. /janito/plugins/tools/{validate_file_syntax → core/validate_file_syntax}/python_validator.py +0 -0
  73. /janito/plugins/tools/{validate_file_syntax → core/validate_file_syntax}/xml_validator.py +0 -0
  74. /janito/plugins/tools/{validate_file_syntax → core/validate_file_syntax}/yaml_validator.py +0 -0
  75. {janito-3.5.0.dist-info → janito-3.6.1.dist-info}/WHEEL +0 -0
  76. {janito-3.5.0.dist-info → janito-3.6.1.dist-info}/entry_points.txt +0 -0
  77. {janito-3.5.0.dist-info → janito-3.6.1.dist-info}/licenses/LICENSE +0 -0
  78. {janito-3.5.0.dist-info → janito-3.6.1.dist-info}/top_level.txt +0 -0
janito/plugins/web/webtools/tools/fetch_url.py (deleted)
@@ -1,458 +0,0 @@
- import requests
- import time
- import os
- import json
- from pathlib import Path
- from bs4 import BeautifulSoup
- from typing import Dict, Any, Optional
- from janito.tools.adapters.local.adapter import register_local_tool
- from janito.tools.tool_base import ToolBase, ToolPermissions
- from janito.report_events import ReportAction
- from janito.i18n import tr
- from janito.tools.tool_utils import pluralize
- from janito.tools.loop_protection_decorator import protect_against_loops
-
-
- @register_local_tool
- class FetchUrlTool(ToolBase):
-     """
-     Fetch the content of a web page and extract its text.
-
-     This tool implements a **session-based caching mechanism** that provides
-     **in-memory caching** for the lifetime of the tool instance. URLs are cached
-     in RAM during the session, providing instant access to previously fetched
-     content without making additional HTTP requests.
-
-     **Session Cache Behavior:**
-     - **Lifetime**: Cache exists for the lifetime of the FetchUrlTool instance
-     - **Scope**: In-memory (RAM) cache, not persisted to disk
-     - **Storage**: Successful responses are cached as raw HTML content
-     - **Key**: Cache key is the exact URL string
-     - **Invalidation**: Cache is automatically cleared when the tool instance is destroyed
-     - **Performance**: Subsequent requests for the same URL return instantly
-
-     **Error Cache Behavior:**
-     - HTTP 403 errors: Cached for 24 hours (more permanent)
-     - HTTP 404 errors: Cached for 1 hour (temporary)
-     - Other 4xx errors: Cached for 30 minutes
-     - 5xx errors: Not cached (retried on each request)
-
-     Args:
-         url (str): The URL of the web page to fetch.
-         search_strings (list[str], optional): Strings to search for in the page content.
-         max_length (int, optional): Maximum number of characters to return. Defaults to 5000.
-         max_lines (int, optional): Maximum number of lines to return. Defaults to 200.
-         context_chars (int, optional): Characters of context around search matches. Defaults to 400.
-         timeout (int, optional): Timeout in seconds for the HTTP request. Defaults to 10.
-         save_to_file (str, optional): File path to save the full resource content. If provided,
-             the complete response will be saved to this file instead of being processed.
-         headers (Dict[str, str], optional): Custom HTTP headers to send with the request.
-         cookies (Dict[str, str], optional): Custom cookies to send with the request.
-         follow_redirects (bool, optional): Whether to follow HTTP redirects. Defaults to True.
-     Returns:
-         str: Extracted text content from the web page, or a warning message. Example:
-         - "<main text content...>"
-         - "No lines found for the provided search strings."
-         - "Warning: Empty URL provided. Operation skipped."
-     """
-
-     permissions = ToolPermissions(read=True)
-     tool_name = "fetch_url"
-
-     def __init__(self):
-         super().__init__()
-         self.cache_dir = Path.home() / ".janito" / "cache" / "fetch_url"
-         self.cache_dir.mkdir(parents=True, exist_ok=True)
-         self.cache_file = self.cache_dir / "error_cache.json"
-         self.session_cache = (
-             {}
-         )  # In-memory session cache - lifetime matches tool instance
-         self._load_cache()
-
-         # Browser-like session with cookies and headers
-         self.session = requests.Session()
-         self.session.headers.update(
-             {
-                 "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
-                 "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
-                 "Accept-Language": "en-US,en;q=0.5",
-                 "Accept-Encoding": "gzip, deflate, br",
-                 "DNT": "1",
-                 "Connection": "keep-alive",
-                 "Upgrade-Insecure-Requests": "1",
-             }
-         )
-
-         # Load cookies from disk if they exist
-         self.cookies_file = self.cache_dir / "cookies.json"
-         self._load_cookies()
-
-     def _load_cache(self):
-         """Load error cache from disk."""
-         if self.cache_file.exists():
-             try:
-                 with open(self.cache_file, "r", encoding="utf-8") as f:
-                     self.error_cache = json.load(f)
-             except (json.JSONDecodeError, IOError):
-                 self.error_cache = {}
-         else:
-             self.error_cache = {}
-
-     def _save_cache(self):
-         """Save error cache to disk."""
-         try:
-             with open(self.cache_file, "w", encoding="utf-8") as f:
-                 json.dump(self.error_cache, f, indent=2)
-         except IOError:
-             pass  # Silently fail if we can't write cache
-
-     def _load_cookies(self):
-         """Load cookies from disk into session."""
-         if self.cookies_file.exists():
-             try:
-                 with open(self.cookies_file, "r", encoding="utf-8") as f:
-                     cookies_data = json.load(f)
-                     for cookie in cookies_data:
-                         self.session.cookies.set(**cookie)
-             except (json.JSONDecodeError, IOError):
-                 pass  # Silently fail if we can't load cookies
-
-     def _save_cookies(self):
-         """Save session cookies to disk."""
-         try:
-             cookies_data = []
-             for cookie in self.session.cookies:
-                 cookies_data.append(
-                     {
-                         "name": cookie.name,
-                         "value": cookie.value,
-                         "domain": cookie.domain,
-                         "path": cookie.path,
-                     }
-                 )
-             with open(self.cookies_file, "w", encoding="utf-8") as f:
-                 json.dump(cookies_data, f, indent=2)
-         except IOError:
-             pass  # Silently fail if we can't write cookies
-
-     def _get_cached_error(self, url: str) -> tuple[str, bool]:
-         """
-         Check if we have a cached error for this URL.
-         Returns (error_message, is_cached) tuple.
-         """
-         if url not in self.error_cache:
-             return None, False
-
-         entry = self.error_cache[url]
-         current_time = time.time()
-
-         # Different expiration times for different status codes
-         if entry["status_code"] == 403:
-             # Cache 403 errors for 24 hours (more permanent)
-             expiration_time = 24 * 3600
-         elif entry["status_code"] == 404:
-             # Cache 404 errors for 1 hour (more temporary)
-             expiration_time = 3600
-         else:
-             # Cache other 4xx errors for 30 minutes
-             expiration_time = 1800
-
-         if current_time - entry["timestamp"] > expiration_time:
-             # Cache expired, remove it
-             del self.error_cache[url]
-             self._save_cache()
-             return None, False
-
-         return entry["message"], True
-
-     def _cache_error(self, url: str, status_code: int, message: str):
-         """Cache an HTTP error response."""
-         self.error_cache[url] = {
-             "status_code": status_code,
-             "message": message,
-             "timestamp": time.time(),
-         }
-         self._save_cache()
-
-     def _fetch_url_content(
-         self,
-         url: str,
-         timeout: int = 10,
-         headers: Optional[Dict[str, str]] = None,
-         cookies: Optional[Dict[str, str]] = None,
-         follow_redirects: bool = True,
-     ) -> str:
-         """Fetch URL content and handle HTTP errors.
-
-         Implements two-tier caching:
-         1. Session cache: In-memory cache for successful responses (lifetime = tool instance)
-         2. Error cache: Persistent disk cache for HTTP errors with different expiration times
-
-         Also implements URL whitelist checking and browser-like behavior.
-         """
-         # Check URL whitelist
-         from janito.tools.url_whitelist import get_url_whitelist_manager
-
-         whitelist_manager = get_url_whitelist_manager()
-
-         if not whitelist_manager.is_url_allowed(url):
-             error_message = tr("Blocked")
-             self.report_error(
-                 tr("❗ Blocked"),
-                 ReportAction.READ,
-             )
-             return error_message
-
-         # Check session cache first
-         if url in self.session_cache:
-             return self.session_cache[url]
-
-         # Check persistent cache for known errors
-         cached_error, is_cached = self._get_cached_error(url)
-         if cached_error:
-             self.report_warning(
-                 tr(
-                     "ℹ️ Using cached HTTP error for URL: {url}",
-                     url=url,
-                 ),
-                 ReportAction.READ,
-             )
-             return cached_error
-
-         try:
-             # Merge custom headers with default ones
-             request_headers = self.session.headers.copy()
-             if headers:
-                 request_headers.update(headers)
-
-             # Merge custom cookies
-             if cookies:
-                 self.session.cookies.update(cookies)
-
-             response = self.session.get(
-                 url,
-                 timeout=timeout,
-                 headers=request_headers,
-                 allow_redirects=follow_redirects,
-             )
-             response.raise_for_status()
-             content = response.text
-
-             # Save cookies after successful request
-             self._save_cookies()
-
-             # Cache successful responses in session cache
-             self.session_cache[url] = content
-             return content
-         except requests.exceptions.HTTPError as http_err:
-             status_code = http_err.response.status_code if http_err.response else None
-
-             # Map status codes to descriptions
-             status_descriptions = {
-                 400: "Bad Request",
-                 401: "Unauthorized",
-                 403: "Forbidden",
-                 404: "Not Found",
-                 405: "Method Not Allowed",
-                 408: "Request Timeout",
-                 409: "Conflict",
-                 410: "Gone",
-                 413: "Payload Too Large",
-                 414: "URI Too Long",
-                 415: "Unsupported Media Type",
-                 429: "Too Many Requests",
-                 500: "Internal Server Error",
-                 501: "Not Implemented",
-                 502: "Bad Gateway",
-                 503: "Service Unavailable",
-                 504: "Gateway Timeout",
-                 505: "HTTP Version Not Supported",
-             }
-
-             if status_code and 400 <= status_code < 500:
-                 description = status_descriptions.get(status_code, "Client Error")
-                 error_message = f"HTTP {status_code} {description}"
-                 # Cache 403 and 404 errors
-                 if status_code in [403, 404]:
-                     self._cache_error(url, status_code, error_message)
-
-                 self.report_error(
-                     f"❗ HTTP {status_code} {description}",
-                     ReportAction.READ,
-                 )
-                 return error_message
-             else:
-                 status_code_str = str(status_code) if status_code else "Error"
-                 description = status_descriptions.get(
-                     status_code,
-                     (
-                         "Server Error"
-                         if status_code and status_code >= 500
-                         else "Client Error"
-                     ),
-                 )
-                 self.report_error(
-                     f"❗ HTTP {status_code_str} {description}",
-                     ReportAction.READ,
-                 )
-                 return f"HTTP {status_code_str} {description}"
-         except requests.exceptions.ConnectionError as conn_err:
-             self.report_error(
-                 "❗ Network Error",
-                 ReportAction.READ,
-             )
-             return f"Network Error: Failed to connect to {url}"
-         except requests.exceptions.Timeout as timeout_err:
-             self.report_error(
-                 "❗ Timeout Error",
-                 ReportAction.READ,
-             )
-             return f"Timeout Error: Request timed out after {timeout} seconds"
-         except requests.exceptions.RequestException as req_err:
-             self.report_error(
-                 "❗ Request Error",
-                 ReportAction.READ,
-             )
-             return f"Request Error: {str(req_err)}"
-         except Exception as err:
-             self.report_error(
-                 "❗ Error fetching URL",
-                 ReportAction.READ,
-             )
-             return f"Error: {str(err)}"
-
-     def _extract_and_clean_text(self, html_content: str) -> str:
-         """Extract and clean text from HTML content."""
-         soup = BeautifulSoup(html_content, "html.parser")
-         text = soup.get_text(separator="\n")
-
-         # Clean up excessive whitespace
-         lines = [line.strip() for line in text.splitlines() if line.strip()]
-         return "\n".join(lines)
-
-     def _filter_by_search_strings(
-         self, text: str, search_strings: list[str], context_chars: int
-     ) -> str:
-         """Filter text by search strings with context."""
-         filtered = []
-         for s in search_strings:
-             idx = text.find(s)
-             if idx != -1:
-                 start = max(0, idx - context_chars)
-                 end = min(len(text), idx + len(s) + context_chars)
-                 snippet = text[start:end]
-                 filtered.append(snippet)
-
-         if filtered:
-             return "\n...\n".join(filtered)
-         else:
-             return tr("No lines found for the provided search strings.")
-
-     def _apply_limits(self, text: str, max_length: int, max_lines: int) -> str:
-         """Apply length and line limits to text."""
-         # Apply length limit
-         if len(text) > max_length:
-             text = text[:max_length] + "\n... (content truncated due to length limit)"
-
-         # Apply line limit
-         lines = text.splitlines()
-         if len(lines) > max_lines:
-             text = (
-                 "\n".join(lines[:max_lines])
-                 + "\n... (content truncated due to line limit)"
-             )
-
-         return text
-
-     @protect_against_loops(max_calls=5, time_window=10.0, key_field="url")
-     def run(
-         self,
-         url: str,
-         search_strings: list[str] = None,
-         max_length: int = 5000,
-         max_lines: int = 200,
-         context_chars: int = 400,
-         timeout: int = 10,
-         save_to_file: str = None,
-         headers: Dict[str, str] = None,
-         cookies: Dict[str, str] = None,
-         follow_redirects: bool = True,
-     ) -> str:
-         if not url.strip():
-             self.report_warning(tr("ℹ️ Empty URL provided."), ReportAction.READ)
-             return tr("Warning: Empty URL provided. Operation skipped.")
-
-         self.report_action(tr("🌐 Fetch URL '{url}' ...", url=url), ReportAction.READ)
-
-         # Check if we should save to file
-         if save_to_file:
-             html_content = self._fetch_url_content(
-                 url,
-                 timeout=timeout,
-                 headers=headers,
-                 cookies=cookies,
-                 follow_redirects=follow_redirects,
-             )
-             if (
-                 html_content.startswith("HTTP Error ")
-                 or html_content == "Error"
-                 or html_content == "Blocked"
-             ):
-                 return html_content
-
-             try:
-                 with open(save_to_file, "w", encoding="utf-8") as f:
-                     f.write(html_content)
-                 file_size = len(html_content)
-                 self.report_success(
-                     tr(
-                         "✅ Saved {size} bytes to {file}",
-                         size=file_size,
-                         file=save_to_file,
-                     ),
-                     ReportAction.READ,
-                 )
-                 return tr("Successfully saved content to: {file}", file=save_to_file)
-             except IOError as e:
-                 error_msg = tr("Error saving to file: {error}", error=str(e))
-                 self.report_error(error_msg, ReportAction.READ)
-                 return error_msg
-
-         # Normal processing path
-         html_content = self._fetch_url_content(
-             url,
-             timeout=timeout,
-             headers=headers,
-             cookies=cookies,
-             follow_redirects=follow_redirects,
-         )
-         if (
-             html_content.startswith("HTTP Error ")
-             or html_content == "Error"
-             or html_content == "Blocked"
-         ):
-             return html_content
-
-         # Extract and clean text
-         text = self._extract_and_clean_text(html_content)
-
-         # Filter by search strings if provided
-         if search_strings:
-             text = self._filter_by_search_strings(text, search_strings, context_chars)
-
-         # Apply limits
-         text = self._apply_limits(text, max_length, max_lines)
-
-         # Report success
-         num_lines = len(text.splitlines())
-         total_chars = len(text)
-         self.report_success(
-             tr(
-                 "✅ {num_lines} {line_word}, {chars} chars",
-                 num_lines=num_lines,
-                 line_word=pluralize("line", num_lines),
-                 chars=total_chars,
-             ),
-             ReportAction.READ,
-         )
-         return text
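
As a quick reference for the removed tool above, here is a minimal, standalone sketch of the error-cache expiration policy described in its docstring (403 cached for 24 hours, 404 for 1 hour, other 4xx for 30 minutes, 5xx never cached). The names below (error_cache, remember_error, ttl_for, cached_error) are illustrative only and are not part of the janito API.

import time

# Illustrative only: mirrors the TTL policy of the removed FetchUrlTool error cache.
# Keys are URLs; values hold the HTTP status, message, and the time the error was seen.
error_cache = {}

def remember_error(url, status_code, message):
    # Record the failure together with the time it was observed.
    error_cache[url] = {"status_code": status_code, "message": message, "timestamp": time.time()}

def ttl_for(status_code):
    if status_code == 403:
        return 24 * 3600   # 403s are treated as long-lived
    if status_code == 404:
        return 3600        # 404s may be temporary
    return 1800            # other 4xx errors

def cached_error(url):
    entry = error_cache.get(url)
    if entry is None:
        return None
    if time.time() - entry["timestamp"] > ttl_for(entry["status_code"]):
        del error_cache[url]   # expired entries are dropped on lookup
        return None
    return entry["message"]

For example, remember_error("https://example.com", 404, "HTTP 404 Not Found") makes cached_error("https://example.com") return that message for the next hour and None afterwards.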