janito 3.12.1__py3-none-any.whl → 3.12.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- janito/agent/setup_agent.py +378 -377
- janito/cli/chat_mode/session.py +505 -505
- janito/cli/cli_commands/list_profiles.py +104 -107
- janito/cli/cli_commands/show_system_prompt.py +166 -166
- janito/cli/core/runner.py +250 -266
- janito/cli/main_cli.py +520 -519
- janito/cli/single_shot_mode/handler.py +167 -167
- janito/llm/__init__.py +6 -5
- janito/llm/driver.py +290 -254
- janito/llm/response_cache.py +57 -0
- janito/plugins/builtin.py +64 -88
- janito/plugins/tools/local/__init__.py +82 -80
- janito/plugins/tools/local/markdown_view.py +94 -0
- janito/plugins/tools/local/read_files.py +1 -1
- janito/plugins/tools/local/replace_text_in_file.py +1 -1
- janito/plugins/tools/local/search_text/core.py +2 -2
- janito/plugins/tools/local/show_image.py +119 -74
- janito/plugins/tools/local/show_image_grid.py +134 -76
- janito/plugins/tools/local/view_file.py +3 -3
- janito/providers/alibaba/model_info.py +136 -105
- janito/providers/alibaba/provider.py +104 -104
- {janito-3.12.1.dist-info → janito-3.12.3.dist-info}/METADATA +1 -1
- {janito-3.12.1.dist-info → janito-3.12.3.dist-info}/RECORD +27 -25
- {janito-3.12.1.dist-info → janito-3.12.3.dist-info}/WHEEL +0 -0
- {janito-3.12.1.dist-info → janito-3.12.3.dist-info}/entry_points.txt +0 -0
- {janito-3.12.1.dist-info → janito-3.12.3.dist-info}/licenses/LICENSE +0 -0
- {janito-3.12.1.dist-info → janito-3.12.3.dist-info}/top_level.txt +0 -0
@@ -1,76 +1,134 @@
|
|
1
|
-
from janito.tools.tool_base import ToolBase, ToolPermissions
|
2
|
-
from janito.report_events import ReportAction
|
3
|
-
from janito.plugins.tools.local.adapter import register_local_tool
|
4
|
-
from janito.i18n import tr
|
5
|
-
from janito.tools.loop_protection_decorator import protect_against_loops
|
6
|
-
from typing import Sequence
|
7
|
-
|
8
|
-
|
9
|
-
@register_local_tool
|
10
|
-
class ShowImageGridTool(ToolBase):
|
11
|
-
"""Display multiple images in a grid inline in the terminal using rich.
|
12
|
-
|
13
|
-
Args:
|
14
|
-
paths (list[str]): List of image file paths.
|
15
|
-
columns (int, optional): Number of columns in the grid. Default: 2.
|
16
|
-
width (int, optional): Max width for each image cell. Default: None (auto).
|
17
|
-
height (int, optional): Max height for each image cell. Default: None (auto).
|
18
|
-
preserve_aspect (bool, optional): Preserve aspect ratio. Default: True.
|
19
|
-
|
20
|
-
Returns:
|
21
|
-
str: Status string summarizing the grid display.
|
22
|
-
"""
|
23
|
-
|
24
|
-
permissions = ToolPermissions(read=True)
|
25
|
-
tool_name = "show_image_grid"
|
26
|
-
|
27
|
-
@protect_against_loops(max_calls=5, time_window=10.0, key_field="paths")
|
28
|
-
def run(
|
29
|
-
self,
|
30
|
-
paths: Sequence[str],
|
31
|
-
columns: int = 2,
|
32
|
-
width: int | None = None,
|
33
|
-
height: int | None = None,
|
34
|
-
preserve_aspect: bool = True,
|
35
|
-
) -> str:
|
36
|
-
from janito.tools.path_utils import expand_path
|
37
|
-
from janito.tools.tool_utils import display_path
|
38
|
-
import os
|
39
|
-
|
40
|
-
try:
|
41
|
-
from rich.console import Console
|
42
|
-
from rich.columns import Columns
|
43
|
-
from PIL import Image as PILImage
|
44
|
-
from rich.panel import Panel
|
45
|
-
except Exception as e:
|
46
|
-
msg = tr("⚠️ Missing dependency: PIL/Pillow ({error})", error=e)
|
47
|
-
self.report_error(msg)
|
48
|
-
return msg
|
49
|
-
|
50
|
-
if not paths:
|
51
|
-
return tr("No images provided")
|
52
|
-
|
53
|
-
self.report_action(tr("🖼️ Show image grid ({n} images)", n=len(paths)), ReportAction.READ)
|
54
|
-
|
55
|
-
console = Console()
|
56
|
-
images = []
|
57
|
-
shown = 0
|
58
|
-
|
59
|
-
|
60
|
-
|
61
|
-
|
62
|
-
|
63
|
-
|
64
|
-
|
65
|
-
|
66
|
-
|
67
|
-
|
68
|
-
|
69
|
-
|
70
|
-
|
71
|
-
|
72
|
-
|
73
|
-
|
74
|
-
|
75
|
-
|
76
|
-
|
from janito.tools.tool_base import ToolBase, ToolPermissions
from janito.report_events import ReportAction
from janito.plugins.tools.local.adapter import register_local_tool
from janito.i18n import tr
from janito.tools.loop_protection_decorator import protect_against_loops
from typing import Sequence


@register_local_tool
class ShowImageGridTool(ToolBase):
    """Display multiple images in a grid inline in the terminal using rich.

    Each readable image becomes a rich Panel containing an ASCII-art preview
    (when numpy is available) or a plain "path (WxH)" line; panels are laid
    out in rows of ``columns`` cells.

    Args:
        paths (list[str]): List of image file paths.
        columns (int, optional): Number of columns in the grid. Default: 2.
        width (int, optional): Max width for each image cell. Default: None (auto).
        height (int, optional): Max height for each image cell. Default: None (auto).
        preserve_aspect (bool, optional): Preserve aspect ratio. Default: True.

    Returns:
        str: Status string summarizing the grid display.
    """

    permissions = ToolPermissions(read=True)
    tool_name = "show_image_grid"

    @protect_against_loops(max_calls=5, time_window=10.0, key_field="paths")
    def run(
        self,
        paths: Sequence[str],
        columns: int = 2,
        width: int | None = None,
        height: int | None = None,
        preserve_aspect: bool = True,
    ) -> str:
        from janito.tools.path_utils import expand_path
        from janito.tools.tool_utils import display_path
        import os

        try:
            from rich.console import Console
            from rich.columns import Columns
            from rich.panel import Panel
            # Hoisted out of the per-image loop: the import is loop-invariant.
            from rich.text import Text
            from PIL import Image as PILImage
        except Exception as e:
            msg = tr("⚠️ Missing dependency: PIL/Pillow ({error})", error=e)
            self.report_error(msg)
            return msg

        # numpy is only needed for the ASCII-art preview. Import it guarded
        # so a missing numpy degrades to the info-only panel instead of
        # crashing the whole tool with an ImportError.
        try:
            import numpy as np
        except Exception:
            np = None

        if not paths:
            return tr("No images provided")

        self.report_action(tr("🖼️ Show image grid ({n} images)", n=len(paths)), ReportAction.READ)

        console = Console()
        images = []
        shown = 0

        def image_to_ascii(image, target_width=20, target_height=10):
            """Return an ASCII-art rendering of *image*, or None on any failure."""
            if np is None:
                return None
            try:
                # Grayscale + downscale to the preview cell size.
                img_gray = image.convert('L')
                img_resized = img_gray.resize((target_width, target_height))
                pixels = np.array(img_resized)

                # ASCII characters from dark to light.
                ascii_chars = "@%#*+=-:. "

                # Map each pixel value (0-255) to an ASCII index.
                ascii_art = ""
                for row in pixels:
                    for pixel in row:
                        ascii_index = int((pixel / 255) * (len(ascii_chars) - 1))
                        ascii_art += ascii_chars[ascii_index]
                    ascii_art += "\n"
                return ascii_art.strip()
            except Exception:
                return None

        for p in paths:
            fp = expand_path(p)
            if not os.path.exists(fp):
                self.report_warning(tr("❗ not found: {p}", p=display_path(fp)))
                continue
            try:
                # Context manager closes the underlying file handle; PIL
                # otherwise keeps it open lazily (file-descriptor leak).
                with PILImage.open(fp) as img:
                    ascii_art = image_to_ascii(img, 20, 10)

                    if ascii_art:
                        title_text = Text(f"{display_path(fp)}\n{img.width}×{img.height}", style="bold")
                        ascii_text = Text(ascii_art, style="dim")
                        combined_text = Text.assemble(title_text, "\n", ascii_text)
                        panel = Panel(combined_text, title="Image", border_style="dim")
                    else:
                        # Fallback to just info if ASCII art fails
                        title = f"{display_path(fp)} ({img.width}x{img.height})"
                        panel = Panel.fit(title, title=display_path(fp), border_style="dim")

                images.append(panel)
                shown += 1
            except Exception as e:
                self.report_warning(tr("⚠️ Skipped {p}: {e}", p=display_path(fp), e=e))

        if not images:
            return tr("No images could be displayed")

        # Manual row layout: rich's Columns has no per-row column count,
        # so chunk the panels into rows of `columns` cells ourselves.
        if columns > 1:
            for i in range(0, len(images), columns):
                console.print(Columns(images[i:i + columns], equal=True, expand=True))
        else:
            # Single column - print each image panel separately.
            for image_panel in images:
                console.print(image_panel)
        self.report_success(tr("✅ Displayed {n} images", n=shown))
        return tr("Displayed {shown}/{total} images in a {cols}x? grid", shown=shown, total=len(paths), cols=columns)
|
@@ -21,7 +21,7 @@ class ViewFileTool(ToolBase):
|
|
21
21
|
Returns:
|
22
22
|
str: File content with a header indicating the file name and line range. Example:
|
23
23
|
- "---\nFile: /path/to/file.py | Lines: 1-10 (of 100)\n---\n<lines...>"
|
24
|
-
- "---\nFile: /path/to/file.py | All lines (total: 100
|
24
|
+
- "---\nFile: /path/to/file.py | All lines (total: 100 ⚪)\n---\n<all lines...>"
|
25
25
|
- "Error reading file: <error message>"
|
26
26
|
- "❗ not found"
|
27
27
|
"""
|
@@ -123,7 +123,7 @@ class ViewFileTool(ToolBase):
|
|
123
123
|
else:
|
124
124
|
self.report_success(
|
125
125
|
tr(
|
126
|
-
" ✅ {selected_len} {line_word}
|
126
|
+
" ✅ {selected_len} {line_word} ⚪",
|
127
127
|
selected_len=selected_len,
|
128
128
|
line_word=pluralize("line", selected_len),
|
129
129
|
)
|
@@ -158,7 +158,7 @@ class ViewFileTool(ToolBase):
|
|
158
158
|
)
|
159
159
|
else:
|
160
160
|
return tr(
|
161
|
-
"---\n{disp_path} All lines (total: {total_lines}
|
161
|
+
"---\n{disp_path} All lines (total: {total_lines} ⚪)\n---\n",
|
162
162
|
disp_path=disp_path,
|
163
163
|
total_lines=total_lines,
|
164
164
|
)
|
@@ -1,105 +1,136 @@
|
|
1
|
-
from janito.llm.model import LLMModelInfo
|
2
|
-
|
3
|
-
MODEL_SPECS = {
|
4
|
-
"qwen-turbo": LLMModelInfo(
|
5
|
-
name="qwen-turbo",
|
6
|
-
context=1008192,
|
7
|
-
max_response=8192,
|
8
|
-
category="Alibaba Qwen Turbo Model (OpenAI-compatible)",
|
9
|
-
driver="OpenAIModelDriver",
|
10
|
-
thinking_supported=True,
|
11
|
-
thinking=False,
|
12
|
-
max_cot=8192,
|
13
|
-
),
|
14
|
-
"qwen-plus": LLMModelInfo(
|
15
|
-
name="qwen-plus",
|
16
|
-
context=131072,
|
17
|
-
max_response=8192,
|
18
|
-
category="Alibaba Qwen Plus Model (OpenAI-compatible)",
|
19
|
-
driver="OpenAIModelDriver",
|
20
|
-
thinking_supported=True,
|
21
|
-
thinking=False,
|
22
|
-
max_cot=8192,
|
23
|
-
),
|
24
|
-
"qwen-
|
25
|
-
name="qwen-
|
26
|
-
context=
|
27
|
-
max_response=8192,
|
28
|
-
category="Alibaba Qwen
|
29
|
-
driver="OpenAIModelDriver",
|
30
|
-
thinking_supported=True,
|
31
|
-
thinking=False,
|
32
|
-
max_cot=8192,
|
33
|
-
),
|
34
|
-
"
|
35
|
-
name="
|
36
|
-
context=
|
37
|
-
max_response=
|
38
|
-
category="Alibaba
|
39
|
-
driver="OpenAIModelDriver",
|
40
|
-
thinking_supported=True,
|
41
|
-
thinking=False,
|
42
|
-
max_cot=
|
43
|
-
),
|
44
|
-
"qwen3-coder-
|
45
|
-
name="qwen3-coder-
|
46
|
-
context=
|
47
|
-
max_response=65536,
|
48
|
-
category="Alibaba Qwen3 Coder
|
49
|
-
driver="OpenAIModelDriver",
|
50
|
-
thinking_supported=True,
|
51
|
-
thinking=False,
|
52
|
-
max_cot=65536,
|
53
|
-
),
|
54
|
-
|
55
|
-
|
56
|
-
|
57
|
-
|
58
|
-
|
59
|
-
|
60
|
-
|
61
|
-
thinking=
|
62
|
-
|
63
|
-
|
64
|
-
)
|
65
|
-
"qwen3-235b-a22b-
|
66
|
-
name="qwen3-235b-a22b-
|
67
|
-
context=
|
68
|
-
max_response=32768,
|
69
|
-
category="Alibaba Qwen3 235B A22B
|
70
|
-
driver="OpenAIModelDriver",
|
71
|
-
|
72
|
-
|
73
|
-
max_cot=32768,
|
74
|
-
),
|
75
|
-
"qwen3-
|
76
|
-
name="qwen3-
|
77
|
-
context=
|
78
|
-
max_response=32768,
|
79
|
-
category="Alibaba Qwen3
|
80
|
-
driver="OpenAIModelDriver",
|
81
|
-
|
82
|
-
|
83
|
-
max_cot=32768,
|
84
|
-
),
|
85
|
-
"qwen3-30b-a3b-
|
86
|
-
name="qwen3-30b-a3b-
|
87
|
-
context=
|
88
|
-
max_response=32768,
|
89
|
-
category="Alibaba Qwen3 30B A3B
|
90
|
-
driver="OpenAIModelDriver",
|
91
|
-
|
92
|
-
|
93
|
-
max_cot=32768,
|
94
|
-
),
|
95
|
-
"qwen3-
|
96
|
-
name="qwen3-
|
97
|
-
context=
|
98
|
-
max_response=32768,
|
99
|
-
category="Alibaba Qwen3
|
100
|
-
driver="OpenAIModelDriver",
|
101
|
-
thinking_supported=True,
|
102
|
-
thinking=False,
|
103
|
-
max_cot=32768,
|
104
|
-
),
|
105
|
-
|
1
|
+
from janito.llm.model import LLMModelInfo
|
2
|
+
|
3
|
+
MODEL_SPECS = {
|
4
|
+
"qwen-turbo": LLMModelInfo(
|
5
|
+
name="qwen-turbo",
|
6
|
+
context=1008192,
|
7
|
+
max_response=8192,
|
8
|
+
category="Alibaba Qwen Turbo Model (OpenAI-compatible)",
|
9
|
+
driver="OpenAIModelDriver",
|
10
|
+
thinking_supported=True,
|
11
|
+
thinking=False,
|
12
|
+
max_cot=8192,
|
13
|
+
),
|
14
|
+
"qwen-plus": LLMModelInfo(
|
15
|
+
name="qwen-plus",
|
16
|
+
context=131072,
|
17
|
+
max_response=8192,
|
18
|
+
category="Alibaba Qwen Plus Model (OpenAI-compatible)",
|
19
|
+
driver="OpenAIModelDriver",
|
20
|
+
thinking_supported=True,
|
21
|
+
thinking=False,
|
22
|
+
max_cot=8192,
|
23
|
+
),
|
24
|
+
"qwen-flash": LLMModelInfo(
|
25
|
+
name="qwen-flash",
|
26
|
+
context=1000000,
|
27
|
+
max_response=8192,
|
28
|
+
category="Alibaba Qwen Flash Model (OpenAI-compatible)",
|
29
|
+
driver="OpenAIModelDriver",
|
30
|
+
thinking_supported=True,
|
31
|
+
thinking=False,
|
32
|
+
max_cot=8192,
|
33
|
+
),
|
34
|
+
"qwen-max": LLMModelInfo(
|
35
|
+
name="qwen-max",
|
36
|
+
context=32768,
|
37
|
+
max_response=8192,
|
38
|
+
category="Alibaba Qwen Max Model (OpenAI-compatible)",
|
39
|
+
driver="OpenAIModelDriver",
|
40
|
+
thinking_supported=True,
|
41
|
+
thinking=False,
|
42
|
+
max_cot=8192,
|
43
|
+
),
|
44
|
+
"qwen3-coder-plus": LLMModelInfo(
|
45
|
+
name="qwen3-coder-plus",
|
46
|
+
context=1048576,
|
47
|
+
max_response=65536,
|
48
|
+
category="Alibaba Qwen3 Coder Plus Model (OpenAI-compatible)",
|
49
|
+
driver="OpenAIModelDriver",
|
50
|
+
thinking_supported=True,
|
51
|
+
thinking=False,
|
52
|
+
max_cot=65536,
|
53
|
+
),
|
54
|
+
"qwen3-coder-480b-a35b-instruct": LLMModelInfo(
|
55
|
+
name="qwen3-coder-480b-a35b-instruct",
|
56
|
+
context=262144,
|
57
|
+
max_response=65536,
|
58
|
+
category="Alibaba Qwen3 Coder 480B A35B Instruct Model (OpenAI-compatible)",
|
59
|
+
driver="OpenAIModelDriver",
|
60
|
+
thinking_supported=True,
|
61
|
+
thinking=False,
|
62
|
+
max_cot=65536,
|
63
|
+
),
|
64
|
+
# Qwen3 1M context models (July 2025 update)
|
65
|
+
"qwen3-235b-a22b-thinking-2507": LLMModelInfo(
|
66
|
+
name="qwen3-235b-a22b-thinking-2507",
|
67
|
+
context=131072, # Supports up to 1M with special config
|
68
|
+
max_response=32768,
|
69
|
+
category="Alibaba Qwen3 235B A22B Thinking Model (OpenAI-compatible)",
|
70
|
+
driver="OpenAIModelDriver",
|
71
|
+
thinking=True,
|
72
|
+
thinking_supported=True,
|
73
|
+
max_cot=32768,
|
74
|
+
),
|
75
|
+
"qwen3-235b-a22b-instruct-2507": LLMModelInfo(
|
76
|
+
name="qwen3-235b-a22b-instruct-2507",
|
77
|
+
context=129024, # Supports up to 1M with special config
|
78
|
+
max_response=32768,
|
79
|
+
category="Alibaba Qwen3 235B A22B Instruct Model (OpenAI-compatible)",
|
80
|
+
driver="OpenAIModelDriver",
|
81
|
+
thinking_supported=True,
|
82
|
+
thinking=False,
|
83
|
+
max_cot=32768,
|
84
|
+
),
|
85
|
+
"qwen3-30b-a3b-thinking-2507": LLMModelInfo(
|
86
|
+
name="qwen3-30b-a3b-thinking-2507",
|
87
|
+
context=126976, # Supports up to 1M with special config
|
88
|
+
max_response=32768,
|
89
|
+
category="Alibaba Qwen3 30B A3B Thinking Model (OpenAI-compatible)",
|
90
|
+
driver="OpenAIModelDriver",
|
91
|
+
thinking=True,
|
92
|
+
thinking_supported=True,
|
93
|
+
max_cot=32768,
|
94
|
+
),
|
95
|
+
"qwen3-30b-a3b-instruct-2507": LLMModelInfo(
|
96
|
+
name="qwen3-30b-a3b-instruct-2507",
|
97
|
+
context=129024, # Supports up to 1M with special config
|
98
|
+
max_response=32768,
|
99
|
+
category="Alibaba Qwen3 30B A3B Instruct Model (OpenAI-compatible)",
|
100
|
+
driver="OpenAIModelDriver",
|
101
|
+
thinking_supported=True,
|
102
|
+
thinking=False,
|
103
|
+
max_cot=32768,
|
104
|
+
),
|
105
|
+
# Qwen3 Next models (September 2025 update)
|
106
|
+
"qwen3-next-80b-a3b-instruct": LLMModelInfo(
|
107
|
+
name="qwen3-next-80b-a3b-instruct",
|
108
|
+
context=262144, # 256K context window (Qwen3-Max Preview)
|
109
|
+
max_response=65536, # Matches Qwen3-Max Preview output limit
|
110
|
+
category="Alibaba Qwen3-Max Preview (256K) - 80B A3B Instruct Model (OpenAI-compatible)",
|
111
|
+
driver="OpenAIModelDriver",
|
112
|
+
thinking_supported=True,
|
113
|
+
thinking=False,
|
114
|
+
max_cot=65536,
|
115
|
+
),
|
116
|
+
"qwen3-next-80b-a3b-thinking": LLMModelInfo(
|
117
|
+
name="qwen3-next-80b-a3b-thinking",
|
118
|
+
context=262144, # 256K context window (Qwen3-Max Preview)
|
119
|
+
max_response=65536, # Matches Qwen3-Max Preview output limit
|
120
|
+
category="Alibaba Qwen3-Max Preview (256K) - 80B A3B Thinking Model (OpenAI-compatible)",
|
121
|
+
driver="OpenAIModelDriver",
|
122
|
+
thinking=True,
|
123
|
+
thinking_supported=True,
|
124
|
+
max_cot=65536,
|
125
|
+
),
|
126
|
+
"qwen3-max-preview": LLMModelInfo(
|
127
|
+
name="qwen3-max-preview",
|
128
|
+
context=262144, # 256K context window (Qwen3-Max Preview)
|
129
|
+
max_response=65536, # Matches Qwen3-Max Preview output limit
|
130
|
+
category="Alibaba Qwen3-Max Preview (256K) - Standard Model (OpenAI-compatible)",
|
131
|
+
driver="OpenAIModelDriver",
|
132
|
+
thinking_supported=True,
|
133
|
+
thinking=False,
|
134
|
+
max_cot=65536,
|
135
|
+
),
|
136
|
+
}
|