kollabor-0.4.9-py3-none-any.whl → kollabor-0.4.15-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agents/__init__.py +2 -0
- agents/coder/__init__.py +0 -0
- agents/coder/agent.json +4 -0
- agents/coder/api-integration.md +2150 -0
- agents/coder/cli-pretty.md +765 -0
- agents/coder/code-review.md +1092 -0
- agents/coder/database-design.md +1525 -0
- agents/coder/debugging.md +1102 -0
- agents/coder/dependency-management.md +1397 -0
- agents/coder/git-workflow.md +1099 -0
- agents/coder/refactoring.md +1454 -0
- agents/coder/security-hardening.md +1732 -0
- agents/coder/system_prompt.md +1448 -0
- agents/coder/tdd.md +1367 -0
- agents/creative-writer/__init__.py +0 -0
- agents/creative-writer/agent.json +4 -0
- agents/creative-writer/character-development.md +1852 -0
- agents/creative-writer/dialogue-craft.md +1122 -0
- agents/creative-writer/plot-structure.md +1073 -0
- agents/creative-writer/revision-editing.md +1484 -0
- agents/creative-writer/system_prompt.md +690 -0
- agents/creative-writer/worldbuilding.md +2049 -0
- agents/data-analyst/__init__.py +30 -0
- agents/data-analyst/agent.json +4 -0
- agents/data-analyst/data-visualization.md +992 -0
- agents/data-analyst/exploratory-data-analysis.md +1110 -0
- agents/data-analyst/pandas-data-manipulation.md +1081 -0
- agents/data-analyst/sql-query-optimization.md +881 -0
- agents/data-analyst/statistical-analysis.md +1118 -0
- agents/data-analyst/system_prompt.md +928 -0
- agents/default/__init__.py +0 -0
- agents/default/agent.json +4 -0
- agents/default/dead-code.md +794 -0
- agents/default/explore-agent-system.md +585 -0
- agents/default/system_prompt.md +1448 -0
- agents/kollabor/__init__.py +0 -0
- agents/kollabor/analyze-plugin-lifecycle.md +175 -0
- agents/kollabor/analyze-terminal-rendering.md +388 -0
- agents/kollabor/code-review.md +1092 -0
- agents/kollabor/debug-mcp-integration.md +521 -0
- agents/kollabor/debug-plugin-hooks.md +547 -0
- agents/kollabor/debugging.md +1102 -0
- agents/kollabor/dependency-management.md +1397 -0
- agents/kollabor/git-workflow.md +1099 -0
- agents/kollabor/inspect-llm-conversation.md +148 -0
- agents/kollabor/monitor-event-bus.md +558 -0
- agents/kollabor/profile-performance.md +576 -0
- agents/kollabor/refactoring.md +1454 -0
- agents/kollabor/system_prompt copy.md +1448 -0
- agents/kollabor/system_prompt.md +757 -0
- agents/kollabor/trace-command-execution.md +178 -0
- agents/kollabor/validate-config.md +879 -0
- agents/research/__init__.py +0 -0
- agents/research/agent.json +4 -0
- agents/research/architecture-mapping.md +1099 -0
- agents/research/codebase-analysis.md +1077 -0
- agents/research/dependency-audit.md +1027 -0
- agents/research/performance-profiling.md +1047 -0
- agents/research/security-review.md +1359 -0
- agents/research/system_prompt.md +492 -0
- agents/technical-writer/__init__.py +0 -0
- agents/technical-writer/agent.json +4 -0
- agents/technical-writer/api-documentation.md +2328 -0
- agents/technical-writer/changelog-management.md +1181 -0
- agents/technical-writer/readme-writing.md +1360 -0
- agents/technical-writer/style-guide.md +1410 -0
- agents/technical-writer/system_prompt.md +653 -0
- agents/technical-writer/tutorial-creation.md +1448 -0
- core/__init__.py +0 -2
- core/application.py +343 -88
- core/cli.py +229 -10
- core/commands/menu_renderer.py +463 -59
- core/commands/registry.py +14 -9
- core/commands/system_commands.py +2461 -14
- core/config/loader.py +151 -37
- core/config/service.py +18 -6
- core/events/bus.py +29 -9
- core/events/executor.py +205 -75
- core/events/models.py +27 -8
- core/fullscreen/command_integration.py +20 -24
- core/fullscreen/components/__init__.py +10 -1
- core/fullscreen/components/matrix_components.py +1 -2
- core/fullscreen/components/space_shooter_components.py +654 -0
- core/fullscreen/plugin.py +5 -0
- core/fullscreen/renderer.py +52 -13
- core/fullscreen/session.py +52 -15
- core/io/__init__.py +29 -5
- core/io/buffer_manager.py +6 -1
- core/io/config_status_view.py +7 -29
- core/io/core_status_views.py +267 -347
- core/io/input/__init__.py +25 -0
- core/io/input/command_mode_handler.py +711 -0
- core/io/input/display_controller.py +128 -0
- core/io/input/hook_registrar.py +286 -0
- core/io/input/input_loop_manager.py +421 -0
- core/io/input/key_press_handler.py +502 -0
- core/io/input/modal_controller.py +1011 -0
- core/io/input/paste_processor.py +339 -0
- core/io/input/status_modal_renderer.py +184 -0
- core/io/input_errors.py +5 -1
- core/io/input_handler.py +211 -2452
- core/io/key_parser.py +7 -0
- core/io/layout.py +15 -3
- core/io/message_coordinator.py +111 -2
- core/io/message_renderer.py +129 -4
- core/io/status_renderer.py +147 -607
- core/io/terminal_renderer.py +97 -51
- core/io/terminal_state.py +21 -4
- core/io/visual_effects.py +816 -165
- core/llm/agent_manager.py +1063 -0
- core/llm/api_adapters/__init__.py +44 -0
- core/llm/api_adapters/anthropic_adapter.py +432 -0
- core/llm/api_adapters/base.py +241 -0
- core/llm/api_adapters/openai_adapter.py +326 -0
- core/llm/api_communication_service.py +167 -113
- core/llm/conversation_logger.py +322 -16
- core/llm/conversation_manager.py +556 -30
- core/llm/file_operations_executor.py +84 -32
- core/llm/llm_service.py +934 -103
- core/llm/mcp_integration.py +541 -57
- core/llm/message_display_service.py +135 -18
- core/llm/plugin_sdk.py +1 -2
- core/llm/profile_manager.py +1183 -0
- core/llm/response_parser.py +274 -56
- core/llm/response_processor.py +16 -3
- core/llm/tool_executor.py +6 -1
- core/logging/__init__.py +2 -0
- core/logging/setup.py +34 -6
- core/models/resume.py +54 -0
- core/plugins/__init__.py +4 -2
- core/plugins/base.py +127 -0
- core/plugins/collector.py +23 -161
- core/plugins/discovery.py +37 -3
- core/plugins/factory.py +6 -12
- core/plugins/registry.py +5 -17
- core/ui/config_widgets.py +128 -28
- core/ui/live_modal_renderer.py +2 -1
- core/ui/modal_actions.py +5 -0
- core/ui/modal_overlay_renderer.py +0 -60
- core/ui/modal_renderer.py +268 -7
- core/ui/modal_state_manager.py +29 -4
- core/ui/widgets/base_widget.py +7 -0
- core/updates/__init__.py +10 -0
- core/updates/version_check_service.py +348 -0
- core/updates/version_comparator.py +103 -0
- core/utils/config_utils.py +685 -526
- core/utils/plugin_utils.py +1 -1
- core/utils/session_naming.py +111 -0
- fonts/LICENSE +21 -0
- fonts/README.md +46 -0
- fonts/SymbolsNerdFont-Regular.ttf +0 -0
- fonts/SymbolsNerdFontMono-Regular.ttf +0 -0
- fonts/__init__.py +44 -0
- {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/METADATA +54 -4
- kollabor-0.4.15.dist-info/RECORD +228 -0
- {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/top_level.txt +2 -0
- plugins/agent_orchestrator/__init__.py +39 -0
- plugins/agent_orchestrator/activity_monitor.py +181 -0
- plugins/agent_orchestrator/file_attacher.py +77 -0
- plugins/agent_orchestrator/message_injector.py +135 -0
- plugins/agent_orchestrator/models.py +48 -0
- plugins/agent_orchestrator/orchestrator.py +403 -0
- plugins/agent_orchestrator/plugin.py +976 -0
- plugins/agent_orchestrator/xml_parser.py +191 -0
- plugins/agent_orchestrator_plugin.py +9 -0
- plugins/enhanced_input/box_styles.py +1 -0
- plugins/enhanced_input/color_engine.py +19 -4
- plugins/enhanced_input/config.py +2 -2
- plugins/enhanced_input_plugin.py +61 -11
- plugins/fullscreen/__init__.py +6 -2
- plugins/fullscreen/example_plugin.py +1035 -222
- plugins/fullscreen/setup_wizard_plugin.py +592 -0
- plugins/fullscreen/space_shooter_plugin.py +131 -0
- plugins/hook_monitoring_plugin.py +436 -78
- plugins/query_enhancer_plugin.py +66 -30
- plugins/resume_conversation_plugin.py +1494 -0
- plugins/save_conversation_plugin.py +98 -32
- plugins/system_commands_plugin.py +70 -56
- plugins/tmux_plugin.py +154 -78
- plugins/workflow_enforcement_plugin.py +94 -92
- system_prompt/default.md +952 -886
- core/io/input_mode_manager.py +0 -402
- core/io/modal_interaction_handler.py +0 -315
- core/io/raw_input_processor.py +0 -946
- core/storage/__init__.py +0 -5
- core/storage/state_manager.py +0 -84
- core/ui/widget_integration.py +0 -222
- core/utils/key_reader.py +0 -171
- kollabor-0.4.9.dist-info/RECORD +0 -128
- {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/WHEEL +0 -0
- {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/entry_points.txt +0 -0
- {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/licenses/LICENSE +0 -0
core/io/core_status_views.py
CHANGED
@@ -1,410 +1,330 @@

"""Core status views for the Kollabor CLI application.

All views use the agnoster powerline style with lime/cyan color scheme.
"""

import logging
from pathlib import Path
from typing import List

from .status_renderer import StatusViewConfig, BlockConfig
from .visual_effects import AgnosterSegment, ColorPalette

logger = logging.getLogger(__name__)


class CoreStatusViews:
    """Provides agnoster-styled core status views."""

    def __init__(self, llm_service=None, config=None, profile_manager=None, agent_manager=None):
        """Initialize core status views."""
        self.llm_service = llm_service
        self.config = config
        self.profile_manager = profile_manager
        self.agent_manager = agent_manager

    def register_all_views(self, status_registry) -> None:
        """Register all core status views."""
        try:
            # View 1: Overview (priority 1100 - highest)
            status_registry.register_status_view("core", StatusViewConfig(
                name="Overview",
                plugin_source="core",
                priority=1100,
                blocks=[BlockConfig(
                    width_fraction=1.0,
                    content_provider=self._overview_content,
                    title="Overview",
                    priority=100,
                )],
            ))

            # View 2: Session Stats (priority 1000)
            status_registry.register_status_view("core", StatusViewConfig(
                name="Session",
                plugin_source="core",
                priority=1000,
                blocks=[BlockConfig(
                    width_fraction=1.0,
                    content_provider=self._session_content,
                    title="Session",
                    priority=100,
                )],
            ))

            # View 3: LLM Details (priority 900)
            status_registry.register_status_view("core", StatusViewConfig(
                name="LLM Details",
                plugin_source="core",
                priority=900,
                blocks=[BlockConfig(
                    width_fraction=1.0,
                    content_provider=self._llm_details_content,
                    title="LLM",
                    priority=100,
                )],
            ))

            # View 4: Minimal (priority 600)
            status_registry.register_status_view("core", StatusViewConfig(
                name="Minimal",
                plugin_source="core",
                priority=600,
                blocks=[BlockConfig(
                    width_fraction=1.0,
                    content_provider=self._minimal_content,
                    title="Minimal",
                    priority=100,
                )],
            ))

            logger.info("Registered 4 core status views")

        except Exception as e:
            logger.error(f"Failed to register core status views: {e}")

    def _get_dir_display(self) -> str:
        """Get formatted directory display."""
        try:
            cwd = Path.cwd()
            home = Path.home()
            if cwd == home:
                return "~"
            elif cwd.is_relative_to(home):
                rel_path = cwd.relative_to(home)
                parts = rel_path.parts
                if len(parts) > 2:
                    return f"~/{'/'.join(parts[-2:])}"
                return f"~/{rel_path}"
            return cwd.name or str(cwd)
        except Exception:
            return "?"

    def _get_profile_name(self) -> str:
        """Get active profile name."""
        if self.profile_manager:
            profile = self.profile_manager.get_active_profile()
            if profile:
                return profile.name
        return "default"

    def _get_agent_info(self) -> tuple:
        """Get active agent name, all skills, and active skills."""
        agent_name = None
        all_skills = []
        active_skills = set()
        if self.agent_manager:
            agent = self.agent_manager.get_active_agent()
            if agent:
                agent_name = agent.name
                all_skills = [s.name for s in agent.list_skills()]
                active_skills = set(agent.active_skills)
        return agent_name, all_skills, active_skills

    def _format_agent_skills_line(self, agent_name: str, all_skills: list, active_skills: set, max_width: int = 80) -> str:
        """Format agent/skills line with active skills bright, others dimmed.

        Format: agent: skill1* skill2 skill3 +N more
        Active skills are bright with *, inactive are dimmed.
        """
        if not agent_name:
            return ""

        # Start with electric arrow symbol and agent name
        line = f"{ColorPalette.BRIGHT_YELLOW}⌁{ColorPalette.RESET}{ColorPalette.LIME}{agent_name}{ColorPalette.RESET}⋮"
        current_len = len(agent_name) + 3  # " name: "

        # Sort skills: active first, then inactive
        sorted_skills = sorted(all_skills, key=lambda s: (s not in active_skills, s))

        skills_shown = 0
        max_skills_to_show = 3  # Show at most 3 skills before truncating

        for skill in sorted_skills:
            is_active = skill in active_skills

            # Check if we need to truncate
            if skills_shown >= max_skills_to_show and len(all_skills) > max_skills_to_show:
                remaining = len(all_skills) - skills_shown
                line += f"⋮{ColorPalette.DIM}+{remaining} more{ColorPalette.RESET}"
                break

            # Format skill
            if is_active:
                skill_text = f"{ColorPalette.BRIGHT_CYAN} ⏵{ColorPalette.BRIGHT_WHITE}{skill}{ColorPalette.RESET}"
            else:
                skill_text = f"{ColorPalette.DIM}⋮{skill}{ColorPalette.RESET}"

            line += skill_text
            skills_shown += 1

        return line.rstrip()

    def _get_model_info(self) -> tuple:
        """Get model name and endpoint from active profile."""
        model = "unknown"
        endpoint = ""

        # Prefer profile_manager as source of truth (supports env vars and reload)
        if self.profile_manager:
            profile = self.profile_manager.get_active_profile()
            if profile:
                model = profile.get_model() or "unknown"
                api_url = profile.get_endpoint() or ""
                if api_url:
                    try:
                        from urllib.parse import urlparse
                        endpoint = urlparse(api_url).hostname or ""
                    except Exception:
                        pass
                return model, endpoint

        # Fallback to api_service if no profile_manager
        if self.llm_service and hasattr(self.llm_service, "api_service"):
            api_service = self.llm_service.api_service
            model = getattr(api_service, "model", "unknown")
            api_url = getattr(api_service, "api_url", "")
            if api_url:
                try:
                    from urllib.parse import urlparse
                    endpoint = urlparse(api_url).hostname or ""
                except Exception:
                    pass
        return model, endpoint

    def _get_status(self) -> tuple:
        """Get status text and variant."""
        if self.llm_service and self.llm_service.is_processing:
            return "* Working", "light"
        return "* Ready", "normal"

    def _get_stats(self) -> tuple:
        """Get message count and token display."""
        msgs = 0
        tokens = 0
        if self.llm_service and hasattr(self.llm_service, "session_stats"):
            stats = self.llm_service.session_stats
            msgs = stats.get("messages", 0)
            tokens = stats.get("input_tokens", 0) + stats.get("output_tokens", 0)

        if tokens < 1000:
            token_display = f"{tokens}"
        elif tokens < 1000000:
            token_display = f"{tokens/1000:.1f}K"
        else:
            token_display = f"{tokens/1000000:.1f}M"

        return msgs, token_display

    def _overview_content(self) -> List[str]:
        """Agnoster overview: dir | profile | model@endpoint | status | stats.

        If an agent is active, adds a second line showing agent and skills.
        """
        try:
            seg = AgnosterSegment()

            # Directory (lime dark)
            seg.add_lime(self._get_dir_display(), "dark")

            # Profile (cyan dark)
            seg.add_cyan(self._get_profile_name(), "dark")

            # Model @ Endpoint (lime)
            model, endpoint = self._get_model_info()
            model_text = f"{model} @ {endpoint}" if endpoint else model
            seg.add_lime(model_text)

            # Status (cyan)
            status_text, variant = self._get_status()
            seg.add_cyan(status_text, variant)

            # Stats (neutral)
            msgs, token_display = self._get_stats()
            seg.add_neutral(f"{msgs} msg | {token_display} tok", "mid")

            lines = [seg.render()]

            # Add agent/skills line if agent is active
            agent_name, all_skills, active_skills = self._get_agent_info()
            if agent_name:
                agent_line = self._format_agent_skills_line(agent_name, all_skills, active_skills)
                if agent_line:
                    lines.append(agent_line)

            return lines

        except Exception as e:
            logger.error(f"Overview error: {e}")
            return [f"{ColorPalette.DIM}Status unavailable{ColorPalette.RESET}"]

    def _session_content(self) -> List[str]:
        """Agnoster session: messages | tokens in | tokens out | total."""
        try:
            seg = AgnosterSegment()

            msgs = 0
            tokens_in = 0
            tokens_out = 0
            if self.llm_service and hasattr(self.llm_service, "session_stats"):
                stats = self.llm_service.session_stats
                msgs = stats.get("messages", 0)
                tokens_in = stats.get("input_tokens", 0)
                tokens_out = stats.get("output_tokens", 0)

            seg.add_lime(f"Messages: {msgs}", "dark")
            seg.add_cyan(f"In: {tokens_in}", "dark")
            seg.add_lime(f"Out: {tokens_out}")
            seg.add_cyan(f"Total: {tokens_in + tokens_out}")

            return [seg.render()]

        except Exception as e:
            logger.error(f"Session error: {e}")
            return [f"{ColorPalette.DIM}Session unavailable{ColorPalette.RESET}"]

    def _llm_details_content(self) -> List[str]:
        """Agnoster LLM details: status | model | endpoint | temp | max_tokens."""
        try:
            seg = AgnosterSegment()

            status_text, _ = self._get_status()
            model, endpoint = self._get_model_info()

            temp = "?"
            max_tokens = "?"
            if self.llm_service and hasattr(self.llm_service, "api_service"):
                api_service = self.llm_service.api_service
                temp = getattr(api_service, "temperature", "?")
                max_tokens = getattr(api_service, "max_tokens", None) or "None"

            seg.add_lime(status_text, "dark")
            seg.add_cyan(f"Model: {model}", "dark")
            seg.add_lime(f"@ {endpoint}" if endpoint else "local")
            seg.add_cyan(f"Temp: {temp}")
            seg.add_neutral(f"Max: {max_tokens}", "mid")

            return [seg.render()]

        except Exception as e:
            logger.error(f"LLM details error: {e}")
            return [f"{ColorPalette.DIM}LLM unavailable{ColorPalette.RESET}"]

    def _minimal_content(self) -> List[str]:
        """Agnoster minimal: status | model | msgs | tokens."""
        try:
            seg = AgnosterSegment()

            status_text, variant = self._get_status()
            model, _ = self._get_model_info()
            msgs, token_display = self._get_stats()

            seg.add_lime(status_text, "dark")
            seg.add_cyan(model, "dark")
            seg.add_neutral(f"{msgs} msg | {token_display} tok", "mid")

            return [seg.render()]

        except Exception as e:
            logger.error(f"Minimal error: {e}")
            return [f"{ColorPalette.DIM}--{ColorPalette.RESET}"]