kollabor-0.4.9-py3-none-any.whl → kollabor-0.4.15-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (192)
  1. agents/__init__.py +2 -0
  2. agents/coder/__init__.py +0 -0
  3. agents/coder/agent.json +4 -0
  4. agents/coder/api-integration.md +2150 -0
  5. agents/coder/cli-pretty.md +765 -0
  6. agents/coder/code-review.md +1092 -0
  7. agents/coder/database-design.md +1525 -0
  8. agents/coder/debugging.md +1102 -0
  9. agents/coder/dependency-management.md +1397 -0
  10. agents/coder/git-workflow.md +1099 -0
  11. agents/coder/refactoring.md +1454 -0
  12. agents/coder/security-hardening.md +1732 -0
  13. agents/coder/system_prompt.md +1448 -0
  14. agents/coder/tdd.md +1367 -0
  15. agents/creative-writer/__init__.py +0 -0
  16. agents/creative-writer/agent.json +4 -0
  17. agents/creative-writer/character-development.md +1852 -0
  18. agents/creative-writer/dialogue-craft.md +1122 -0
  19. agents/creative-writer/plot-structure.md +1073 -0
  20. agents/creative-writer/revision-editing.md +1484 -0
  21. agents/creative-writer/system_prompt.md +690 -0
  22. agents/creative-writer/worldbuilding.md +2049 -0
  23. agents/data-analyst/__init__.py +30 -0
  24. agents/data-analyst/agent.json +4 -0
  25. agents/data-analyst/data-visualization.md +992 -0
  26. agents/data-analyst/exploratory-data-analysis.md +1110 -0
  27. agents/data-analyst/pandas-data-manipulation.md +1081 -0
  28. agents/data-analyst/sql-query-optimization.md +881 -0
  29. agents/data-analyst/statistical-analysis.md +1118 -0
  30. agents/data-analyst/system_prompt.md +928 -0
  31. agents/default/__init__.py +0 -0
  32. agents/default/agent.json +4 -0
  33. agents/default/dead-code.md +794 -0
  34. agents/default/explore-agent-system.md +585 -0
  35. agents/default/system_prompt.md +1448 -0
  36. agents/kollabor/__init__.py +0 -0
  37. agents/kollabor/analyze-plugin-lifecycle.md +175 -0
  38. agents/kollabor/analyze-terminal-rendering.md +388 -0
  39. agents/kollabor/code-review.md +1092 -0
  40. agents/kollabor/debug-mcp-integration.md +521 -0
  41. agents/kollabor/debug-plugin-hooks.md +547 -0
  42. agents/kollabor/debugging.md +1102 -0
  43. agents/kollabor/dependency-management.md +1397 -0
  44. agents/kollabor/git-workflow.md +1099 -0
  45. agents/kollabor/inspect-llm-conversation.md +148 -0
  46. agents/kollabor/monitor-event-bus.md +558 -0
  47. agents/kollabor/profile-performance.md +576 -0
  48. agents/kollabor/refactoring.md +1454 -0
  49. agents/kollabor/system_prompt copy.md +1448 -0
  50. agents/kollabor/system_prompt.md +757 -0
  51. agents/kollabor/trace-command-execution.md +178 -0
  52. agents/kollabor/validate-config.md +879 -0
  53. agents/research/__init__.py +0 -0
  54. agents/research/agent.json +4 -0
  55. agents/research/architecture-mapping.md +1099 -0
  56. agents/research/codebase-analysis.md +1077 -0
  57. agents/research/dependency-audit.md +1027 -0
  58. agents/research/performance-profiling.md +1047 -0
  59. agents/research/security-review.md +1359 -0
  60. agents/research/system_prompt.md +492 -0
  61. agents/technical-writer/__init__.py +0 -0
  62. agents/technical-writer/agent.json +4 -0
  63. agents/technical-writer/api-documentation.md +2328 -0
  64. agents/technical-writer/changelog-management.md +1181 -0
  65. agents/technical-writer/readme-writing.md +1360 -0
  66. agents/technical-writer/style-guide.md +1410 -0
  67. agents/technical-writer/system_prompt.md +653 -0
  68. agents/technical-writer/tutorial-creation.md +1448 -0
  69. core/__init__.py +0 -2
  70. core/application.py +343 -88
  71. core/cli.py +229 -10
  72. core/commands/menu_renderer.py +463 -59
  73. core/commands/registry.py +14 -9
  74. core/commands/system_commands.py +2461 -14
  75. core/config/loader.py +151 -37
  76. core/config/service.py +18 -6
  77. core/events/bus.py +29 -9
  78. core/events/executor.py +205 -75
  79. core/events/models.py +27 -8
  80. core/fullscreen/command_integration.py +20 -24
  81. core/fullscreen/components/__init__.py +10 -1
  82. core/fullscreen/components/matrix_components.py +1 -2
  83. core/fullscreen/components/space_shooter_components.py +654 -0
  84. core/fullscreen/plugin.py +5 -0
  85. core/fullscreen/renderer.py +52 -13
  86. core/fullscreen/session.py +52 -15
  87. core/io/__init__.py +29 -5
  88. core/io/buffer_manager.py +6 -1
  89. core/io/config_status_view.py +7 -29
  90. core/io/core_status_views.py +267 -347
  91. core/io/input/__init__.py +25 -0
  92. core/io/input/command_mode_handler.py +711 -0
  93. core/io/input/display_controller.py +128 -0
  94. core/io/input/hook_registrar.py +286 -0
  95. core/io/input/input_loop_manager.py +421 -0
  96. core/io/input/key_press_handler.py +502 -0
  97. core/io/input/modal_controller.py +1011 -0
  98. core/io/input/paste_processor.py +339 -0
  99. core/io/input/status_modal_renderer.py +184 -0
  100. core/io/input_errors.py +5 -1
  101. core/io/input_handler.py +211 -2452
  102. core/io/key_parser.py +7 -0
  103. core/io/layout.py +15 -3
  104. core/io/message_coordinator.py +111 -2
  105. core/io/message_renderer.py +129 -4
  106. core/io/status_renderer.py +147 -607
  107. core/io/terminal_renderer.py +97 -51
  108. core/io/terminal_state.py +21 -4
  109. core/io/visual_effects.py +816 -165
  110. core/llm/agent_manager.py +1063 -0
  111. core/llm/api_adapters/__init__.py +44 -0
  112. core/llm/api_adapters/anthropic_adapter.py +432 -0
  113. core/llm/api_adapters/base.py +241 -0
  114. core/llm/api_adapters/openai_adapter.py +326 -0
  115. core/llm/api_communication_service.py +167 -113
  116. core/llm/conversation_logger.py +322 -16
  117. core/llm/conversation_manager.py +556 -30
  118. core/llm/file_operations_executor.py +84 -32
  119. core/llm/llm_service.py +934 -103
  120. core/llm/mcp_integration.py +541 -57
  121. core/llm/message_display_service.py +135 -18
  122. core/llm/plugin_sdk.py +1 -2
  123. core/llm/profile_manager.py +1183 -0
  124. core/llm/response_parser.py +274 -56
  125. core/llm/response_processor.py +16 -3
  126. core/llm/tool_executor.py +6 -1
  127. core/logging/__init__.py +2 -0
  128. core/logging/setup.py +34 -6
  129. core/models/resume.py +54 -0
  130. core/plugins/__init__.py +4 -2
  131. core/plugins/base.py +127 -0
  132. core/plugins/collector.py +23 -161
  133. core/plugins/discovery.py +37 -3
  134. core/plugins/factory.py +6 -12
  135. core/plugins/registry.py +5 -17
  136. core/ui/config_widgets.py +128 -28
  137. core/ui/live_modal_renderer.py +2 -1
  138. core/ui/modal_actions.py +5 -0
  139. core/ui/modal_overlay_renderer.py +0 -60
  140. core/ui/modal_renderer.py +268 -7
  141. core/ui/modal_state_manager.py +29 -4
  142. core/ui/widgets/base_widget.py +7 -0
  143. core/updates/__init__.py +10 -0
  144. core/updates/version_check_service.py +348 -0
  145. core/updates/version_comparator.py +103 -0
  146. core/utils/config_utils.py +685 -526
  147. core/utils/plugin_utils.py +1 -1
  148. core/utils/session_naming.py +111 -0
  149. fonts/LICENSE +21 -0
  150. fonts/README.md +46 -0
  151. fonts/SymbolsNerdFont-Regular.ttf +0 -0
  152. fonts/SymbolsNerdFontMono-Regular.ttf +0 -0
  153. fonts/__init__.py +44 -0
  154. {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/METADATA +54 -4
  155. kollabor-0.4.15.dist-info/RECORD +228 -0
  156. {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/top_level.txt +2 -0
  157. plugins/agent_orchestrator/__init__.py +39 -0
  158. plugins/agent_orchestrator/activity_monitor.py +181 -0
  159. plugins/agent_orchestrator/file_attacher.py +77 -0
  160. plugins/agent_orchestrator/message_injector.py +135 -0
  161. plugins/agent_orchestrator/models.py +48 -0
  162. plugins/agent_orchestrator/orchestrator.py +403 -0
  163. plugins/agent_orchestrator/plugin.py +976 -0
  164. plugins/agent_orchestrator/xml_parser.py +191 -0
  165. plugins/agent_orchestrator_plugin.py +9 -0
  166. plugins/enhanced_input/box_styles.py +1 -0
  167. plugins/enhanced_input/color_engine.py +19 -4
  168. plugins/enhanced_input/config.py +2 -2
  169. plugins/enhanced_input_plugin.py +61 -11
  170. plugins/fullscreen/__init__.py +6 -2
  171. plugins/fullscreen/example_plugin.py +1035 -222
  172. plugins/fullscreen/setup_wizard_plugin.py +592 -0
  173. plugins/fullscreen/space_shooter_plugin.py +131 -0
  174. plugins/hook_monitoring_plugin.py +436 -78
  175. plugins/query_enhancer_plugin.py +66 -30
  176. plugins/resume_conversation_plugin.py +1494 -0
  177. plugins/save_conversation_plugin.py +98 -32
  178. plugins/system_commands_plugin.py +70 -56
  179. plugins/tmux_plugin.py +154 -78
  180. plugins/workflow_enforcement_plugin.py +94 -92
  181. system_prompt/default.md +952 -886
  182. core/io/input_mode_manager.py +0 -402
  183. core/io/modal_interaction_handler.py +0 -315
  184. core/io/raw_input_processor.py +0 -946
  185. core/storage/__init__.py +0 -5
  186. core/storage/state_manager.py +0 -84
  187. core/ui/widget_integration.py +0 -222
  188. core/utils/key_reader.py +0 -171
  189. kollabor-0.4.9.dist-info/RECORD +0 -128
  190. {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/WHEEL +0 -0
  191. {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/entry_points.txt +0 -0
  192. {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/licenses/LICENSE +0 -0
core/io/core_status_views.py
@@ -1,410 +1,330 @@
- """Core status views for the Kollabor CLI application."""
+ """Core status views for the Kollabor CLI application.
+
+ All views use the agnoster powerline style with lime/cyan color scheme.
+ """

  import logging
- import psutil
+ from pathlib import Path
  from typing import List

  from .status_renderer import StatusViewConfig, BlockConfig
+ from .visual_effects import AgnosterSegment, ColorPalette

  logger = logging.getLogger(__name__)


  class CoreStatusViews:
-     """Provides default core status views for the application."""
-
-     def __init__(self, llm_service=None, config=None):
-         """Initialize core status views.
+     """Provides agnoster-styled core status views."""

-         Args:
-             llm_service: LLM service instance for status data.
-             config: Configuration manager for toggleable sections.
-         """
+     def __init__(self, llm_service=None, config=None, profile_manager=None, agent_manager=None):
+         """Initialize core status views."""
          self.llm_service = llm_service
          self.config = config
+         self.profile_manager = profile_manager
+         self.agent_manager = agent_manager

      def register_all_views(self, status_registry) -> None:
-         """Register all core status views with the registry.
-
-         Args:
-             status_registry: StatusViewRegistry to register views with.
-         """
+         """Register all core status views."""
          try:
-             # View 0: Overview - Consolidated view with everything (priority 1100 - highest)
-             overview_view = StatusViewConfig(
+             # View 1: Overview (priority 1100 - highest)
+             status_registry.register_status_view("core", StatusViewConfig(
                  name="Overview",
                  plugin_source="core",
                  priority=1100,
-                 blocks=[
-                     BlockConfig(
-                         width_fraction=0.33,
-                         content_provider=self._get_overview_ai_session_content,
-                         title="AI & Session",
-                         priority=100,
-                     ),
-                     BlockConfig(
-                         width_fraction=0.33,
-                         content_provider=self._get_overview_model_content,
-                         title="Model",
-                         priority=90,
-                     ),
-                     BlockConfig(
-                         width_fraction=0.34,
-                         content_provider=self._get_overview_performance_content,
-                         title="Performance",
-                         priority=80,
-                     ),
-                 ],
-             )
-             status_registry.register_status_view("core", overview_view)
-
-             # View 1: Session Stats (priority 1000)
-             session_view = StatusViewConfig(
-                 name="Session Stats",
+                 blocks=[BlockConfig(
+                     width_fraction=1.0,
+                     content_provider=self._overview_content,
+                     title="Overview",
+                     priority=100,
+                 )],
+             ))
+
+             # View 2: Session Stats (priority 1000)
+             status_registry.register_status_view("core", StatusViewConfig(
+                 name="Session",
                  plugin_source="core",
                  priority=1000,
-                 blocks=[
-                     BlockConfig(
-                         width_fraction=0.5,
-                         content_provider=self._get_session_stats_content,
-                         title="Session Stats",
-                         priority=100,
-                     ),
-                     BlockConfig(
-                         width_fraction=0.5,
-                         content_provider=self._get_ai_status_content,
-                         title="AI Status",
-                         priority=90,
-                     ),
-                 ],
-             )
-             status_registry.register_status_view("core", session_view)
-
-             # View 2: Performance (priority 800)
-             performance_view = StatusViewConfig(
-                 name="Performance",
+                 blocks=[BlockConfig(
+                     width_fraction=1.0,
+                     content_provider=self._session_content,
+                     title="Session",
+                     priority=100,
+                 )],
+             ))
+
+             # View 3: LLM Details (priority 900)
+             status_registry.register_status_view("core", StatusViewConfig(
+                 name="LLM Details",
                  plugin_source="core",
-                 priority=800,
-                 blocks=[
-                     BlockConfig(
-                         width_fraction=1.0,
-                         content_provider=self._get_performance_content,
-                         title="Performance",
-                         priority=100,
-                     )
-                 ],
-             )
-             status_registry.register_status_view("core", performance_view)
-
-             # View 3: Minimal (priority 600)
-             minimal_view = StatusViewConfig(
+                 priority=900,
+                 blocks=[BlockConfig(
+                     width_fraction=1.0,
+                     content_provider=self._llm_details_content,
+                     title="LLM",
+                     priority=100,
+                 )],
+             ))
+
+             # View 4: Minimal (priority 600)
+             status_registry.register_status_view("core", StatusViewConfig(
                  name="Minimal",
                  plugin_source="core",
                  priority=600,
-                 blocks=[
-                     BlockConfig(
-                         width_fraction=1.0,
-                         content_provider=self._get_minimal_content,
-                         title="Minimal",
-                         priority=100,
-                     )
-                 ],
-             )
-             status_registry.register_status_view("core", minimal_view)
-
-             # View 4: LLM Details (priority 700)
-             llm_view = StatusViewConfig(
-                 name="LLM Details",
-                 plugin_source="core",
-                 priority=700,
-                 blocks=[
-                     BlockConfig(
-                         width_fraction=1.0,
-                         content_provider=self._get_llm_details_content,
-                         title="LLM Configuration",
-                         priority=100,
-                     )
-                 ],
-             )
-             status_registry.register_status_view("core", llm_view)
-
-             logger.info(
-                 "Registered 5 core status views: "
-                 "Overview, Session Stats, Performance, LLM Details, Minimal"
-             )
+                 blocks=[BlockConfig(
+                     width_fraction=1.0,
+                     content_provider=self._minimal_content,
+                     title="Minimal",
+                     priority=100,
+                 )],
+             ))
+
+             logger.info("Registered 4 core status views")

          except Exception as e:
              logger.error(f"Failed to register core status views: {e}")

-     def _get_overview_ai_session_content(self) -> List[str]:
-         """Get AI and Session stats for Overview (left column)."""
+     def _get_dir_display(self) -> str:
+         """Get formatted directory display."""
          try:
-             import os
-             from pathlib import Path
-
-             lines = []
-
-             # Get config toggles
-             show_ai = self._get_config("terminal.status.overview.show_ai", True)
-             show_session = self._get_config("terminal.status.overview.show_session", True)
-             show_directory = self._get_config("terminal.status.overview.show_directory", True)
-
-             # Current Directory
-             if show_directory:
-                 cwd = Path.cwd()
-                 # Show just the directory name, or full path if configured
-                 show_full_path = self._get_config("terminal.status.overview.show_full_path", False)
-                 if show_full_path:
-                     dir_display = str(cwd)
-                 else:
-                     # Show home as ~ and just the folder name
-                     try:
-                         home = Path.home()
-                         if cwd == home:
-                             dir_display = "~"
-                         elif cwd.is_relative_to(home):
-                             rel_path = cwd.relative_to(home)
-                             # Show last 2 parts of path for context
-                             parts = rel_path.parts
-                             if len(parts) > 2:
-                                 dir_display = f"~/{'/'.join(parts[-2:])}"
-                             else:
-                                 dir_display = f"~/{rel_path}"
-                         else:
-                             # Show last 2 parts of absolute path
-                             parts = cwd.parts
-                             if len(parts) > 2:
-                                 dir_display = f"{'/'.join(parts[-2:])}"
-                             else:
-                                 dir_display = str(cwd)
-                     except Exception:
-                         dir_display = cwd.name or str(cwd)
+             cwd = Path.cwd()
+             home = Path.home()
+             if cwd == home:
+                 return "~"
+             elif cwd.is_relative_to(home):
+                 rel_path = cwd.relative_to(home)
+                 parts = rel_path.parts
+                 if len(parts) > 2:
+                     return f"~/{'/'.join(parts[-2:])}"
+                 return f"~/{rel_path}"
+             return cwd.name or str(cwd)
+         except Exception:
+             return "?"
+
+     def _get_profile_name(self) -> str:
+         """Get active profile name."""
+         if self.profile_manager:
+             profile = self.profile_manager.get_active_profile()
+             if profile:
+                 return profile.name
+         return "default"
+
+     def _get_agent_info(self) -> tuple:
+         """Get active agent name, all skills, and active skills."""
+         agent_name = None
+         all_skills = []
+         active_skills = set()
+         if self.agent_manager:
+             agent = self.agent_manager.get_active_agent()
+             if agent:
+                 agent_name = agent.name
+                 all_skills = [s.name for s in agent.list_skills()]
+                 active_skills = set(agent.active_skills)
+         return agent_name, all_skills, active_skills
+
+     def _format_agent_skills_line(self, agent_name: str, all_skills: list, active_skills: set, max_width: int = 80) -> str:
+         """Format agent/skills line with active skills bright, others dimmed.
+
+         Format: agent: skill1* skill2 skill3 +N more
+         Active skills are bright with *, inactive are dimmed.
+         """
+         if not agent_name:
+             return ""

-                 lines.append(f"Dir: {dir_display}")
+         # Start with electric arrow symbol and agent name
+         line = f"{ColorPalette.BRIGHT_YELLOW}⌁{ColorPalette.RESET}{ColorPalette.LIME}{agent_name}{ColorPalette.RESET}⋮"
+         current_len = len(agent_name) + 3  # " name: "

-             # AI Status
-             if show_ai and self.llm_service:
-                 processing = "* Processing" if self.llm_service.is_processing else "✓ Ready"
-                 queue_size = 0
-                 if hasattr(self.llm_service, "processing_queue"):
-                     queue_size = self.llm_service.processing_queue.qsize()
+         # Sort skills: active first, then inactive
+         sorted_skills = sorted(all_skills, key=lambda s: (s not in active_skills, s))

-                 lines.append(f"AI: {processing}")
-                 if queue_size > 0:
-                     lines.append(f"Queue: {queue_size}")
+         skills_shown = 0
+         max_skills_to_show = 3  # Show at most 3 skills before truncating

-             # Session Stats
-             if show_session and self.llm_service and hasattr(self.llm_service, "session_stats"):
-                 stats = self.llm_service.session_stats
-                 msgs = stats.get('messages', 0)
-                 tokens_in = stats.get('input_tokens', 0)
-                 tokens_out = stats.get('output_tokens', 0)
-                 total_tokens = tokens_in + tokens_out
+         for skill in sorted_skills:
+             is_active = skill in active_skills

-                 # Format tokens
-                 if total_tokens < 1000:
-                     token_display = f"{total_tokens}"
-                 elif total_tokens < 1000000:
-                     token_display = f"{total_tokens/1000:.1f}K"
-                 else:
-                     token_display = f"{total_tokens/1000000:.1f}M"
+             # Check if we need to truncate
+             if skills_shown >= max_skills_to_show and len(all_skills) > max_skills_to_show:
+                 remaining = len(all_skills) - skills_shown
+                 line += f"⋮{ColorPalette.DIM}+{remaining} more{ColorPalette.RESET}"
+                 break

-                 lines.append(f"Messages: {msgs}")
-                 lines.append(f"Tokens: {token_display}")
-                 lines.append(f"In/Out: {tokens_in}/{tokens_out}")
-
-             return lines if lines else ["Hidden"]
+             # Format skill
+             if is_active:
+                 skill_text = f"{ColorPalette.BRIGHT_CYAN} {ColorPalette.BRIGHT_WHITE}{skill}{ColorPalette.RESET}"
+             else:
+                 skill_text = f"{ColorPalette.DIM}⋮{skill}{ColorPalette.RESET}"

-         except Exception as e:
-             logger.error(f"Error getting AI/Session content: {e}")
-             return ["Error"]
+             line += skill_text
+             skills_shown += 1

-     def _get_overview_model_content(self) -> List[str]:
-         """Get Model info for Overview (middle column)."""
-         try:
-             lines = []
+         return line.rstrip()

-             show_model = self._get_config("terminal.status.overview.show_model", True)
+     def _get_model_info(self) -> tuple:
+         """Get model name and endpoint from active profile."""
+         model = "unknown"
+         endpoint = ""

-             if show_model and self.llm_service and hasattr(self.llm_service, "api_service"):
-                 api_service = self.llm_service.api_service
-                 model = getattr(api_service, "model", "Unknown")
-                 temp = getattr(api_service, "temperature", None)
-                 max_tok = getattr(api_service, "max_tokens", None)
-                 api_url = getattr(api_service, "api_url", "Unknown")
-
-                 # Extract endpoint from URL
-                 endpoint = "Unknown"
-                 if api_url != "Unknown":
+         # Prefer profile_manager as source of truth (supports env vars and reload)
+         if self.profile_manager:
+             profile = self.profile_manager.get_active_profile()
+             if profile:
+                 model = profile.get_model() or "unknown"
+                 api_url = profile.get_endpoint() or ""
+                 if api_url:
                      try:
                          from urllib.parse import urlparse
-                         parsed = urlparse(api_url)
-                         endpoint = parsed.hostname or api_url
+                         endpoint = urlparse(api_url).hostname or ""
                      except Exception:
-                         endpoint = api_url
+                         pass
+             return model, endpoint
+
+         # Fallback to api_service if no profile_manager
+         if self.llm_service and hasattr(self.llm_service, "api_service"):
+             api_service = self.llm_service.api_service
+             model = getattr(api_service, "model", "unknown")
+             api_url = getattr(api_service, "api_url", "")
+             if api_url:
+                 try:
+                     from urllib.parse import urlparse
+                     endpoint = urlparse(api_url).hostname or ""
+                 except Exception:
+                     pass
+         return model, endpoint
+
+     def _get_status(self) -> tuple:
+         """Get status text and variant."""
+         if self.llm_service and self.llm_service.is_processing:
+             return "* Working", "light"
+         return "* Ready", "normal"
+
+     def _get_stats(self) -> tuple:
+         """Get message count and token display."""
+         msgs = 0
+         tokens = 0
+         if self.llm_service and hasattr(self.llm_service, "session_stats"):
+             stats = self.llm_service.session_stats
+             msgs = stats.get("messages", 0)
+             tokens = stats.get("input_tokens", 0) + stats.get("output_tokens", 0)
+
+         if tokens < 1000:
+             token_display = f"{tokens}"
+         elif tokens < 1000000:
+             token_display = f"{tokens/1000:.1f}K"
+         else:
+             token_display = f"{tokens/1000000:.1f}M"
+
+         return msgs, token_display
+
+     def _overview_content(self) -> List[str]:
+         """Agnoster overview: dir | profile | model@endpoint | status | stats.
+
+         If an agent is active, adds a second line showing agent and skills.
+         """
+         try:
+             seg = AgnosterSegment()

-                 lines.append(f"Model: {model}")
-                 if endpoint != "Unknown":
-                     lines.append(f"Endpoint: {endpoint}")
-                 if temp is not None:
-                     lines.append(f"Temp: {temp}")
-                 if max_tok is not None:
-                     lines.append(f"Max Tokens: {max_tok}")
+             # Directory (lime dark)
+             seg.add_lime(self._get_dir_display(), "dark")

-             return lines if lines else ["Hidden"]
+             # Profile (cyan dark)
+             seg.add_cyan(self._get_profile_name(), "dark")

-         except Exception as e:
-             logger.error(f"Error getting model content: {e}")
-             return ["Error"]
+             # Model @ Endpoint (lime)
+             model, endpoint = self._get_model_info()
+             model_text = f"{model} @ {endpoint}" if endpoint else model
+             seg.add_lime(model_text)

-     def _get_overview_performance_content(self) -> List[str]:
-         """Get Performance info for Overview (right column)."""
-         try:
-             # Performance metrics disabled per user request
-             return []
+             # Status (cyan)
+             status_text, variant = self._get_status()
+             seg.add_cyan(status_text, variant)

-         except Exception as e:
-             logger.error(f"Error getting performance content: {e}")
-             return ["Error"]
+             # Stats (neutral)
+             msgs, token_display = self._get_stats()
+             seg.add_neutral(f"{msgs} msg | {token_display} tok", "mid")

-     def _get_config(self, key: str, default):
-         """Get config value with fallback to default."""
-         if self.config and hasattr(self.config, "get"):
-             return self.config.get(key, default)
-         return default
+             lines = [seg.render()]

-     def _get_session_stats_content(self) -> List[str]:
-         """Get session statistics content."""
-         try:
-             # Get session stats from LLM service
-             if self.llm_service and hasattr(self.llm_service, "session_stats"):
-                 stats = self.llm_service.session_stats
-                 return [
-                     f"Messages: {stats.get('messages', 0)}",
-                     f"Tokens In: {stats.get('input_tokens', 0)}",
-                     f"Tokens Out: {stats.get('output_tokens', 0)}",
-                 ]
-             return ["Messages: 0", "Tokens: 0"]
-         except Exception:
-             return ["Session: N/A"]
+             # Add agent/skills line if agent is active
+             agent_name, all_skills, active_skills = self._get_agent_info()
+             if agent_name:
+                 agent_line = self._format_agent_skills_line(agent_name, all_skills, active_skills)
+                 if agent_line:
+                     lines.append(agent_line)

-     def _get_ai_status_content(self) -> List[str]:
-         """Get AI status content."""
-         try:
-             if self.llm_service:
-                 processing = (
-                     "* Processing" if self.llm_service.is_processing else "✓ Ready"
-                 )
-                 if hasattr(self.llm_service, "processing_queue"):
-                     queue_size = self.llm_service.processing_queue.qsize()
-                 else:
-                     queue_size = 0
-
-                 # Get model and endpoint info from API service
-                 model = "Unknown"
-                 endpoint = "Unknown"
-                 if hasattr(self.llm_service, "api_service"):
-                     api_service = self.llm_service.api_service
-                     model = getattr(api_service, "model", "Unknown")
-                     api_url = getattr(api_service, "api_url", "Unknown")
-                     # Extract domain from URL for cleaner display
-                     if api_url != "Unknown":
-                         try:
-                             from urllib.parse import urlparse
-
-                             parsed = urlparse(api_url)
-                             endpoint = parsed.hostname or api_url
-                         except Exception:
-                             endpoint = api_url
-
-                 return [
-                     f"AI: {processing}",
-                     f"Model: {model}",
-                     f"Endpoint: {endpoint}",
-                     f"Queue: {queue_size}",
-                 ]
-             return ["AI: Unknown"]
-         except Exception:
-             return ["AI: N/A"]
+             return lines

-     def _get_performance_content(self) -> List[str]:
-         """Get performance content."""
-         try:
-             # Performance metrics disabled per user request
-             return []
-         except Exception:
-             return []
+         except Exception as e:
+             logger.error(f"Overview error: {e}")
+             return [f"{ColorPalette.DIM}Status unavailable{ColorPalette.RESET}"]

-     def _get_minimal_content(self) -> List[str]:
-         """Get minimal view content."""
+     def _session_content(self) -> List[str]:
+         """Agnoster session: messages | tokens in | tokens out | total."""
          try:
-             ai_status = "✓ Ready"
-             model = "Unknown"
-             if self.llm_service:
-                 if self.llm_service.is_processing:
-                     ai_status = "* Processing"
-
-                 # Get model info
-                 if hasattr(self.llm_service, "api_service"):
-                     model = getattr(self.llm_service.api_service, "model", "Unknown")
-
-             messages = 0
-             tokens = 0
+             seg = AgnosterSegment()
+
+             msgs = 0
+             tokens_in = 0
+             tokens_out = 0
              if self.llm_service and hasattr(self.llm_service, "session_stats"):
                  stats = self.llm_service.session_stats
-                 messages = stats.get("messages", 0)
-                 input_tokens = stats.get("input_tokens", 0)
-                 output_tokens = stats.get("output_tokens", 0)
-                 tokens = input_tokens + output_tokens
+                 msgs = stats.get("messages", 0)
+                 tokens_in = stats.get("input_tokens", 0)
+                 tokens_out = stats.get("output_tokens", 0)

-             if tokens < 1000:
-                 token_display = f"{tokens}"
-             else:
-                 token_display = f"{tokens/1000:.1f}K"
+             seg.add_lime(f"Messages: {msgs}", "dark")
+             seg.add_cyan(f"In: {tokens_in}", "dark")
+             seg.add_lime(f"Out: {tokens_out}")
+             seg.add_cyan(f"Total: {tokens_in + tokens_out}")

-             return [
-                 f"AI: {ai_status} ({model}) | Messages: {messages} "
-                 f"| Tokens: {token_display}"
-             ]
-         except Exception:
-             return ["Status: N/A"]
+             return [seg.render()]
+
+         except Exception as e:
+             logger.error(f"Session error: {e}")
+             return [f"{ColorPalette.DIM}Session unavailable{ColorPalette.RESET}"]

-     def _get_llm_details_content(self) -> List[str]:
-         """Get detailed LLM configuration content."""
+     def _llm_details_content(self) -> List[str]:
+         """Agnoster LLM details: status | model | endpoint | temp | max_tokens."""
          try:
-             if not self.llm_service:
-                 return ["LLM: Not initialized"]
-
-             ai_status = (
-                 "* Processing" if self.llm_service.is_processing else "✓ Ready"
-             )
-             model = "Unknown"
-             endpoint = "Unknown"
-             temperature = "Unknown"
-             max_tokens = "Unknown"
-
-             if hasattr(self.llm_service, "api_service"):
+             seg = AgnosterSegment()
+
+             status_text, _ = self._get_status()
+             model, endpoint = self._get_model_info()
+
+             temp = "?"
+             max_tokens = "?"
+             if self.llm_service and hasattr(self.llm_service, "api_service"):
                  api_service = self.llm_service.api_service
-                 model = getattr(api_service, "model", "Unknown")
-                 temperature = getattr(api_service, "temperature", "Unknown")
-                 max_tokens = getattr(api_service, "max_tokens", "Unknown")
-                 api_url = getattr(api_service, "api_url", "Unknown")
+                 temp = getattr(api_service, "temperature", "?")
+                 max_tokens = getattr(api_service, "max_tokens", None) or "None"

-                 # Extract domain from URL for cleaner display
-                 if api_url != "Unknown":
-                     try:
-                         from urllib.parse import urlparse
+             seg.add_lime(status_text, "dark")
+             seg.add_cyan(f"Model: {model}", "dark")
+             seg.add_lime(f"@ {endpoint}" if endpoint else "local")
+             seg.add_cyan(f"Temp: {temp}")
+             seg.add_neutral(f"Max: {max_tokens}", "mid")

-                         parsed = urlparse(api_url)
-                         endpoint = parsed.hostname or api_url
-                     except Exception:
-                         endpoint = api_url
-
-             return [
-                 f"Status: {ai_status}",
-                 f"Model: {model}",
-                 f"Endpoint: {endpoint}",
-                 f"Temperature: {temperature}",
-                 f"Max Tokens: {max_tokens}",
-             ]
-         except Exception:
-             return ["LLM Details: N/A"]
+             return [seg.render()]
+
+         except Exception as e:
+             logger.error(f"LLM details error: {e}")
+             return [f"{ColorPalette.DIM}LLM unavailable{ColorPalette.RESET}"]
+
+     def _minimal_content(self) -> List[str]:
+         """Agnoster minimal: status | model | msgs | tokens."""
+         try:
+             seg = AgnosterSegment()
+
+             status_text, variant = self._get_status()
+             model, _ = self._get_model_info()
+             msgs, token_display = self._get_stats()
+
+             seg.add_lime(status_text, "dark")
+             seg.add_cyan(model, "dark")
+             seg.add_neutral(f"{msgs} msg | {token_display} tok", "mid")
+
+             return [seg.render()]
+
+         except Exception as e:
+             logger.error(f"Minimal error: {e}")
+             return [f"{ColorPalette.DIM}--{ColorPalette.RESET}"]