superqode 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (288)
  1. superqode/__init__.py +33 -0
  2. superqode/acp/__init__.py +23 -0
  3. superqode/acp/client.py +913 -0
  4. superqode/acp/permission_screen.py +457 -0
  5. superqode/acp/types.py +480 -0
  6. superqode/acp_discovery.py +856 -0
  7. superqode/agent/__init__.py +22 -0
  8. superqode/agent/edit_strategies.py +334 -0
  9. superqode/agent/loop.py +892 -0
  10. superqode/agent/qe_report_templates.py +39 -0
  11. superqode/agent/system_prompts.py +353 -0
  12. superqode/agent_output.py +721 -0
  13. superqode/agent_stream.py +953 -0
  14. superqode/agents/__init__.py +59 -0
  15. superqode/agents/acp_registry.py +305 -0
  16. superqode/agents/client.py +249 -0
  17. superqode/agents/data/augmentcode.com.toml +51 -0
  18. superqode/agents/data/cagent.dev.toml +51 -0
  19. superqode/agents/data/claude.com.toml +60 -0
  20. superqode/agents/data/codeassistant.dev.toml +51 -0
  21. superqode/agents/data/codex.openai.com.toml +57 -0
  22. superqode/agents/data/fastagent.ai.toml +66 -0
  23. superqode/agents/data/geminicli.com.toml +77 -0
  24. superqode/agents/data/goose.block.xyz.toml +54 -0
  25. superqode/agents/data/junie.jetbrains.com.toml +56 -0
  26. superqode/agents/data/kimi.moonshot.cn.toml +57 -0
  27. superqode/agents/data/llmlingagent.dev.toml +51 -0
  28. superqode/agents/data/molt.bot.toml +49 -0
  29. superqode/agents/data/opencode.ai.toml +60 -0
  30. superqode/agents/data/stakpak.dev.toml +51 -0
  31. superqode/agents/data/vtcode.dev.toml +51 -0
  32. superqode/agents/discovery.py +266 -0
  33. superqode/agents/messaging.py +160 -0
  34. superqode/agents/persona.py +166 -0
  35. superqode/agents/registry.py +421 -0
  36. superqode/agents/schema.py +72 -0
  37. superqode/agents/unified.py +367 -0
  38. superqode/app/__init__.py +111 -0
  39. superqode/app/constants.py +314 -0
  40. superqode/app/css.py +366 -0
  41. superqode/app/models.py +118 -0
  42. superqode/app/suggester.py +125 -0
  43. superqode/app/widgets.py +1591 -0
  44. superqode/app_enhanced.py +399 -0
  45. superqode/app_main.py +17187 -0
  46. superqode/approval.py +312 -0
  47. superqode/atomic.py +296 -0
  48. superqode/commands/__init__.py +1 -0
  49. superqode/commands/acp.py +965 -0
  50. superqode/commands/agents.py +180 -0
  51. superqode/commands/auth.py +278 -0
  52. superqode/commands/config.py +374 -0
  53. superqode/commands/init.py +826 -0
  54. superqode/commands/providers.py +819 -0
  55. superqode/commands/qe.py +1145 -0
  56. superqode/commands/roles.py +380 -0
  57. superqode/commands/serve.py +172 -0
  58. superqode/commands/suggestions.py +127 -0
  59. superqode/commands/superqe.py +460 -0
  60. superqode/config/__init__.py +51 -0
  61. superqode/config/loader.py +812 -0
  62. superqode/config/schema.py +498 -0
  63. superqode/core/__init__.py +111 -0
  64. superqode/core/roles.py +281 -0
  65. superqode/danger.py +386 -0
  66. superqode/data/superqode-template.yaml +1522 -0
  67. superqode/design_system.py +1080 -0
  68. superqode/dialogs/__init__.py +6 -0
  69. superqode/dialogs/base.py +39 -0
  70. superqode/dialogs/model.py +130 -0
  71. superqode/dialogs/provider.py +870 -0
  72. superqode/diff_view.py +919 -0
  73. superqode/enterprise.py +21 -0
  74. superqode/evaluation/__init__.py +25 -0
  75. superqode/evaluation/adapters.py +93 -0
  76. superqode/evaluation/behaviors.py +89 -0
  77. superqode/evaluation/engine.py +209 -0
  78. superqode/evaluation/scenarios.py +96 -0
  79. superqode/execution/__init__.py +36 -0
  80. superqode/execution/linter.py +538 -0
  81. superqode/execution/modes.py +347 -0
  82. superqode/execution/resolver.py +283 -0
  83. superqode/execution/runner.py +642 -0
  84. superqode/file_explorer.py +811 -0
  85. superqode/file_viewer.py +471 -0
  86. superqode/flash.py +183 -0
  87. superqode/guidance/__init__.py +58 -0
  88. superqode/guidance/config.py +203 -0
  89. superqode/guidance/prompts.py +71 -0
  90. superqode/harness/__init__.py +54 -0
  91. superqode/harness/accelerator.py +291 -0
  92. superqode/harness/config.py +319 -0
  93. superqode/harness/validator.py +147 -0
  94. superqode/history.py +279 -0
  95. superqode/integrations/superopt_runner.py +124 -0
  96. superqode/logging/__init__.py +49 -0
  97. superqode/logging/adapters.py +219 -0
  98. superqode/logging/formatter.py +923 -0
  99. superqode/logging/integration.py +341 -0
  100. superqode/logging/sinks.py +170 -0
  101. superqode/logging/unified_log.py +417 -0
  102. superqode/lsp/__init__.py +26 -0
  103. superqode/lsp/client.py +544 -0
  104. superqode/main.py +1069 -0
  105. superqode/mcp/__init__.py +89 -0
  106. superqode/mcp/auth_storage.py +380 -0
  107. superqode/mcp/client.py +1236 -0
  108. superqode/mcp/config.py +319 -0
  109. superqode/mcp/integration.py +337 -0
  110. superqode/mcp/oauth.py +436 -0
  111. superqode/mcp/oauth_callback.py +385 -0
  112. superqode/mcp/types.py +290 -0
  113. superqode/memory/__init__.py +31 -0
  114. superqode/memory/feedback.py +342 -0
  115. superqode/memory/store.py +522 -0
  116. superqode/notifications.py +369 -0
  117. superqode/optimization/__init__.py +5 -0
  118. superqode/optimization/config.py +33 -0
  119. superqode/permissions/__init__.py +25 -0
  120. superqode/permissions/rules.py +488 -0
  121. superqode/plan.py +323 -0
  122. superqode/providers/__init__.py +33 -0
  123. superqode/providers/gateway/__init__.py +165 -0
  124. superqode/providers/gateway/base.py +228 -0
  125. superqode/providers/gateway/litellm_gateway.py +1170 -0
  126. superqode/providers/gateway/openresponses_gateway.py +436 -0
  127. superqode/providers/health.py +297 -0
  128. superqode/providers/huggingface/__init__.py +74 -0
  129. superqode/providers/huggingface/downloader.py +472 -0
  130. superqode/providers/huggingface/endpoints.py +442 -0
  131. superqode/providers/huggingface/hub.py +531 -0
  132. superqode/providers/huggingface/inference.py +394 -0
  133. superqode/providers/huggingface/transformers_runner.py +516 -0
  134. superqode/providers/local/__init__.py +100 -0
  135. superqode/providers/local/base.py +438 -0
  136. superqode/providers/local/discovery.py +418 -0
  137. superqode/providers/local/lmstudio.py +256 -0
  138. superqode/providers/local/mlx.py +457 -0
  139. superqode/providers/local/ollama.py +486 -0
  140. superqode/providers/local/sglang.py +268 -0
  141. superqode/providers/local/tgi.py +260 -0
  142. superqode/providers/local/tool_support.py +477 -0
  143. superqode/providers/local/vllm.py +258 -0
  144. superqode/providers/manager.py +1338 -0
  145. superqode/providers/models.py +1016 -0
  146. superqode/providers/models_dev.py +578 -0
  147. superqode/providers/openresponses/__init__.py +87 -0
  148. superqode/providers/openresponses/converters/__init__.py +17 -0
  149. superqode/providers/openresponses/converters/messages.py +343 -0
  150. superqode/providers/openresponses/converters/tools.py +268 -0
  151. superqode/providers/openresponses/schema/__init__.py +56 -0
  152. superqode/providers/openresponses/schema/models.py +585 -0
  153. superqode/providers/openresponses/streaming/__init__.py +5 -0
  154. superqode/providers/openresponses/streaming/parser.py +338 -0
  155. superqode/providers/openresponses/tools/__init__.py +21 -0
  156. superqode/providers/openresponses/tools/apply_patch.py +352 -0
  157. superqode/providers/openresponses/tools/code_interpreter.py +290 -0
  158. superqode/providers/openresponses/tools/file_search.py +333 -0
  159. superqode/providers/openresponses/tools/mcp_adapter.py +252 -0
  160. superqode/providers/registry.py +716 -0
  161. superqode/providers/usage.py +332 -0
  162. superqode/pure_mode.py +384 -0
  163. superqode/qr/__init__.py +23 -0
  164. superqode/qr/dashboard.py +781 -0
  165. superqode/qr/generator.py +1018 -0
  166. superqode/qr/templates.py +135 -0
  167. superqode/safety/__init__.py +41 -0
  168. superqode/safety/sandbox.py +413 -0
  169. superqode/safety/warnings.py +256 -0
  170. superqode/server/__init__.py +33 -0
  171. superqode/server/lsp_server.py +775 -0
  172. superqode/server/web.py +250 -0
  173. superqode/session/__init__.py +25 -0
  174. superqode/session/persistence.py +580 -0
  175. superqode/session/sharing.py +477 -0
  176. superqode/session.py +475 -0
  177. superqode/sidebar.py +2991 -0
  178. superqode/stream_view.py +648 -0
  179. superqode/styles/__init__.py +3 -0
  180. superqode/superqe/__init__.py +184 -0
  181. superqode/superqe/acp_runner.py +1064 -0
  182. superqode/superqe/constitution/__init__.py +62 -0
  183. superqode/superqe/constitution/evaluator.py +308 -0
  184. superqode/superqe/constitution/loader.py +432 -0
  185. superqode/superqe/constitution/schema.py +250 -0
  186. superqode/superqe/events.py +591 -0
  187. superqode/superqe/frameworks/__init__.py +65 -0
  188. superqode/superqe/frameworks/base.py +234 -0
  189. superqode/superqe/frameworks/e2e.py +263 -0
  190. superqode/superqe/frameworks/executor.py +237 -0
  191. superqode/superqe/frameworks/javascript.py +409 -0
  192. superqode/superqe/frameworks/python.py +373 -0
  193. superqode/superqe/frameworks/registry.py +92 -0
  194. superqode/superqe/mcp_tools/__init__.py +47 -0
  195. superqode/superqe/mcp_tools/core_tools.py +418 -0
  196. superqode/superqe/mcp_tools/registry.py +230 -0
  197. superqode/superqe/mcp_tools/testing_tools.py +167 -0
  198. superqode/superqe/noise.py +89 -0
  199. superqode/superqe/orchestrator.py +778 -0
  200. superqode/superqe/roles.py +609 -0
  201. superqode/superqe/session.py +713 -0
  202. superqode/superqe/skills/__init__.py +57 -0
  203. superqode/superqe/skills/base.py +106 -0
  204. superqode/superqe/skills/core_skills.py +899 -0
  205. superqode/superqe/skills/registry.py +90 -0
  206. superqode/superqe/verifier.py +101 -0
  207. superqode/superqe_cli.py +76 -0
  208. superqode/tool_call.py +358 -0
  209. superqode/tools/__init__.py +93 -0
  210. superqode/tools/agent_tools.py +496 -0
  211. superqode/tools/base.py +324 -0
  212. superqode/tools/batch_tool.py +133 -0
  213. superqode/tools/diagnostics.py +311 -0
  214. superqode/tools/edit_tools.py +653 -0
  215. superqode/tools/enhanced_base.py +515 -0
  216. superqode/tools/file_tools.py +269 -0
  217. superqode/tools/file_tracking.py +45 -0
  218. superqode/tools/lsp_tools.py +610 -0
  219. superqode/tools/network_tools.py +350 -0
  220. superqode/tools/permissions.py +400 -0
  221. superqode/tools/question_tool.py +324 -0
  222. superqode/tools/search_tools.py +598 -0
  223. superqode/tools/shell_tools.py +259 -0
  224. superqode/tools/todo_tools.py +121 -0
  225. superqode/tools/validation.py +80 -0
  226. superqode/tools/web_tools.py +639 -0
  227. superqode/tui.py +1152 -0
  228. superqode/tui_integration.py +875 -0
  229. superqode/tui_widgets/__init__.py +27 -0
  230. superqode/tui_widgets/widgets/__init__.py +18 -0
  231. superqode/tui_widgets/widgets/progress.py +185 -0
  232. superqode/tui_widgets/widgets/tool_display.py +188 -0
  233. superqode/undo_manager.py +574 -0
  234. superqode/utils/__init__.py +5 -0
  235. superqode/utils/error_handling.py +323 -0
  236. superqode/utils/fuzzy.py +257 -0
  237. superqode/widgets/__init__.py +477 -0
  238. superqode/widgets/agent_collab.py +390 -0
  239. superqode/widgets/agent_store.py +936 -0
  240. superqode/widgets/agent_switcher.py +395 -0
  241. superqode/widgets/animation_manager.py +284 -0
  242. superqode/widgets/code_context.py +356 -0
  243. superqode/widgets/command_palette.py +412 -0
  244. superqode/widgets/connection_status.py +537 -0
  245. superqode/widgets/conversation_history.py +470 -0
  246. superqode/widgets/diff_indicator.py +155 -0
  247. superqode/widgets/enhanced_status_bar.py +385 -0
  248. superqode/widgets/enhanced_toast.py +476 -0
  249. superqode/widgets/file_browser.py +809 -0
  250. superqode/widgets/file_reference.py +585 -0
  251. superqode/widgets/issue_timeline.py +340 -0
  252. superqode/widgets/leader_key.py +264 -0
  253. superqode/widgets/mode_switcher.py +445 -0
  254. superqode/widgets/model_picker.py +234 -0
  255. superqode/widgets/permission_preview.py +1205 -0
  256. superqode/widgets/prompt.py +358 -0
  257. superqode/widgets/provider_connect.py +725 -0
  258. superqode/widgets/pty_shell.py +587 -0
  259. superqode/widgets/qe_dashboard.py +321 -0
  260. superqode/widgets/resizable_sidebar.py +377 -0
  261. superqode/widgets/response_changes.py +218 -0
  262. superqode/widgets/response_display.py +528 -0
  263. superqode/widgets/rich_tool_display.py +613 -0
  264. superqode/widgets/sidebar_panels.py +1180 -0
  265. superqode/widgets/slash_complete.py +356 -0
  266. superqode/widgets/split_view.py +612 -0
  267. superqode/widgets/status_bar.py +273 -0
  268. superqode/widgets/superqode_display.py +786 -0
  269. superqode/widgets/thinking_display.py +815 -0
  270. superqode/widgets/throbber.py +87 -0
  271. superqode/widgets/toast.py +206 -0
  272. superqode/widgets/unified_output.py +1073 -0
  273. superqode/workspace/__init__.py +75 -0
  274. superqode/workspace/artifacts.py +472 -0
  275. superqode/workspace/coordinator.py +353 -0
  276. superqode/workspace/diff_tracker.py +429 -0
  277. superqode/workspace/git_guard.py +373 -0
  278. superqode/workspace/git_snapshot.py +526 -0
  279. superqode/workspace/manager.py +750 -0
  280. superqode/workspace/snapshot.py +357 -0
  281. superqode/workspace/watcher.py +535 -0
  282. superqode/workspace/worktree.py +440 -0
  283. superqode-0.1.5.dist-info/METADATA +204 -0
  284. superqode-0.1.5.dist-info/RECORD +288 -0
  285. superqode-0.1.5.dist-info/WHEEL +5 -0
  286. superqode-0.1.5.dist-info/entry_points.txt +3 -0
  287. superqode-0.1.5.dist-info/licenses/LICENSE +648 -0
  288. superqode-0.1.5.dist-info/top_level.txt +1 -0
@@ -0,0 +1,418 @@
1
+ """Local provider auto-discovery service.
2
+
3
+ This module provides automatic discovery of running local LLM servers
4
+ by scanning common ports and detecting provider types.
5
+ """
6
+
7
+ import asyncio
8
+ import json
9
+ import socket
10
+ import time
11
+ from dataclasses import dataclass, field
12
+ from typing import Any, Dict, List, Optional, Tuple
13
+ from urllib.error import URLError
14
+ from urllib.request import Request, urlopen
15
+
16
+ from superqode.providers.local.base import (
17
+ LocalProviderType,
18
+ LocalProviderClient,
19
+ LocalModel,
20
+ ProviderStatus,
21
+ )
22
+
23
+
24
# Default ports for each provider type.
# Note: 8080 is shared by TGI, MLX, and llama.cpp — the discovery scanner
# disambiguates by probing provider-specific HTTP endpoints, not by port alone.
DEFAULT_PORTS = {
    LocalProviderType.OLLAMA: [11434],
    LocalProviderType.LMSTUDIO: [1234],
    LocalProviderType.VLLM: [8000],
    LocalProviderType.SGLANG: [30000],
    LocalProviderType.TGI: [8080],
    LocalProviderType.MLX: [8080],
    LocalProviderType.LLAMACPP: [8080],
    # Generic OpenAI-compatible servers commonly listen on several ports.
    LocalProviderType.OPENAI_COMPAT: [8000, 8080, 5000],
}

# All ports to scan for discovery (union of the defaults above plus 3000,
# another common dev-server port).
ALL_PORTS = [11434, 1234, 8000, 8080, 30000, 5000, 3000]
40
@dataclass
class DiscoveredProvider:
    """Description of a local LLM server found during a port scan.

    Attributes:
        provider_type: Detected provider kind (Ollama, LM Studio, ...).
        host: Base URL of the provider.
        port: TCP port the server listens on.
        version: Provider version string, empty when not reported.
        models: Models the server advertises as available.
        running_models: Models currently loaded into memory.
        latency_ms: Time taken to detect the provider, in milliseconds.
    """

    provider_type: LocalProviderType
    host: str
    port: int
    version: str = ""
    models: List[LocalModel] = field(default_factory=list)
    running_models: List[LocalModel] = field(default_factory=list)
    latency_ms: float = 0.0

    @property
    def url(self) -> str:
        """Full base URL for this provider on the local machine."""
        return f"http://localhost:{self.port}"

    @property
    def model_count(self) -> int:
        """How many models the provider reports as available."""
        return len(self.models)

    @property
    def running_count(self) -> int:
        """How many models are currently loaded."""
        return len(self.running_models)
78
class LocalProviderDiscovery:
    """Discovers running local LLM servers.

    Scans common localhost ports and detects provider types by probing
    provider-specific HTTP endpoints:
    - 11434: Ollama
    - 1234: LM Studio
    - 8000: vLLM, OpenAI-compatible
    - 30000: SGLang
    - 8080: TGI, MLX, llama.cpp
    """

    def __init__(self, timeout: float = 2.0):
        """Initialize the discovery service.

        Args:
            timeout: Connection timeout (seconds) used for both the raw TCP
                port check and every HTTP probe.
        """
        self._timeout = timeout
        # Cache of the most recent scan results, keyed by "localhost:<port>".
        self._discovered: Dict[str, DiscoveredProvider] = {}

    async def scan_all(self) -> Dict[str, DiscoveredProvider]:
        """Scan all common ports for local providers.

        Returns:
            Dict mapping "localhost:<port>" to DiscoveredProvider.
        """
        # Probe all candidate ports concurrently; return_exceptions=True so
        # one failing probe cannot abort the whole scan.
        tasks = [self._scan_port(port) for port in ALL_PORTS]
        results = await asyncio.gather(*tasks, return_exceptions=True)

        discovered = {}
        for result in results:
            # Skip None (nothing found) as well as any raised exceptions.
            if isinstance(result, DiscoveredProvider):
                discovered[f"localhost:{result.port}"] = result

        self._discovered = discovered
        return discovered

    async def scan_port(self, port: int) -> Optional[DiscoveredProvider]:
        """Scan a specific port for a provider.

        Args:
            port: Port number to scan.

        Returns:
            DiscoveredProvider if found, None otherwise.
        """
        return await self._scan_port(port)

    async def _scan_port(self, port: int) -> Optional[DiscoveredProvider]:
        """Probe one port: TCP check, type detection, then metadata fetch."""
        # Cheap TCP connect first; skip all HTTP probing for closed ports.
        if not await self._is_port_open(port):
            return None

        start_time = time.time()

        provider_type = await self._detect_provider_type(port)
        if provider_type is None:
            return None

        # Latency covers only the type-detection round trips.
        latency = (time.time() - start_time) * 1000

        version = await self._get_version(port, provider_type)
        models = await self._list_models(port, provider_type)
        running = await self._list_running(port, provider_type)

        return DiscoveredProvider(
            provider_type=provider_type,
            host=f"http://localhost:{port}",
            port=port,
            version=version,
            models=models,
            running_models=running,
            latency_ms=latency,
        )

    async def _is_port_open(self, port: int) -> bool:
        """Check whether a TCP port on localhost accepts connections."""
        # FIX: asyncio.get_event_loop() inside a coroutine is deprecated
        # since Python 3.10; get_running_loop() is the supported call here.
        loop = asyncio.get_running_loop()

        def check() -> bool:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(self._timeout)
            try:
                # connect_ex returns 0 on success instead of raising.
                return sock.connect_ex(("localhost", port)) == 0
            finally:
                sock.close()

        return await loop.run_in_executor(None, check)

    async def _detect_provider_type(self, port: int) -> Optional[LocalProviderType]:
        """Detect the provider type by probing well-known HTTP endpoints.

        Runs the blocking urllib probes in an executor thread. Checks are
        ordered from most specific (port-pinned) to most generic.
        """
        loop = asyncio.get_running_loop()

        def detect() -> Optional[LocalProviderType]:
            # Ollama: /api/tags answers with {"models": [...]}.
            if port == 11434:
                try:
                    req = Request(f"http://localhost:{port}/api/tags")
                    with urlopen(req, timeout=self._timeout) as resp:
                        data = json.loads(resp.read())
                    if "models" in data:
                        return LocalProviderType.OLLAMA
                except Exception:
                    pass

            # LM Studio: OpenAI-style /v1/models on its default port.
            if port == 1234:
                try:
                    req = Request(f"http://localhost:{port}/v1/models")
                    with urlopen(req, timeout=self._timeout) as resp:
                        data = json.loads(resp.read())
                    if "data" in data:
                        return LocalProviderType.LMSTUDIO
                except Exception:
                    pass

            # SGLang: reachable /health endpoint on its default port.
            if port == 30000:
                try:
                    req = Request(f"http://localhost:{port}/health")
                    with urlopen(req, timeout=self._timeout):
                        return LocalProviderType.SGLANG
                except Exception:
                    pass

            # TGI: /info includes a "model_id" field (checked on any port).
            try:
                req = Request(f"http://localhost:{port}/info")
                with urlopen(req, timeout=self._timeout) as resp:
                    data = json.loads(resp.read())
                if "model_id" in data:
                    return LocalProviderType.TGI
            except Exception:
                pass

            # vLLM: both /health and /v1/models must respond.
            if port == 8000:
                try:
                    req = Request(f"http://localhost:{port}/health")
                    with urlopen(req, timeout=self._timeout):
                        pass
                    req2 = Request(f"http://localhost:{port}/v1/models")
                    with urlopen(req2, timeout=self._timeout):
                        return LocalProviderType.VLLM
                except Exception:
                    pass

            # Generic OpenAI-compatible check.
            try:
                req = Request(f"http://localhost:{port}/v1/models")
                with urlopen(req, timeout=self._timeout) as resp:
                    data = json.loads(resp.read())
                if "data" in data:
                    # Could be MLX, llama.cpp, or generic OpenAI-compatible;
                    # heuristic: 8080 is the common MLX port.
                    if port == 8080:
                        return LocalProviderType.MLX
                    return LocalProviderType.OPENAI_COMPAT
            except Exception:
                pass

            return None

        return await loop.run_in_executor(None, detect)

    async def _get_version(self, port: int, provider_type: LocalProviderType) -> str:
        """Fetch the provider's version string; empty when unavailable."""
        loop = asyncio.get_running_loop()

        def get_ver() -> str:
            if provider_type == LocalProviderType.OLLAMA:
                try:
                    req = Request(f"http://localhost:{port}/api/version")
                    with urlopen(req, timeout=self._timeout) as resp:
                        data = json.loads(resp.read())
                    return data.get("version", "")
                except Exception:
                    pass

            if provider_type == LocalProviderType.TGI:
                try:
                    req = Request(f"http://localhost:{port}/info")
                    with urlopen(req, timeout=self._timeout) as resp:
                        data = json.loads(resp.read())
                    return data.get("version", "")
                except Exception:
                    pass

            return ""

        return await loop.run_in_executor(None, get_ver)

    async def _list_models(self, port: int, provider_type: LocalProviderType) -> List[LocalModel]:
        """List available models from the provider (empty list on failure)."""
        loop = asyncio.get_running_loop()

        def list_mod() -> List[LocalModel]:
            models: List[LocalModel] = []

            if provider_type == LocalProviderType.OLLAMA:
                try:
                    req = Request(f"http://localhost:{port}/api/tags")
                    with urlopen(req, timeout=self._timeout) as resp:
                        data = json.loads(resp.read())
                    for m in data.get("models", []):
                        models.append(
                            LocalModel(
                                id=m.get("name", ""),
                                # "llama3:8b" -> display name "Llama3".
                                name=m.get("name", "").split(":")[0].title(),
                                size_bytes=m.get("size", 0),
                            )
                        )
                except Exception:
                    pass

            elif provider_type in (
                LocalProviderType.LMSTUDIO,
                LocalProviderType.VLLM,
                LocalProviderType.OPENAI_COMPAT,
                LocalProviderType.MLX,
            ):
                try:
                    req = Request(f"http://localhost:{port}/v1/models")
                    with urlopen(req, timeout=self._timeout) as resp:
                        data = json.loads(resp.read())
                    for m in data.get("data", []):
                        models.append(
                            LocalModel(
                                id=m.get("id", ""),
                                # OpenAI-style IDs are often paths; keep the leaf.
                                name=m.get("id", "").split("/")[-1],
                            )
                        )
                except Exception:
                    pass

            elif provider_type == LocalProviderType.TGI:
                try:
                    req = Request(f"http://localhost:{port}/info")
                    with urlopen(req, timeout=self._timeout) as resp:
                        data = json.loads(resp.read())
                    model_id = data.get("model_id", "")
                    if model_id:
                        # TGI serves exactly one model per instance.
                        models.append(
                            LocalModel(
                                id=model_id,
                                name=model_id.split("/")[-1],
                                context_window=data.get("max_input_length", 4096),
                            )
                        )
                except Exception:
                    pass

            return models

        return await loop.run_in_executor(None, list_mod)

    async def _list_running(self, port: int, provider_type: LocalProviderType) -> List[LocalModel]:
        """List currently loaded models (only Ollama exposes this)."""
        loop = asyncio.get_running_loop()

        def list_run() -> List[LocalModel]:
            if provider_type != LocalProviderType.OLLAMA:
                return []  # Only Ollama has running model tracking

            try:
                req = Request(f"http://localhost:{port}/api/ps")
                with urlopen(req, timeout=self._timeout) as resp:
                    data = json.loads(resp.read())
                return [
                    LocalModel(
                        id=m.get("name", ""),
                        name=m.get("name", "").split(":")[0].title(),
                        running=True,
                        vram_usage=m.get("size_vram", 0),
                    )
                    for m in data.get("models", [])
                ]
            except Exception:
                return []

        return await loop.run_in_executor(None, list_run)

    async def discover_models(self) -> List[LocalModel]:
        """Discover all available models from all running providers.

        Triggers a full scan when no cached results exist.

        Returns:
            Combined list of LocalModel from all discovered providers, with
            provider_type/provider_host recorded in each model's details.
        """
        if not self._discovered:
            await self.scan_all()

        all_models: List[LocalModel] = []
        for provider in self._discovered.values():
            for model in provider.models:
                # Tag each model with its originating provider.
                model.details["provider_type"] = provider.provider_type.value
                model.details["provider_host"] = provider.host
                all_models.append(model)

        return all_models

    def get_discovered(self) -> Dict[str, DiscoveredProvider]:
        """Get cached discovered providers from the last scan."""
        return self._discovered
394
+
395
# Singleton instance shared by the helpers below.
_discovery_instance: Optional[LocalProviderDiscovery] = None


def get_discovery_service() -> LocalProviderDiscovery:
    """Return the process-wide discovery service, creating it on first use.

    Returns:
        The shared LocalProviderDiscovery instance.
    """
    global _discovery_instance
    if _discovery_instance is None:
        _discovery_instance = LocalProviderDiscovery()
    return _discovery_instance


async def quick_scan() -> Dict[str, DiscoveredProvider]:
    """Run a scan of all well-known local ports via the shared service.

    Returns:
        Dict of discovered providers keyed by "localhost:<port>".
    """
    return await get_discovery_service().scan_all()
@@ -0,0 +1,256 @@
1
+ """LM Studio client for local model inference.
2
+
3
+ LM Studio is a desktop application for running LLMs locally with
4
+ a user-friendly interface and OpenAI-compatible API server.
5
+ """
6
+
7
+ import asyncio
8
+ import json
9
+ import os
10
+ import time
11
+ from datetime import datetime
12
+ from typing import Any, Dict, List, Optional
13
+ from urllib.error import URLError
14
+ from urllib.request import Request, urlopen
15
+
16
+ from superqode.providers.local.base import (
17
+ LocalProviderClient,
18
+ LocalProviderType,
19
+ LocalModel,
20
+ ProviderStatus,
21
+ ToolTestResult,
22
+ detect_model_family,
23
+ detect_quantization,
24
+ likely_supports_tools,
25
+ )
26
+
27
+
28
class LMStudioClient(LocalProviderClient):
    """LM Studio local server client.

    LM Studio provides:
    - User-friendly GUI for model management
    - OpenAI-compatible local server
    - GGUF model support
    - GPU and CPU inference

    API Endpoints (OpenAI-compatible):
    - GET /v1/models - List loaded models
    - POST /v1/chat/completions - Chat completion
    - POST /v1/completions - Text completion
    - POST /v1/embeddings - Embeddings

    Environment:
        LMSTUDIO_HOST: Override default host (default: http://localhost:1234)
    """

    provider_type = LocalProviderType.LMSTUDIO
    default_port = 1234

    def __init__(self, host: Optional[str] = None):
        """Initialize LM Studio client.

        Args:
            host: LM Studio host URL. Falls back to LMSTUDIO_HOST env var;
                when both are unset, the base class default applies.
        """
        if host is None:
            host = os.environ.get("LMSTUDIO_HOST")
        super().__init__(host)

    def _request(
        self, method: str, endpoint: str, data: Optional[Dict] = None, timeout: float = 30.0
    ) -> Any:
        """Make a blocking JSON request to the LM Studio API.

        Args:
            method: HTTP method ("GET", "POST", ...).
            endpoint: Path beginning with "/", appended to self.host.
            data: Optional JSON body.
            timeout: Request timeout in seconds.

        Returns:
            Decoded JSON response.

        Raises:
            urllib.error.URLError / HTTPError on connection or HTTP failure.
        """
        url = f"{self.host}{endpoint}"
        headers = {"Content-Type": "application/json"}

        body = None
        if data is not None:
            body = json.dumps(data).encode("utf-8")

        request = Request(url, data=body, headers=headers, method=method)

        with urlopen(request, timeout=timeout) as response:
            return json.loads(response.read().decode("utf-8"))

    async def _async_request(
        self, method: str, endpoint: str, data: Optional[Dict] = None, timeout: float = 30.0
    ) -> Any:
        """Async wrapper for _request (runs it in the default executor)."""
        # FIX: asyncio.get_event_loop() inside a coroutine is deprecated
        # since Python 3.10; get_running_loop() is the supported call here.
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(
            None, lambda: self._request(method, endpoint, data, timeout)
        )

    async def is_available(self) -> bool:
        """Check if the LM Studio server is running and answering."""
        try:
            await self._async_request("GET", "/v1/models", timeout=5.0)
            return True
        except Exception:
            return False

    async def get_status(self) -> ProviderStatus:
        """Get detailed LM Studio status.

        Returns:
            ProviderStatus with availability, model counts, and latency;
            an unavailable status carrying the error text on failure.
        """
        start_time = time.time()

        try:
            models_response = await self._async_request("GET", "/v1/models", timeout=5.0)
            latency = (time.time() - start_time) * 1000

            models = models_response.get("data", [])

            return ProviderStatus(
                available=True,
                provider_type=self.provider_type,
                host=self.host,
                version="LM Studio",
                models_count=len(models),
                # LM Studio only lists loaded models, so available == running.
                running_models=len(models),
                # NOTE(review): GPU availability is assumed, not detected —
                # the API exposes no hardware info. Confirm if this matters.
                gpu_available=True,
                latency_ms=latency,
                last_checked=datetime.now(),
            )

        except Exception as e:
            return ProviderStatus(
                available=False,
                provider_type=self.provider_type,
                host=self.host,
                error=str(e),
                last_checked=datetime.now(),
            )

    async def list_models(self) -> List[LocalModel]:
        """List available models (empty list when the server is down)."""
        try:
            response = await self._async_request("GET", "/v1/models")
            models = response.get("data", [])

            result = []
            for model_data in models:
                model_id = model_data.get("id", "")

                # LM Studio IDs are often filesystem paths; keep the leaf as
                # the display name and infer family/quantization from the ID.
                name = model_id.split("/")[-1]
                family = detect_model_family(model_id)
                quant = detect_quantization(model_id)

                result.append(
                    LocalModel(
                        id=model_id,
                        name=name,
                        quantization=quant,
                        family=family,
                        supports_tools=likely_supports_tools(model_id),
                        # /v1/models only reports loaded models.
                        running=True,
                    )
                )

            return result

        except Exception:
            return []

    async def list_running(self) -> List[LocalModel]:
        """List running models (identical to list_models for LM Studio)."""
        return await self.list_models()

    async def get_model_info(self, model_id: str) -> Optional[LocalModel]:
        """Get model information by exact ID or substring match."""
        models = await self.list_models()
        for m in models:
            if m.id == model_id or model_id in m.id:
                return m
        return None

    async def test_tool_calling(self, model_id: str) -> ToolTestResult:
        """Probe whether a model actually emits tool calls.

        Sends a chat completion with a single weather tool and inspects the
        response for tool_calls.

        Args:
            model_id: Model to exercise.

        Returns:
            ToolTestResult describing observed tool-calling support.
        """
        start_time = time.time()

        # Skip the expensive live probe for families not known to do tools.
        if not likely_supports_tools(model_id):
            return ToolTestResult(
                model_id=model_id,
                supports_tools=False,
                notes="Model family not known to support tools",
            )

        test_tools = [
            {
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "description": "Get weather for a city",
                    "parameters": {
                        "type": "object",
                        "properties": {"city": {"type": "string"}},
                        "required": ["city"],
                    },
                },
            }
        ]

        try:
            response = await self._async_request(
                "POST",
                "/v1/chat/completions",
                data={
                    "model": model_id,
                    "messages": [{"role": "user", "content": "What's the weather in Paris?"}],
                    "tools": test_tools,
                },
                timeout=60.0,
            )

            latency = (time.time() - start_time) * 1000

            choices = response.get("choices", [])
            if choices:
                message = choices[0].get("message", {})
                tool_calls = message.get("tool_calls", [])

                if tool_calls:
                    return ToolTestResult(
                        model_id=model_id,
                        supports_tools=True,
                        parallel_tools=len(tool_calls) > 1,
                        tool_choice=["auto"],
                        latency_ms=latency,
                        notes="Tool calling verified via LM Studio",
                    )

            return ToolTestResult(
                model_id=model_id,
                supports_tools=False,
                latency_ms=latency,
                notes="Model did not use tools in test",
            )

        except Exception as e:
            return ToolTestResult(
                model_id=model_id,
                supports_tools=False,
                error=str(e),
            )

    def get_litellm_model_name(self, model_id: str) -> str:
        """Get LiteLLM-compatible model name.

        LM Studio models use the "lm_studio/" prefix in LiteLLM; the prefix
        is added only when not already present.
        """
        if model_id.startswith("lm_studio/"):
            return model_id
        return f"lm_studio/{model_id}"
+
244
async def get_lmstudio_client(host: Optional[str] = None) -> Optional[LMStudioClient]:
    """Build an LM Studio client, returning it only if the server answers.

    Args:
        host: Optional host override.

    Returns:
        LMStudioClient if LM Studio server is running, None otherwise.
    """
    candidate = LMStudioClient(host)
    return candidate if await candidate.is_available() else None