claw-code 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (110)
  1. claw_code-0.2.0.dist-info/METADATA +560 -0
  2. claw_code-0.2.0.dist-info/RECORD +110 -0
  3. claw_code-0.2.0.dist-info/WHEEL +5 -0
  4. claw_code-0.2.0.dist-info/entry_points.txt +2 -0
  5. claw_code-0.2.0.dist-info/licenses/LICENSE +68 -0
  6. claw_code-0.2.0.dist-info/top_level.txt +1 -0
  7. src/QueryEngine.py +19 -0
  8. src/Tool.py +15 -0
  9. src/__init__.py +29 -0
  10. src/assistant/__init__.py +16 -0
  11. src/bootstrap/__init__.py +16 -0
  12. src/bootstrap_graph.py +27 -0
  13. src/bridge/__init__.py +16 -0
  14. src/buddy/__init__.py +16 -0
  15. src/cli/__init__.py +16 -0
  16. src/command_graph.py +34 -0
  17. src/commands.py +90 -0
  18. src/components/__init__.py +16 -0
  19. src/config.py +58 -0
  20. src/constants/__init__.py +16 -0
  21. src/context.py +47 -0
  22. src/coordinator/__init__.py +16 -0
  23. src/costHook.py +8 -0
  24. src/cost_tracker.py +13 -0
  25. src/deferred_init.py +31 -0
  26. src/dialogLaunchers.py +15 -0
  27. src/direct_modes.py +21 -0
  28. src/entrypoints/__init__.py +16 -0
  29. src/execution_registry.py +51 -0
  30. src/history.py +22 -0
  31. src/hooks/__init__.py +16 -0
  32. src/init_wizard.py +238 -0
  33. src/ink.py +6 -0
  34. src/interactiveHelpers.py +5 -0
  35. src/keybindings/__init__.py +16 -0
  36. src/main.py +274 -0
  37. src/memdir/__init__.py +16 -0
  38. src/migrations/__init__.py +16 -0
  39. src/model_detection.py +96 -0
  40. src/models.py +49 -0
  41. src/moreright/__init__.py +16 -0
  42. src/native_ts/__init__.py +16 -0
  43. src/outputStyles/__init__.py +16 -0
  44. src/parity_audit.py +138 -0
  45. src/permissions.py +20 -0
  46. src/plugins/__init__.py +16 -0
  47. src/port_manifest.py +52 -0
  48. src/prefetch.py +23 -0
  49. src/projectOnboardingState.py +10 -0
  50. src/query.py +13 -0
  51. src/query_engine.py +289 -0
  52. src/reference_data/__init__.py +1 -0
  53. src/reference_data/archive_surface_snapshot.json +63 -0
  54. src/reference_data/commands_snapshot.json +1037 -0
  55. src/reference_data/subsystems/assistant.json +8 -0
  56. src/reference_data/subsystems/bootstrap.json +8 -0
  57. src/reference_data/subsystems/bridge.json +32 -0
  58. src/reference_data/subsystems/buddy.json +13 -0
  59. src/reference_data/subsystems/cli.json +26 -0
  60. src/reference_data/subsystems/components.json +32 -0
  61. src/reference_data/subsystems/constants.json +28 -0
  62. src/reference_data/subsystems/coordinator.json +8 -0
  63. src/reference_data/subsystems/entrypoints.json +15 -0
  64. src/reference_data/subsystems/hooks.json +32 -0
  65. src/reference_data/subsystems/keybindings.json +21 -0
  66. src/reference_data/subsystems/memdir.json +15 -0
  67. src/reference_data/subsystems/migrations.json +18 -0
  68. src/reference_data/subsystems/moreright.json +8 -0
  69. src/reference_data/subsystems/native_ts.json +11 -0
  70. src/reference_data/subsystems/outputStyles.json +8 -0
  71. src/reference_data/subsystems/plugins.json +9 -0
  72. src/reference_data/subsystems/remote.json +11 -0
  73. src/reference_data/subsystems/schemas.json +8 -0
  74. src/reference_data/subsystems/screens.json +10 -0
  75. src/reference_data/subsystems/server.json +10 -0
  76. src/reference_data/subsystems/services.json +32 -0
  77. src/reference_data/subsystems/skills.json +27 -0
  78. src/reference_data/subsystems/state.json +13 -0
  79. src/reference_data/subsystems/types.json +18 -0
  80. src/reference_data/subsystems/upstreamproxy.json +9 -0
  81. src/reference_data/subsystems/utils.json +32 -0
  82. src/reference_data/subsystems/vim.json +12 -0
  83. src/reference_data/subsystems/voice.json +8 -0
  84. src/reference_data/tools_snapshot.json +922 -0
  85. src/remote/__init__.py +16 -0
  86. src/remote_runtime.py +25 -0
  87. src/repl.py +577 -0
  88. src/replLauncher.py +5 -0
  89. src/runtime.py +205 -0
  90. src/schemas/__init__.py +16 -0
  91. src/screens/__init__.py +16 -0
  92. src/server/__init__.py +16 -0
  93. src/services/__init__.py +16 -0
  94. src/services/ollama_adapter.py +251 -0
  95. src/services/ollama_setup.py +192 -0
  96. src/session_store.py +79 -0
  97. src/setup.py +77 -0
  98. src/skills/__init__.py +16 -0
  99. src/state/__init__.py +16 -0
  100. src/system_init.py +23 -0
  101. src/task.py +5 -0
  102. src/tasks.py +11 -0
  103. src/tool_pool.py +37 -0
  104. src/tools.py +96 -0
  105. src/transcript.py +23 -0
  106. src/types/__init__.py +16 -0
  107. src/upstreamproxy/__init__.py +16 -0
  108. src/utils/__init__.py +16 -0
  109. src/vim/__init__.py +16 -0
  110. src/voice/__init__.py +16 -0
src/runtime.py ADDED
@@ -0,0 +1,205 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass
4
+
5
+ from .commands import PORTED_COMMANDS
6
+ from .context import PortContext, build_port_context, render_context
7
+ from .history import HistoryLog
8
+ from .models import PermissionDenial, PortingModule
9
+ from .query_engine import QueryEngineConfig, QueryEnginePort, TurnResult
10
+ from .setup import SetupReport, WorkspaceSetup, run_setup
11
+ from .system_init import build_system_init_message
12
+ from .tools import PORTED_TOOLS
13
+ from .execution_registry import build_execution_registry
14
+
15
+
16
@dataclass(frozen=True)
class RoutedMatch:
    """A single routing hit: a ported command or tool matched to a prompt."""

    kind: str         # 'command' or 'tool'
    name: str         # registry name of the matched module
    source_hint: str  # provenance hint for where the match came from
    score: int        # token-overlap score used for ranking
22
+
23
+
24
@dataclass
class RuntimeSession:
    """Aggregated artifacts from one bootstrapped runtime turn.

    Bundles the prompt, resolved context, setup results, routing matches,
    execution/stream output and the final turn result so callers can render
    or persist the whole session as a unit.
    """

    prompt: str
    context: PortContext
    setup: WorkspaceSetup
    setup_report: SetupReport
    system_init_message: str
    history: HistoryLog
    routed_matches: list[RoutedMatch]
    turn_result: TurnResult
    command_execution_messages: tuple[str, ...]
    tool_execution_messages: tuple[str, ...]
    stream_events: tuple[dict[str, object], ...]
    persisted_session_path: str

    def as_markdown(self) -> str:
        """Render the session as a markdown report.

        Sections, in order: context, setup, startup steps, system init,
        routed matches, command/tool execution, stream events, turn result,
        persisted path, then the history log appendix.
        """
        lines = [
            '# Runtime Session',
            '',
            f'Prompt: {self.prompt}',
            '',
            '## Context',
            render_context(self.context),
            '',
            '## Setup',
            f'- Python: {self.setup.python_version} ({self.setup.implementation})',
            f'- Platform: {self.setup.platform_name}',
            f'- Test command: {self.setup.test_command}',
            '',
            '## Startup Steps',
            *(f'- {step}' for step in self.setup.startup_steps()),
            '',
            '## System Init',
            self.system_init_message,
            '',
            '## Routed Matches',
        ]
        if self.routed_matches:
            lines.extend(
                f'- [{match.kind}] {match.name} ({match.score}) — {match.source_hint}'
                for match in self.routed_matches
            )
        else:
            # Keep an explicit placeholder so the section is never empty.
            lines.append('- none')
        lines.extend([
            '',
            '## Command Execution',
            *(self.command_execution_messages or ('none',)),
            '',
            '## Tool Execution',
            *(self.tool_execution_messages or ('none',)),
            '',
            '## Stream Events',
            # assumes each event dict carries a 'type' key — TODO confirm
            # against QueryEnginePort.stream_submit_message
            *(f"- {event['type']}: {event}" for event in self.stream_events),
            '',
            '## Turn Result',
            self.turn_result.output,
            '',
            f'Persisted session path: {self.persisted_session_path}',
            '',
            self.history.as_markdown(),
        ])
        return '\n'.join(lines)
87
+
88
+
89
class PortRuntime:
    """Routes prompts against the ported command/tool registries and drives
    query-engine turns (single bootstrap, batch loop, or streaming loop)."""

    def route_prompt(self, prompt: str, limit: int = 5) -> list[RoutedMatch]:
        """Return up to ``limit`` best matches for ``prompt``.

        The top command and top tool (when present) are always selected
        first; remaining slots are filled by score across both kinds.
        """
        # '/' and '-' act as word separators so tokens inside slash
        # commands and hyphenated names are matched individually.
        tokens = {token.lower() for token in prompt.replace('/', ' ').replace('-', ' ').split() if token}
        by_kind = {
            'command': self._collect_matches(tokens, PORTED_COMMANDS, 'command'),
            'tool': self._collect_matches(tokens, PORTED_TOOLS, 'tool'),
        }

        # Guarantee each kind gets representation before ranking leftovers.
        selected: list[RoutedMatch] = []
        for kind in ('command', 'tool'):
            if by_kind[kind]:
                selected.append(by_kind[kind].pop(0))

        # Rank the rest by descending score; ties break on kind then name
        # so the ordering is deterministic.
        leftovers = sorted(
            [match for matches in by_kind.values() for match in matches],
            key=lambda item: (-item.score, item.kind, item.name),
        )
        selected.extend(leftovers[: max(0, limit - len(selected))])
        return selected[:limit]

    def bootstrap_session(self, prompt: str, limit: int = 5) -> RuntimeSession:
        """Run one full session: context build, setup, routing, command/tool
        execution, event streaming, a query-engine turn and persistence.

        Returns the assembled RuntimeSession.
        """
        context = build_port_context()
        setup_report = run_setup(trusted=True)
        setup = setup_report.setup
        history = HistoryLog()
        engine = QueryEnginePort.from_workspace()
        history.add('context', f'python_files={context.python_file_count}, archive_available={context.archive_available}')
        history.add('registry', f'commands={len(PORTED_COMMANDS)}, tools={len(PORTED_TOOLS)}')
        matches = self.route_prompt(prompt, limit=limit)
        registry = build_execution_registry()
        # Resolve each executor exactly once. The original looked each one
        # up twice (once in the comprehension filter, again in the
        # expression); a single lookup avoids the redundant call.
        command_execs = tuple(
            executor.execute(prompt)
            for executor in (registry.command(match.name) for match in matches if match.kind == 'command')
            if executor
        )
        tool_execs = tuple(
            executor.execute(prompt)
            for executor in (registry.tool(match.name) for match in matches if match.kind == 'tool')
            if executor
        )
        denials = tuple(self._infer_permission_denials(matches))
        stream_events = tuple(engine.stream_submit_message(
            prompt,
            matched_commands=tuple(match.name for match in matches if match.kind == 'command'),
            matched_tools=tuple(match.name for match in matches if match.kind == 'tool'),
            denied_tools=denials,
        ))
        turn_result = engine.submit_message(
            prompt,
            matched_commands=tuple(match.name for match in matches if match.kind == 'command'),
            matched_tools=tuple(match.name for match in matches if match.kind == 'tool'),
            denied_tools=denials,
        )
        persisted_session_path = engine.persist_session()
        history.add('routing', f'matches={len(matches)} for prompt={prompt!r}')
        history.add('execution', f'command_execs={len(command_execs)} tool_execs={len(tool_execs)}')
        history.add('turn', f'commands={len(turn_result.matched_commands)} tools={len(turn_result.matched_tools)} denials={len(turn_result.permission_denials)} stop={turn_result.stop_reason}')
        history.add('session_store', persisted_session_path)
        return RuntimeSession(
            prompt=prompt,
            context=context,
            setup=setup,
            setup_report=setup_report,
            system_init_message=build_system_init_message(trusted=True),
            history=history,
            routed_matches=matches,
            turn_result=turn_result,
            command_execution_messages=command_execs,
            tool_execution_messages=tool_execs,
            stream_events=stream_events,
            persisted_session_path=persisted_session_path,
        )

    def run_turn_loop(self, prompt: str, limit: int = 5, max_turns: int = 3, structured_output: bool = False) -> list[TurnResult]:
        """Submit up to ``max_turns`` sequential turns, stopping early as
        soon as a turn ends with a stop_reason other than 'completed'."""
        engine = QueryEnginePort.from_workspace()
        engine.config = QueryEngineConfig(max_turns=max_turns, structured_output=structured_output)
        matches = self.route_prompt(prompt, limit=limit)
        command_names = tuple(match.name for match in matches if match.kind == 'command')
        tool_names = tuple(match.name for match in matches if match.kind == 'tool')
        results: list[TurnResult] = []
        for turn in range(max_turns):
            # Later turns get a suffix so the engine sees distinct prompts.
            turn_prompt = prompt if turn == 0 else f'{prompt} [turn {turn + 1}]'
            result = engine.submit_message(turn_prompt, command_names, tool_names, ())
            results.append(result)
            if result.stop_reason != 'completed':
                break
        return results

    def stream_turn_loop(self, prompt: str, limit: int = 5, max_turns: int = 3, structured_output: bool = False):
        """Stream turn loop with real-time output from Ollama.

        Yields events from each turn in order. NOTE(review): unlike
        run_turn_loop this never breaks early on a non-'completed' stop
        reason — confirm whether that asymmetry is intended.
        """
        engine = QueryEnginePort.from_workspace()
        engine.config = QueryEngineConfig(max_turns=max_turns, structured_output=structured_output)
        matches = self.route_prompt(prompt, limit=limit)
        command_names = tuple(match.name for match in matches if match.kind == 'command')
        tool_names = tuple(match.name for match in matches if match.kind == 'tool')

        for turn in range(max_turns):
            turn_prompt = prompt if turn == 0 else f'{prompt} [turn {turn + 1}]'
            for event in engine.stream_submit_message(turn_prompt, command_names, tool_names, ()):
                yield event

    def _infer_permission_denials(self, matches: list[RoutedMatch]) -> list[PermissionDenial]:
        """Flag matched tools that remain permission-gated: currently any
        tool whose name contains 'bash'."""
        denials: list[PermissionDenial] = []
        for match in matches:
            if match.kind == 'tool' and 'bash' in match.name.lower():
                denials.append(PermissionDenial(tool_name=match.name, reason='destructive shell execution remains gated in the Python port'))
        return denials

    def _collect_matches(self, tokens: set[str], modules: tuple[PortingModule, ...], kind: str) -> list[RoutedMatch]:
        """Score every module against ``tokens`` and return the
        positive-scoring matches sorted by descending score, then name."""
        matches: list[RoutedMatch] = []
        for module in modules:
            score = self._score(tokens, module)
            if score > 0:
                matches.append(RoutedMatch(kind=kind, name=module.name, source_hint=module.source_hint, score=score))
        matches.sort(key=lambda item: (-item.score, item.name))
        return matches

    @staticmethod
    def _score(tokens: set[str], module: PortingModule) -> int:
        """Count how many prompt tokens occur as substrings of the module's
        name, source hint, or responsibility text."""
        haystacks = [module.name.lower(), module.source_hint.lower(), module.responsibility.lower()]
        score = 0
        for token in tokens:
            if any(token in haystack for haystack in haystacks):
                score += 1
        return score
@@ -0,0 +1,16 @@
1
+ """Python package placeholder for the archived `schemas` subsystem."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from pathlib import Path
7
+
8
+ SNAPSHOT_PATH = Path(__file__).resolve().parent.parent / 'reference_data' / 'subsystems' / 'schemas.json'
9
+ _SNAPSHOT = json.loads(SNAPSHOT_PATH.read_text())
10
+
11
+ ARCHIVE_NAME = _SNAPSHOT['archive_name']
12
+ MODULE_COUNT = _SNAPSHOT['module_count']
13
+ SAMPLE_FILES = tuple(_SNAPSHOT['sample_files'])
14
+ PORTING_NOTE = f"Python placeholder package for '{ARCHIVE_NAME}' with {MODULE_COUNT} archived module references."
15
+
16
+ __all__ = ['ARCHIVE_NAME', 'MODULE_COUNT', 'PORTING_NOTE', 'SAMPLE_FILES']
@@ -0,0 +1,16 @@
1
+ """Python package placeholder for the archived `screens` subsystem."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from pathlib import Path
7
+
8
+ SNAPSHOT_PATH = Path(__file__).resolve().parent.parent / 'reference_data' / 'subsystems' / 'screens.json'
9
+ _SNAPSHOT = json.loads(SNAPSHOT_PATH.read_text())
10
+
11
+ ARCHIVE_NAME = _SNAPSHOT['archive_name']
12
+ MODULE_COUNT = _SNAPSHOT['module_count']
13
+ SAMPLE_FILES = tuple(_SNAPSHOT['sample_files'])
14
+ PORTING_NOTE = f"Python placeholder package for '{ARCHIVE_NAME}' with {MODULE_COUNT} archived module references."
15
+
16
+ __all__ = ['ARCHIVE_NAME', 'MODULE_COUNT', 'PORTING_NOTE', 'SAMPLE_FILES']
src/server/__init__.py ADDED
@@ -0,0 +1,16 @@
1
+ """Python package placeholder for the archived `server` subsystem."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from pathlib import Path
7
+
8
+ SNAPSHOT_PATH = Path(__file__).resolve().parent.parent / 'reference_data' / 'subsystems' / 'server.json'
9
+ _SNAPSHOT = json.loads(SNAPSHOT_PATH.read_text())
10
+
11
+ ARCHIVE_NAME = _SNAPSHOT['archive_name']
12
+ MODULE_COUNT = _SNAPSHOT['module_count']
13
+ SAMPLE_FILES = tuple(_SNAPSHOT['sample_files'])
14
+ PORTING_NOTE = f"Python placeholder package for '{ARCHIVE_NAME}' with {MODULE_COUNT} archived module references."
15
+
16
+ __all__ = ['ARCHIVE_NAME', 'MODULE_COUNT', 'PORTING_NOTE', 'SAMPLE_FILES']
@@ -0,0 +1,16 @@
1
+ """Python package placeholder for the archived `services` subsystem."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from pathlib import Path
7
+
8
+ SNAPSHOT_PATH = Path(__file__).resolve().parent.parent / 'reference_data' / 'subsystems' / 'services.json'
9
+ _SNAPSHOT = json.loads(SNAPSHOT_PATH.read_text())
10
+
11
+ ARCHIVE_NAME = _SNAPSHOT['archive_name']
12
+ MODULE_COUNT = _SNAPSHOT['module_count']
13
+ SAMPLE_FILES = tuple(_SNAPSHOT['sample_files'])
14
+ PORTING_NOTE = f"Python placeholder package for '{ARCHIVE_NAME}' with {MODULE_COUNT} archived module references."
15
+
16
+ __all__ = ['ARCHIVE_NAME', 'MODULE_COUNT', 'PORTING_NOTE', 'SAMPLE_FILES']
@@ -0,0 +1,251 @@
1
+ """
2
+ Ollama integration adapter for local LLM execution without API costs.
3
+ Implements auto-detection of hardware and model selection.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ import json
9
+ import logging
10
+ from dataclasses import dataclass
11
+ from typing import Generator
12
+
13
+ try:
14
+ import psutil
15
+ PSUTIL_AVAILABLE = True
16
+ except ImportError:
17
+ PSUTIL_AVAILABLE = False
18
+
19
+ import requests
20
+
21
+
22
+ logger = logging.getLogger(__name__)
23
+
24
+
25
@dataclass(frozen=True)
class ModelTier:
    """Model selection tier based on available VRAM.

    Maps an inclusive VRAM range [vram_min_gb, vram_max_gb] to a
    recommended Ollama model plus human-readable guidance.
    """
    name: str                     # Ollama model tag, e.g. "qwen2.5-coder:7b"
    vram_required_gb: float       # approximate memory footprint of the model
    vram_min_gb: float            # inclusive lower bound for recommending this tier
    vram_max_gb: float            # inclusive upper bound (may be float('inf'))
    tokens_per_sec_estimate: str  # rough throughput range, for display only
    use_case: str                 # what kind of machine/workload this tier suits
34
+
35
+
36
# Model registry by tier, ordered smallest to largest model.
# recommend_model() returns the FIRST tier whose inclusive range contains
# the detected value. NOTE(review): the ranges overlap at 8GB and across
# 10-12GB, so the earlier (smaller) tier wins on overlap — confirm this
# first-match-wins behavior is intended.
MODEL_TIERS = [
    ModelTier(
        name="phi4-mini",
        vram_required_gb=3.0,
        vram_min_gb=0,
        vram_max_gb=8,
        tokens_per_sec_estimate="15-20",
        use_case="Low-end machines (M1 MacBook Air, entry laptops)"
    ),
    ModelTier(
        name="qwen2.5-coder:7b",
        vram_required_gb=6.0,
        vram_min_gb=8,
        vram_max_gb=12,
        tokens_per_sec_estimate="25-40",
        use_case="Primary recommendation (autocomplete, refactors, tests)"
    ),
    ModelTier(
        name="qwen2.5-coder:14b",
        vram_required_gb=10.0,
        vram_min_gb=10,
        vram_max_gb=float('inf'),
        tokens_per_sec_estimate="10-20",
        use_case="Power tier (complex logic, debugging, algorithms)"
    ),
]
63
+
64
+
65
class OllamaAdapter:
    """Adapter for local Ollama-based LLM execution.

    Talks to an Ollama server over its HTTP API and can auto-select a
    model tier from detected memory.
    """

    def __init__(
        self,
        base_url: str = "http://localhost:11434",
        model: str | None = None,
        auto_detect: bool = True,
        timeout: int = 120,
    ):
        """
        Initialize Ollama adapter.

        Args:
            base_url: Ollama server URL (default: localhost:11434)
            model: Model name override. If None and auto_detect=True, will select based on VRAM
            auto_detect: Whether to auto-detect model based on available VRAM
            timeout: Request timeout in seconds

        Raises:
            RuntimeError: If the Ollama server is not reachable.
        """
        self.base_url = base_url
        self.timeout = timeout
        # An explicit model wins; otherwise auto-detect, else fall back to
        # the primary recommended tier.
        self.model = model or (self.recommend_model() if auto_detect else "qwen2.5-coder:7b")
        self._verify_connection()

    @staticmethod
    def get_available_vram_gb() -> float:
        """Get available system VRAM in gigabytes.

        NOTE(review): this reads psutil.virtual_memory().total, i.e. total
        system RAM — a reasonable proxy on unified-memory machines but an
        overestimate on discrete-GPU systems. Confirm whether a true VRAM
        probe is intended.
        """
        if not PSUTIL_AVAILABLE:
            logger.warning("psutil not available; assuming 8GB VRAM")
            return 8.0
        try:
            return psutil.virtual_memory().total / (1024 ** 3)
        except Exception as e:
            # Degrade to a conservative default rather than crash startup.
            logger.warning(f"Failed to detect VRAM: {e}; assuming 8GB")
            return 8.0

    @classmethod
    def recommend_model(cls) -> str:
        """Auto-detect VRAM and recommend optimal model tier."""
        vram_gb = cls.get_available_vram_gb()
        logger.info(f"Detected {vram_gb:.1f}GB VRAM; recommending model tier...")

        # First tier whose inclusive range contains the detected value wins.
        for tier in MODEL_TIERS:
            if tier.vram_min_gb <= vram_gb <= tier.vram_max_gb:
                logger.info(f"Selected {tier.name} for {vram_gb:.1f}GB VRAM")
                logger.info(f"  Use case: {tier.use_case}")
                logger.info(f"  Est. speed: {tier.tokens_per_sec_estimate} tokens/sec")
                return tier.name

        # Fallback to most conservative
        logger.warning(f"VRAM {vram_gb:.1f}GB outside known ranges; using phi4-mini (conservative)")
        return "phi4-mini"

    @staticmethod
    def print_model_roadmap() -> None:
        """Print model selection roadmap for user reference."""
        print("\n" + "=" * 65)
        print(" Ollama Model Selection Roadmap")
        print("=" * 65 + "\n")

        vram_gb = OllamaAdapter.get_available_vram_gb()
        print(f"Your system: {vram_gb:.1f}GB VRAM\n")

        for tier in MODEL_TIERS:
            # Mark the tier that recommend_model() would select.
            marker = " ⭐ RECOMMENDED" if tier.vram_min_gb <= vram_gb <= tier.vram_max_gb else ""
            print(f"[{tier.vram_min_gb:.0f}-{tier.vram_max_gb:.1f}GB] {tier.name}{marker}")
            print(f"  VRAM: {tier.vram_required_gb}GB | Speed: {tier.tokens_per_sec_estimate} tok/s")
            print(f"  {tier.use_case}\n")

        print("=" * 65 + "\n")

    def _verify_connection(self) -> None:
        """Verify Ollama server is reachable.

        Raises:
            RuntimeError: If the /api/tags endpoint cannot be reached.
        """
        try:
            response = requests.get(f"{self.base_url}/api/tags", timeout=5)
            response.raise_for_status()
            logger.info(f"✓ Connected to Ollama at {self.base_url}")
        except requests.RequestException as e:
            logger.error(f"✗ Cannot connect to Ollama at {self.base_url}: {e}")
            # Chain the underlying network error (the original raise
            # dropped the cause, hiding the real failure from tracebacks).
            raise RuntimeError(
                f"Ollama server not reachable at {self.base_url}\n"
                f"Start it with: ollama serve"
            ) from e

    def generate(self, prompt: str) -> str:
        """
        Generate response from prompt (non-streaming).

        Args:
            prompt: Input prompt

        Returns:
            Generated response text

        Raises:
            requests.RequestException: If the HTTP request fails.
        """
        try:
            response = requests.post(
                f"{self.base_url}/api/generate",
                json={
                    "model": self.model,
                    "prompt": prompt,
                    "stream": False,
                },
                timeout=self.timeout,
            )
            response.raise_for_status()
            return response.json()["response"]
        except requests.RequestException as e:
            logger.error(f"Generation failed: {e}")
            raise

    def stream_generate(self, prompt: str) -> Generator[str, None, None]:
        """
        Stream-based generation (yields tokens as they arrive).

        Args:
            prompt: Input prompt

        Yields:
            Token strings as they are generated

        Raises:
            requests.RequestException: If the HTTP request fails.
        """
        try:
            response = requests.post(
                f"{self.base_url}/api/generate",
                json={
                    "model": self.model,
                    "prompt": prompt,
                    "stream": True,
                },
                stream=True,
                timeout=self.timeout,
            )
            response.raise_for_status()

            # Ollama streams one JSON object per line (newline-delimited).
            for line in response.iter_lines():
                if line:
                    try:
                        chunk = json.loads(line)
                        token = chunk.get("response", "")
                        if token:
                            yield token
                    except json.JSONDecodeError:
                        # Skip malformed chunks instead of aborting the stream.
                        logger.warning(f"Failed to parse chunk: {line}")
                        continue

        except requests.RequestException as e:
            logger.error(f"Stream generation failed: {e}")
            raise

    def formats_supported(self) -> dict:
        """Get supported formats from Ollama server.

        NOTE(review): /api/tags actually lists installed models, so the
        name may be a misnomer — confirm against callers. Best-effort:
        returns {} on any request failure.
        """
        try:
            response = requests.get(f"{self.base_url}/api/tags", timeout=5)
            response.raise_for_status()
            return response.json()
        except requests.RequestException as e:
            logger.error(f"Failed to fetch formats: {e}")
            return {}
222
+
223
+
224
def create_ollama_adapter(
    base_url: str = "http://localhost:11434",
    model: str | None = None,
    auto_detect: bool = True,
) -> OllamaAdapter:
    """Factory function for creating OllamaAdapter instances"""
    # Collect the options first, then hand them to the constructor in one
    # keyword expansion; timeout keeps the adapter's own default.
    options = {
        "base_url": base_url,
        "model": model,
        "auto_detect": auto_detect,
    }
    return OllamaAdapter(**options)
235
+
236
+
237
if __name__ == "__main__":
    # CLI for testing/demo: `roadmap` prints the model tier roadmap; any
    # other invocation constructs an adapter to verify connectivity.
    import sys

    logging.basicConfig(level=logging.INFO)

    wants_roadmap = len(sys.argv) > 1 and sys.argv[1] == "roadmap"
    if wants_roadmap:
        OllamaAdapter.print_model_roadmap()
    else:
        # Test connection
        print("Testing Ollama adapter...")
        adapter = OllamaAdapter()
        print(f"Model: {adapter.model}")
        print(f"Base URL: {adapter.base_url}")
        print("✓ Connection verified")