more-compute 0.4.4__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- frontend/app/globals.css +734 -27
- frontend/app/layout.tsx +13 -3
- frontend/components/Notebook.tsx +2 -14
- frontend/components/cell/MonacoCell.tsx +99 -5
- frontend/components/layout/Sidebar.tsx +39 -4
- frontend/components/panels/ClaudePanel.tsx +461 -0
- frontend/components/popups/ComputePopup.tsx +738 -447
- frontend/components/popups/FilterPopup.tsx +305 -189
- frontend/components/popups/MetricsPopup.tsx +20 -1
- frontend/components/popups/ProviderConfigModal.tsx +322 -0
- frontend/components/popups/ProviderDropdown.tsx +398 -0
- frontend/components/popups/SettingsPopup.tsx +1 -1
- frontend/contexts/ClaudeContext.tsx +392 -0
- frontend/contexts/PodWebSocketContext.tsx +16 -21
- frontend/hooks/useInlineDiff.ts +269 -0
- frontend/lib/api.ts +323 -12
- frontend/lib/settings.ts +5 -0
- frontend/lib/websocket-native.ts +4 -8
- frontend/lib/websocket.ts +1 -2
- frontend/package-lock.json +733 -36
- frontend/package.json +2 -0
- frontend/public/assets/icons/providers/lambda_labs.svg +22 -0
- frontend/public/assets/icons/providers/prime_intellect.svg +18 -0
- frontend/public/assets/icons/providers/runpod.svg +9 -0
- frontend/public/assets/icons/providers/vastai.svg +1 -0
- frontend/settings.md +54 -0
- frontend/tsconfig.tsbuildinfo +1 -0
- frontend/types/claude.ts +194 -0
- kernel_run.py +13 -0
- {more_compute-0.4.4.dist-info → more_compute-0.5.0.dist-info}/METADATA +53 -11
- {more_compute-0.4.4.dist-info → more_compute-0.5.0.dist-info}/RECORD +56 -37
- {more_compute-0.4.4.dist-info → more_compute-0.5.0.dist-info}/WHEEL +1 -1
- morecompute/__init__.py +1 -1
- morecompute/__version__.py +1 -1
- morecompute/execution/executor.py +24 -67
- morecompute/execution/worker.py +6 -72
- morecompute/models/api_models.py +62 -0
- morecompute/notebook.py +11 -0
- morecompute/server.py +641 -133
- morecompute/services/claude_service.py +392 -0
- morecompute/services/pod_manager.py +168 -67
- morecompute/services/pod_monitor.py +67 -39
- morecompute/services/prime_intellect.py +0 -4
- morecompute/services/providers/__init__.py +92 -0
- morecompute/services/providers/base_provider.py +336 -0
- morecompute/services/providers/lambda_labs_provider.py +394 -0
- morecompute/services/providers/provider_factory.py +194 -0
- morecompute/services/providers/runpod_provider.py +504 -0
- morecompute/services/providers/vastai_provider.py +407 -0
- morecompute/utils/cell_magics.py +0 -3
- morecompute/utils/config_util.py +93 -3
- morecompute/utils/special_commands.py +5 -32
- morecompute/utils/version_check.py +117 -0
- frontend/styling_README.md +0 -23
- {more_compute-0.4.4.dist-info/licenses → more_compute-0.5.0.dist-info}/LICENSE +0 -0
- {more_compute-0.4.4.dist-info → more_compute-0.5.0.dist-info}/entry_points.txt +0 -0
- {more_compute-0.4.4.dist-info → more_compute-0.5.0.dist-info}/top_level.txt +0 -0
morecompute/services/claude_service.py (new file):

@@ -0,0 +1,392 @@

```python
"""Claude AI service for MORECOMPUTE notebook assistant."""

import re
from typing import AsyncGenerator, Optional
from dataclasses import dataclass

try:
    import anthropic
    ANTHROPIC_AVAILABLE = True
except ImportError:
    ANTHROPIC_AVAILABLE = False


# Context window management constants
MAX_CONTEXT_CHARS = 100_000  # ~25k tokens for notebook context
MAX_CELL_SOURCE_CHARS = 5_000  # Per-cell source code cap
MAX_CELL_OUTPUT_CHARS = 500  # Per-cell output cap (outputs can be huge)
MAX_HISTORY_MESSAGES = 20  # Cap conversation history
MAX_HISTORY_CHARS = 30_000  # Total history character budget
```
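The budgets are expressed in characters rather than tokens; the "~25k tokens" note follows from the usual rough rule of thumb of about 4 characters per token for English prose and code. The ratio is an approximation, not something the service computes:

```python
MAX_CONTEXT_CHARS = 100_000
CHARS_PER_TOKEN = 4  # rough heuristic, not an exact tokenizer measurement

print(MAX_CONTEXT_CHARS / CHARS_PER_TOKEN)  # 25000.0 -- the "~25k tokens" in the comment
```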
```python
@dataclass
class ProposedEdit:
    """Represents a proposed edit to a notebook cell."""
    cell_index: int
    original_code: str
    new_code: str
    explanation: str


@dataclass
class ClaudeContext:
    """Context information sent to Claude."""
    cells: list[dict]
    focused_cell: int = -1  # Index of currently focused cell (-1 = none)
    gpu_info: Optional[dict] = None
    metrics: Optional[dict] = None
    packages: Optional[list[dict]] = None
```
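As a rough usage sketch, a caller assembles a `ClaudeContext` from nbformat-style cell dicts; the `cell_type`/`source`/`outputs` keys are the ones the service reads below, and the cell contents here are made up:

```python
context = ClaudeContext(
    cells=[
        {"cell_type": "code", "source": "import torch\n", "outputs": []},
        {
            "cell_type": "code",
            "source": "x = torch.randn(8, 8).cuda()\n",
            "outputs": [{"output_type": "stream", "text": "ok\n"}],
        },
    ],
    focused_cell=1,  # the cell the user currently has selected
)
```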
````python
class ClaudeService:
    """Service for interacting with the Claude API."""

    SYSTEM_PROMPT = """You are a helpful AI assistant integrated into MORECOMPUTE, a Python notebook interface for GPU computing.

You help users with:
- Writing and debugging Python code
- Understanding GPU/CUDA operations
- Optimizing code for GPU execution
- Explaining errors and suggesting fixes
- Data science and machine learning tasks

IMPORTANT: When you want to suggest code changes to a cell, use this exact format:

```edit:CELL_INDEX
NEW CODE HERE
```

Where CELL_INDEX is the 0-based index of the cell to modify. For example, to modify cell 0:

```edit:0
print("Hello, world!")
```

You can propose multiple edits in a single response. The user will see a visual diff and can accept or reject each edit.

When providing code suggestions:
- Be concise and focused
- Explain what the code does
- Mention any potential issues or improvements
- Consider GPU memory constraints when relevant

Current notebook context will be provided with each message."""

    # Available models
    MODELS = {
        "sonnet": "claude-sonnet-4-20250514",
        "haiku": "claude-haiku-4-20250514",
        "opus": "claude-opus-4-5-20251101",  # Opus 4.5
    }
````
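The edit protocol the prompt defines is plain text, so a conforming reply is just a string. A hypothetical example of what the frontend would receive from the model:

````python
response = """The loop can be replaced with a vectorized NumPy reduction:

```edit:2
import numpy as np
total = np.arange(1_000_000).sum()
```

This avoids a Python-level loop over a million elements."""
````

The `parse_edit_blocks` staticmethod at the end of the file consumes exactly this shape.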
```python
    def __init__(self, api_key: str, model: str = "sonnet"):
        if not ANTHROPIC_AVAILABLE:
            raise ImportError("anthropic package is not installed. Run: pip install anthropic")
        self.client = anthropic.AsyncAnthropic(api_key=api_key)
        self.model = self.MODELS.get(model, self.MODELS["sonnet"])
```
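Note that an unknown alias falls back to Sonnet silently rather than raising. A quick sketch; the key string is a placeholder, and the client does not validate it at construction time:

```python
svc = ClaudeService(api_key="sk-ant-...", model="turbo")  # "turbo" is not a known alias
print(svc.model)  # claude-sonnet-4-20250514 -- the Sonnet fallback
```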
```python
    def _prioritize_cells(self, cells: list[dict], focused_cell: int = -1) -> list[tuple[int, dict, int]]:
        """
        Prioritize cells for context inclusion.
        Returns a list of (index, cell, priority) tuples, sorted by priority (higher = more important).

        Priority levels:
        - 100: Focused cell
        - 80: Cells with errors
        - 60: Recent cells (last 5)
        - 40: Cells with outputs
        - 20: Other cells
        """
        prioritized = []
        num_cells = len(cells)

        for i, cell in enumerate(cells):
            priority = 20  # Base priority

            # Focused cell gets highest priority
            if i == focused_cell:
                priority = 100
            # Cells with errors get high priority
            elif cell.get("error") or any(
                o.get("output_type") == "error" for o in cell.get("outputs", [])
            ):
                priority = 80
            # Recent cells (last 5) get medium-high priority
            elif i >= num_cells - 5:
                priority = 60
            # Cells with outputs get medium priority
            elif cell.get("outputs"):
                priority = 40

            prioritized.append((i, cell, priority))

        # Sort by priority (descending), then by index (ascending for tie-breaking)
        prioritized.sort(key=lambda x: (-x[2], x[0]))
        return prioritized
```
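To see how the ordering comes out, here is a small made-up notebook; since `_prioritize_cells` never touches `self`, it can be exercised directly by passing `None` for it:

```python
cells = [
    {"cell_type": "code", "source": "import torch", "outputs": []},
    {"cell_type": "code", "source": "1/0",
     "outputs": [{"output_type": "error", "ename": "ZeroDivisionError"}]},
    {"cell_type": "code", "source": "x = 1",
     "outputs": [{"output_type": "stream", "text": "ok\n"}]},
    {"cell_type": "code", "source": "print(x)", "outputs": []},
]

ranked = ClaudeService._prioritize_cells(None, cells, focused_cell=3)
print([(index, priority) for index, _, priority in ranked])
# [(3, 100), (1, 80), (0, 60), (2, 60)] -- with only four cells, every
# non-focused, non-error cell counts as "recent", so the 40/20 tiers are empty
```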
````python
    def _format_cell(self, index: int, cell: dict, is_focused: bool = False) -> str:
        """Format a single cell for context, with truncation."""
        parts = []
        cell_type = cell.get("cell_type", "code")
        source = cell.get("source", "")
        if isinstance(source, list):
            source = "".join(source)

        # Truncate source if needed
        if len(source) > MAX_CELL_SOURCE_CHARS:
            source = source[:MAX_CELL_SOURCE_CHARS] + "\n... [truncated]"

        focused_marker = " (FOCUSED)" if is_focused else ""
        lang = "python" if cell_type == "code" else "markdown"
        parts.append(f"### Cell {index} ({cell_type}){focused_marker}\n```{lang}\n{source}\n```\n")

        # Include outputs (truncated)
        outputs = cell.get("outputs", [])
        if outputs:
            parts.append("**Output:**\n")
            for output in outputs[:3]:  # Max 3 outputs per cell
                output_type = output.get("output_type", "")
                if output_type == "stream":
                    text = output.get("text", "")
                    if isinstance(text, list):
                        text = "".join(text)
                    # Compare against the joined text; checking the raw value would
                    # count list items rather than characters when text arrives as a list
                    if len(text) > MAX_CELL_OUTPUT_CHARS:
                        text = text[:MAX_CELL_OUTPUT_CHARS] + "\n... [truncated]"
                    parts.append(f"```\n{text}\n```\n")
                elif output_type == "execute_result":
                    data = output.get("data", {})
                    if "text/plain" in data:
                        text = data["text/plain"]
                        if isinstance(text, list):
                            text = "".join(text)
                        text = text[:MAX_CELL_OUTPUT_CHARS]
                        parts.append(f"```\n{text}\n```\n")
                elif output_type == "error":
                    ename = output.get("ename", "Error")
                    evalue = output.get("evalue", "")
                    traceback = output.get("traceback", [])
                    # Include truncated traceback for errors
                    tb_text = "\n".join(traceback[-5:]) if traceback else ""  # Last 5 lines
                    tb_text = tb_text[:1000]  # Cap traceback
                    parts.append(f"**Error: {ename}**: {evalue}\n```\n{tb_text}\n```\n")

        return "".join(parts)
````
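Formatting the error cell from the sketch above yields markdown along these lines (same `None`-for-`self` trick; the output is shown in comments):

````python
cell = {
    "cell_type": "code",
    "source": "1/0",
    "outputs": [{
        "output_type": "error",
        "ename": "ZeroDivisionError",
        "evalue": "division by zero",
        "traceback": ["Traceback (most recent call last):",
                      "ZeroDivisionError: division by zero"],
    }],
}
print(ClaudeService._format_cell(None, 1, cell, is_focused=True))
# ### Cell 1 (code) (FOCUSED)
# ```python
# 1/0
# ```
# **Output:**
# **Error: ZeroDivisionError**: division by zero
# ```
# Traceback (most recent call last):
# ZeroDivisionError: division by zero
# ```
````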
```python
    def build_context_message(self, context: ClaudeContext) -> str:
        """Build a context string from notebook state with budget management."""
        budget = MAX_CONTEXT_CHARS
        parts = []

        # Add cells context with prioritization
        if context.cells:
            parts.append("## Current Notebook Cells\n")
            budget -= len(parts[-1])

            prioritized = self._prioritize_cells(context.cells, context.focused_cell)
            included_cells = []

            for index, cell, priority in prioritized:
                is_focused = (index == context.focused_cell)
                cell_str = self._format_cell(index, cell, is_focused)

                if len(cell_str) <= budget:
                    included_cells.append((index, cell_str))
                    budget -= len(cell_str)
                elif budget > 200:  # Room for a truncated placeholder
                    truncated = f"### Cell {index} ({cell.get('cell_type', 'code')}) [content omitted]\n"
                    included_cells.append((index, truncated))
                    budget -= len(truncated)

            # Sort by index for display
            included_cells.sort(key=lambda x: x[0])
            for _, cell_str in included_cells:
                parts.append(cell_str)

            omitted = len(context.cells) - len(included_cells)
            if omitted > 0:
                parts.append(f"\n*({omitted} cells omitted due to context limits)*\n")

        # Add GPU info (compact)
        if context.gpu_info and budget > 500:
            gpu_parts = ["\n## GPU Information\n"]
            gpu_list = context.gpu_info.get("gpu", [])
            if gpu_list:
                for i, gpu in enumerate(gpu_list):
                    util = gpu.get("util_percent", "N/A")
                    mem_used = gpu.get("mem_used", 0) / (1024**3) if gpu.get("mem_used") else 0
                    mem_total = gpu.get("mem_total", 0) / (1024**3) if gpu.get("mem_total") else 0
                    temp = gpu.get("temperature_c", "N/A")
                    gpu_parts.append(f"- GPU {i}: {util}% util, {mem_used:.1f}/{mem_total:.1f}GB, {temp}C\n")
            else:
                gpu_parts.append("No GPU detected\n")

            gpu_str = "".join(gpu_parts)
            if len(gpu_str) <= budget:
                parts.append(gpu_str)
                budget -= len(gpu_str)

        # Add system metrics (compact)
        if context.metrics and budget > 300:
            cpu = context.metrics.get("cpu", {})
            memory = context.metrics.get("memory", {})
            mem_used = memory.get("used", 0) / (1024**3) if memory.get("used") else 0
            mem_total = memory.get("total", 0) / (1024**3) if memory.get("total") else 0
            metrics_str = f"\n## System: CPU {cpu.get('percent', 'N/A')}%, RAM {mem_used:.1f}/{mem_total:.1f}GB\n"
            if len(metrics_str) <= budget:
                parts.append(metrics_str)
                budget -= len(metrics_str)

        # Add relevant packages (compact)
        if context.packages and budget > 200:
            ml_packages = ["torch", "tensorflow", "jax", "numpy", "pandas", "scikit-learn",
                           "transformers", "datasets", "accelerate", "deepspeed"]
            relevant = [p for p in context.packages if p.get("name", "").lower() in ml_packages]
            if relevant:
                pkg_str = "\n## Packages: " + ", ".join(
                    f"{p.get('name')} {p.get('version')}" for p in relevant[:8]
                ) + "\n"
                if len(pkg_str) <= budget:
                    parts.append(pkg_str)

        return "".join(parts)

    def _truncate_history(self, history: list[dict]) -> list[dict]:
        """Truncate conversation history to fit within budget."""
        if not history:
            return []

        # Limit number of messages
        history = history[-MAX_HISTORY_MESSAGES:]

        # Truncate by character count
        truncated = []
        total_chars = 0

        # Process from most recent to oldest
        for msg in reversed(history):
            content = msg.get("content", "")
            msg_chars = len(content)

            if total_chars + msg_chars <= MAX_HISTORY_CHARS:
                truncated.insert(0, msg)
                total_chars += msg_chars
            elif total_chars < MAX_HISTORY_CHARS:
                # Truncate this message to fit the remaining budget
                remaining = MAX_HISTORY_CHARS - total_chars
                truncated.insert(0, {
                    "role": msg["role"],
                    "content": content[:remaining] + "\n... [earlier content truncated]"
                })
                break
            else:
                break

        return truncated
```
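With the constants above, trimming walks backwards from the newest message, keeps whole messages while they fit, and clips at most one more. For example, on a synthetic history:

```python
history = [{"role": "user", "content": "x" * 12_000} for _ in range(30)]
kept = ClaudeService._truncate_history(None, history)  # self is unused here too

print(len(kept))  # 3 -- two full 12k-char messages, plus one clipped to the
                  # remaining 6_000 chars with the "[earlier content truncated]" marker
```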
```python
    async def stream_response(
        self,
        message: str,
        context: ClaudeContext,
        history: Optional[list[dict]] = None,
        max_tokens: int = 4096,
        model: Optional[str] = None
    ) -> AsyncGenerator[str, None]:
        """Stream a response from Claude.

        Args:
            model: One of "sonnet", "haiku", "opus". Defaults to the instance model.
        """
        messages = []

        # Add truncated history
        if history:
            truncated_history = self._truncate_history(history)
            for msg in truncated_history:
                messages.append({
                    "role": msg["role"],
                    "content": msg["content"]
                })

        # Build context and add it to the user message
        context_str = self.build_context_message(context)
        user_content = f"{context_str}\n\n---\n\n**User Question:**\n{message}"

        messages.append({
            "role": "user",
            "content": user_content
        })

        # Use the provided model or fall back to the instance model
        model_id = self.MODELS.get(model, self.model) if model else self.model

        async with self.client.messages.stream(
            model=model_id,
            max_tokens=max_tokens,
            system=self.SYSTEM_PROMPT,
            messages=messages
        ) as stream:
            async for text in stream.text_stream:
                yield text
```
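A minimal end-to-end consumer might look like this; it assumes the `anthropic` package is installed and a real key is available in `ANTHROPIC_API_KEY`, both assumptions about the caller's environment rather than part of this module:

```python
import asyncio
import os

async def main() -> None:
    service = ClaudeService(api_key=os.environ["ANTHROPIC_API_KEY"], model="haiku")
    context = ClaudeContext(
        cells=[{"cell_type": "code", "source": "x = 1\n", "outputs": []}],
        focused_cell=0,
    )
    async for chunk in service.stream_response("What does this notebook do?", context):
        print(chunk, end="", flush=True)

asyncio.run(main())
```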
````python
    async def get_response(
        self,
        message: str,
        context: ClaudeContext,
        history: Optional[list[dict]] = None,
        max_tokens: int = 4096
    ) -> str:
        """Get a complete response from Claude (non-streaming)."""
        full_response = []
        async for chunk in self.stream_response(message, context, history, max_tokens):
            full_response.append(chunk)
        return "".join(full_response)

    @staticmethod
    def parse_edit_blocks(response: str, cells: list[dict]) -> list[ProposedEdit]:
        """Parse edit blocks from Claude's response.

        Format: ```edit:CELL_INDEX
        NEW CODE
        ```
        """
        edits = []

        # Pattern to match edit blocks
        pattern = r'```edit:(\d+)\n(.*?)```'
        matches = re.findall(pattern, response, re.DOTALL)

        for cell_index_str, new_code in matches:
            cell_index = int(cell_index_str)

            # Validate the cell index
            if 0 <= cell_index < len(cells):
                original_code = cells[cell_index].get("source", "")
                if isinstance(original_code, list):
                    original_code = "".join(original_code)

                # Extract the explanation (text before the edit block)
                explanation = ""
                edit_start = response.find(f"```edit:{cell_index}")
                if edit_start > 0:
                    # Get the text before this edit block
                    prev_text = response[:edit_start].strip()
                    # Use the last paragraph as the explanation
                    paragraphs = prev_text.split("\n\n")
                    if paragraphs:
                        explanation = paragraphs[-1].strip()

                edits.append(ProposedEdit(
                    cell_index=cell_index,
                    original_code=original_code.strip(),
                    new_code=new_code.strip(),
                    explanation=explanation
                ))

        return edits
````
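Because `parse_edit_blocks` is a staticmethod over plain strings, it can be exercised without an API key. A round-trip on a made-up response:

````python
response = """Vectorize the loop:

```edit:0
import numpy as np
total = np.arange(10).sum()
```
"""
cells = [{"cell_type": "code",
          "source": "total = 0\nfor i in range(10):\n    total += i\n"}]

edits = ClaudeService.parse_edit_blocks(response, cells)
print(edits[0].cell_index)   # 0
print(edits[0].explanation)  # Vectorize the loop:
print(edits[0].new_code)
# import numpy as np
# total = np.arange(10).sum()
````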
````python
    @staticmethod
    def remove_edit_blocks(response: str) -> str:
        """Remove edit blocks from the response for display purposes."""
        pattern = r'```edit:\d+\n.*?```'
        return re.sub(pattern, '[Edit proposed - see inline diff]', response, flags=re.DOTALL)
````