gitarsenal-cli 1.9.21 → 1.9.24

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/.venv_status.json +1 -1
  2. package/package.json +1 -1
  3. package/python/__pycache__/auth_manager.cpython-313.pyc +0 -0
  4. package/python/__pycache__/command_manager.cpython-313.pyc +0 -0
  5. package/python/__pycache__/fetch_modal_tokens.cpython-313.pyc +0 -0
  6. package/python/__pycache__/llm_debugging.cpython-313.pyc +0 -0
  7. package/python/__pycache__/modal_container.cpython-313.pyc +0 -0
  8. package/python/__pycache__/shell.cpython-313.pyc +0 -0
  9. package/python/api_integration.py +0 -0
  10. package/python/command_manager.py +613 -0
  11. package/python/credentials_manager.py +0 -0
  12. package/python/fetch_modal_tokens.py +0 -0
  13. package/python/fix_modal_token.py +0 -0
  14. package/python/fix_modal_token_advanced.py +0 -0
  15. package/python/gitarsenal.py +0 -0
  16. package/python/gitarsenal_proxy_client.py +0 -0
  17. package/python/llm_debugging.py +1369 -0
  18. package/python/modal_container.py +626 -0
  19. package/python/setup.py +15 -0
  20. package/python/setup_modal_token.py +0 -39
  21. package/python/shell.py +627 -0
  22. package/python/test_modalSandboxScript.py +75 -2639
  23. package/scripts/postinstall.js +22 -23
  24. package/python/__pycache__/credentials_manager.cpython-313.pyc +0 -0
  25. package/python/__pycache__/test_modalSandboxScript.cpython-313.pyc +0 -0
  26. package/python/__pycache__/test_modalSandboxScript_stable.cpython-313.pyc +0 -0
  27. package/python/debug_delete.py +0 -167
  28. package/python/documentation.py +0 -76
  29. package/python/fix_setup_commands.py +0 -116
  30. package/python/modal_auth_patch.py +0 -178
  31. package/python/modal_proxy_service.py +0 -665
  32. package/python/modal_token_solution.py +0 -293
  33. package/python/test_dynamic_commands.py +0 -147
  34. package/test_modalSandboxScript.py +0 -5004
package/.venv_status.json CHANGED
@@ -1 +1 @@
- {"created":"2025-08-06T11:52:55.135Z","packages":["modal","gitingest","requests","anthropic"],"uv_version":"uv 0.8.4 (Homebrew 2025-07-30)"}
+ {"created":"2025-08-07T09:48:26.761Z","packages":["modal","gitingest","requests","anthropic"],"uv_version":"uv 0.8.4 (Homebrew 2025-07-30)"}
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "gitarsenal-cli",
-   "version": "1.9.21",
+   "version": "1.9.24",
    "description": "CLI tool for creating Modal sandboxes with GitHub repositories",
    "main": "index.js",
    "bin": {
package/python/command_manager.py ADDED
@@ -0,0 +1,613 @@
+ import os
+ import time
+ import requests
+ import re
+ import json
+
+ # Import the LLM debugging function
+ try:
+     from llm_debugging import call_llm_for_batch_debug
+ except ImportError:
+     # Fallback: define a simple version if the import fails
+     def call_llm_for_batch_debug(failed_commands, api_key=None, current_dir=None, sandbox=None):
+         print("⚠️ LLM batch debugging not available")
+         return []
+
+ class CommandListManager:
+     """Manages a dynamic list of setup commands with status tracking and LLM-suggested fixes."""
+
+     def __init__(self, initial_commands=None):
+         self.commands = []
+         self.executed_commands = []
+         self.failed_commands = []
+         self.suggested_fixes = []
+         self.current_index = 0
+         self.total_commands = 0
+
+         if initial_commands:
+             self.add_commands(initial_commands)
+
+     def add_commands(self, commands):
+         """Add new commands to the list."""
+         if isinstance(commands, str):
+             commands = [commands]
+
+         added_count = 0
+         for cmd in commands:
+             if cmd and cmd.strip():
+                 self.commands.append({
+                     'command': cmd.strip(),
+                     'status': 'pending',
+                     'index': len(self.commands),
+                     'stdout': '',
+                     'stderr': '',
+                     'execution_time': None,
+                     'fix_attempts': 0,
+                     'max_fix_attempts': 3
+                 })
+                 added_count += 1
+
+         self.total_commands = len(self.commands)
+         if added_count > 0:
+             print(f"📋 Added {added_count} commands to list. Total: {self.total_commands}")
+
+     def add_command_dynamically(self, command, priority='normal'):
+         """Add a single command dynamically during execution."""
+         if not command or not command.strip():
+             return False
+
+         new_command = {
+             'command': command.strip(),
+             'status': 'pending',
+             'index': len(self.commands),
+             'stdout': '',
+             'stderr': '',
+             'execution_time': None,
+             'fix_attempts': 0,
+             'max_fix_attempts': 3,
+             'priority': priority
+         }
+
+         if priority == 'high':
+             # Insert at the beginning of pending commands
+             self.commands.insert(self.current_index, new_command)
+             # Update indices for all commands after insertion
+             for i in range(self.current_index + 1, len(self.commands)):
+                 self.commands[i]['index'] = i
+         else:
+             # Add to the end
+             self.commands.append(new_command)
+
+         self.total_commands = len(self.commands)
+         print(f"📋 Added dynamic command: {command.strip()}")
+         return True
+
+     def add_suggested_fix(self, original_command, fix_command, reason=""):
+         """Add a LLM-suggested fix for a failed command."""
+         fix_entry = {
+             'original_command': original_command,
+             'fix_command': fix_command,
+             'reason': reason,
+             'status': 'pending',
+             'index': len(self.suggested_fixes),
+             'stdout': '',
+             'stderr': '',
+             'execution_time': None
+         }
+         self.suggested_fixes.append(fix_entry)
+         print(f"🔧 Added suggested fix: {fix_command}")
+         return len(self.suggested_fixes) - 1
+
+     def get_next_command(self):
+         """Get the next pending command to execute."""
+         # First, try to get a pending command from the main list
+         for i in range(self.current_index, len(self.commands)):
+             if self.commands[i]['status'] == 'pending':
+                 return self.commands[i], 'main'
+
+         # If no pending commands in main list, check suggested fixes
+         for fix in self.suggested_fixes:
+             if fix['status'] == 'pending':
+                 return fix, 'fix'
+
+         return None, None
+
+     def mark_command_executed(self, command_index, command_type='main', success=True, stdout='', stderr='', execution_time=None):
+         """Mark a command as executed with results."""
+         if command_type == 'main':
+             if 0 <= command_index < len(self.commands):
+                 self.commands[command_index].update({
+                     'status': 'success' if success else 'failed',
+                     'stdout': stdout,
+                     'stderr': stderr,
+                     'execution_time': execution_time
+                 })
+
+                 if success:
+                     self.executed_commands.append(self.commands[command_index])
+                     print(f"✅ Command {command_index + 1}/{self.total_commands} completed successfully")
+                 else:
+                     self.failed_commands.append(self.commands[command_index])
+                     print(f"❌ Command {command_index + 1}/{self.total_commands} failed")
+
+                 self.current_index = max(self.current_index, command_index + 1)
+
+         elif command_type == 'fix':
+             if 0 <= command_index < len(self.suggested_fixes):
+                 self.suggested_fixes[command_index].update({
+                     'status': 'success' if success else 'failed',
+                     'stdout': stdout,
+                     'stderr': stderr,
+                     'execution_time': execution_time
+                 })
+
+                 if success:
+                     print(f"✅ Fix command {command_index + 1} completed successfully")
+                 else:
+                     print(f"❌ Fix command {command_index + 1} failed")
+
+     def get_status_summary(self):
+         """Get a summary of command execution status."""
+         total_main = len(self.commands)
+         total_fixes = len(self.suggested_fixes)
+         executed_main = len([c for c in self.commands if c['status'] == 'success'])
+         failed_main = len([c for c in self.commands if c['status'] == 'failed'])
+         pending_main = len([c for c in self.commands if c['status'] == 'pending'])
+         executed_fixes = len([f for f in self.suggested_fixes if f['status'] == 'success'])
+         failed_fixes = len([f for f in self.suggested_fixes if f['status'] == 'failed'])
+
+         return {
+             'total_main_commands': total_main,
+             'executed_main_commands': executed_main,
+             'failed_main_commands': failed_main,
+             'pending_main_commands': pending_main,
+             'total_fix_commands': total_fixes,
+             'executed_fix_commands': executed_fixes,
+             'failed_fix_commands': failed_fixes,
+             'progress_percentage': (executed_main / total_main * 100) if total_main > 0 else 0
+         }
+
+     def print_status(self):
+         """Print current status of all commands."""
+         summary = self.get_status_summary()
+
+         print("\n" + "="*60)
+         print("📋 COMMAND EXECUTION STATUS")
+         print("="*60)
+
+         # Main commands status
+         print(f"📋 Main Commands: {summary['executed_main_commands']}/{summary['total_main_commands']} completed")
+         print(f" ✅ Successful: {summary['executed_main_commands']}")
+         print(f" ❌ Failed: {summary['failed_main_commands']}")
+         print(f" ⏳ Pending: {summary['pending_main_commands']}")
+
+         # Fix commands status
+         if summary['total_fix_commands'] > 0:
+             print(f"🔧 Fix Commands: {summary['executed_fix_commands']}/{summary['total_fix_commands']} completed")
+             print(f" ✅ Successful: {summary['executed_fix_commands']}")
+             print(f" ❌ Failed: {summary['failed_fix_commands']}")
+
+         # Progress bar
+         progress = summary['progress_percentage']
+         bar_length = 30
+         filled_length = int(bar_length * progress / 100)
+         bar = '█' * filled_length + '░' * (bar_length - filled_length)
+         print(f"📊 Progress: [{bar}] {progress:.1f}%")
+
+         # Show current command if any
+         next_cmd, cmd_type = self.get_next_command()
+         if next_cmd:
+             cmd_type_str = "main" if cmd_type == 'main' else "fix"
+             cmd_text = next_cmd.get('command', next_cmd.get('fix_command', 'Unknown command'))
+             print(f"🔄 Current: {cmd_type_str} command - {cmd_text[:50]}...")
+
+         print("="*60)
+
+     def get_failed_commands_for_llm(self):
+         """Get failed commands for LLM analysis."""
+         failed_commands = []
+
+         # Get failed main commands
+         for cmd in self.commands:
+             if cmd['status'] == 'failed':
+                 failed_commands.append({
+                     'command': cmd['command'],
+                     'stderr': cmd['stderr'],
+                     'stdout': cmd['stdout'],
+                     'type': 'main'
+                 })
+
+         # Get failed fix commands
+         for fix in self.suggested_fixes:
+             if fix['status'] == 'failed':
+                 failed_commands.append({
+                     'command': fix['fix_command'],
+                     'stderr': fix['stderr'],
+                     'stdout': fix['stdout'],
+                     'type': 'fix',
+                     'original_command': fix['original_command']
+                 })
+
+         return failed_commands
+
+     def has_pending_commands(self):
+         """Check if there are any pending commands."""
+         return any(cmd['status'] == 'pending' for cmd in self.commands) or \
+                any(fix['status'] == 'pending' for fix in self.suggested_fixes)
+
+     def get_all_commands(self):
+         """Get all commands (main + fixes) in execution order."""
+         all_commands = []
+
+         # Add main commands
+         for cmd in self.commands:
+             all_commands.append({
+                 **cmd,
+                 'type': 'main'
+             })
+
+         # Add fix commands
+         for fix in self.suggested_fixes:
+             all_commands.append({
+                 **fix,
+                 'type': 'fix'
+             })
+
+         return all_commands
+
+     def analyze_failed_commands_with_llm(self, api_key=None, current_dir=None, sandbox=None):
+         """Analyze all failed commands using LLM and add suggested fixes."""
+         failed_commands = self.get_failed_commands_for_llm()
+
+         if not failed_commands:
+             print("✅ No failed commands to analyze")
+             return []
+
+         print(f"🔍 Analyzing {len(failed_commands)} failed commands with LLM...")
+
+         # Use unified batch debugging for efficiency
+         fixes = call_llm_for_batch_debug(failed_commands, api_key, current_dir, sandbox)
+
+         # Add the fixes to the command list
+         added_fixes = []
+         for fix in fixes:
+             fix_index = self.add_suggested_fix(
+                 fix['original_command'],
+                 fix['fix_command'],
+                 fix['reason']
+             )
+             added_fixes.append(fix_index)
+
+         print(f"🔧 Added {len(added_fixes)} LLM-suggested fixes to command list")
+         return added_fixes
+
+     def should_skip_original_command(self, original_command, fix_command, fix_stdout, fix_stderr, api_key=None):
+         """
+         Use LLM to determine if the original command should be skipped after a successful fix.
+
+         Args:
+             original_command: The original command that failed
+             fix_command: The fix command that succeeded
+             fix_stdout: The stdout from the fix command
+             fix_stderr: The stderr from the fix command
+             api_key: OpenAI API key
+
+         Returns:
+             tuple: (should_skip, reason)
+         """
+         try:
+             # Get API key if not provided
+             if not api_key:
+                 api_key = os.environ.get("OPENAI_API_KEY")
+                 if not api_key:
+                     # Try to load from saved file
+                     key_file = os.path.expanduser("~/.gitarsenal/openai_key")
+                     if os.path.exists(key_file):
+                         with open(key_file, "r") as f:
+                             api_key = f.read().strip()
+
+             if not api_key:
+                 print("⚠️ No OpenAI API key available for command list analysis")
+                 return False, "No API key available"
+
+             # Get all commands for context
+             all_commands = self.get_all_commands()
+             commands_context = "\n".join([f"{i+1}. {cmd['command']} - {cmd['status']}" for i, cmd in enumerate(all_commands)])
+
+             # Prepare the prompt
+             prompt = f"""
+ I need to determine if an original command should be skipped after a successful fix command.
+
+ Original command (failed): {original_command}
+ Fix command (succeeded): {fix_command}
+
+ Fix command stdout:
+ {fix_stdout}
+
+ Fix command stderr:
+ {fix_stderr}
+
+ Current command list:
+ {commands_context}
+
+ Based on this information, should I skip running the original command again?
+ Consider:
+ 1. If the fix command already accomplished what the original command was trying to do
+ 2. If running the original command again would be redundant or cause errors
+ 3. If the original command is still necessary after the fix
+
+ Respond with ONLY:
+ SKIP: <reason>
+ or
+ RUN: <reason>
+ """
+
+             # Call OpenAI API
+             import openai
+             client = openai.OpenAI(api_key=api_key)
+
+             print("🔍 Analyzing if original command should be skipped...")
+
+             response = client.chat.completions.create(
+                 model="gpt-3.5-turbo",
+                 messages=[
+                     {"role": "system", "content": "You are a helpful assistant that analyzes command execution."},
+                     {"role": "user", "content": prompt}
+                 ],
+                 max_tokens=100,
+                 temperature=0.3
+             )
+
+             response_text = response.choices[0].message.content.strip()
+
+             # Parse the response
+             if response_text.startswith("SKIP:"):
+                 reason = response_text.replace("SKIP:", "").strip()
+                 print(f"🔍 LLM suggests skipping original command: {reason}")
+                 return True, reason
+             elif response_text.startswith("RUN:"):
+                 reason = response_text.replace("RUN:", "").strip()
+                 print(f"🔍 LLM suggests running original command: {reason}")
+                 return False, reason
+             else:
+                 # Try to interpret a free-form response
+                 if "skip" in response_text.lower() and "should" in response_text.lower():
+                     print(f"🔍 Interpreting response as SKIP: {response_text}")
+                     return True, response_text
+                 else:
+                     print(f"🔍 Interpreting response as RUN: {response_text}")
+                     return False, response_text
+
+         except Exception as e:
+             print(f"⚠️ Error analyzing command skip decision: {e}")
+             return False, f"Error: {e}"
+
+     def replace_command(self, command_index, new_command, reason=""):
+         """
+         Replace a command in the list with a new command.
+
+         Args:
+             command_index: The index of the command to replace
+             new_command: The new command to use
+             reason: The reason for the replacement
+
+         Returns:
+             bool: True if the command was replaced, False otherwise
+         """
+         if 0 <= command_index < len(self.commands):
+             old_command = self.commands[command_index]['command']
+             self.commands[command_index]['command'] = new_command
+             self.commands[command_index]['status'] = 'pending' # Reset status
+             self.commands[command_index]['stdout'] = ''
+             self.commands[command_index]['stderr'] = ''
+             self.commands[command_index]['execution_time'] = None
+             self.commands[command_index]['replacement_reason'] = reason
+
+             print(f"🔄 Replaced command {command_index + 1}: '{old_command}' with '{new_command}'")
+             print(f"🔍 Reason: {reason}")
+             return True
+         else:
+             print(f"❌ Invalid command index for replacement: {command_index}")
+             return False
+
+     def update_command_list_with_llm(self, api_key=None):
+         """
+         Use LLM to analyze and update the entire command list.
+
+         Args:
+             api_key: OpenAI API key
+
+         Returns:
+             bool: True if the list was updated, False otherwise
+         """
+         try:
+             # Get API key if not provided
+             if not api_key:
+                 api_key = os.environ.get("OPENAI_API_KEY")
+                 if not api_key:
+                     # Try to load from saved file
+                     key_file = os.path.expanduser("~/.gitarsenal/openai_key")
+                     if os.path.exists(key_file):
+                         with open(key_file, "r") as f:
+                             api_key = f.read().strip()
+
+             if not api_key:
+                 print("⚠️ No OpenAI API key available for command list analysis")
+                 return False
+
+             # Get all commands for context
+             all_commands = self.get_all_commands()
+             commands_context = "\n".join([f"{i+1}. {cmd['command']} - {cmd['status']}"
+                                           for i, cmd in enumerate(all_commands)])
+
+             # Get executed commands with their outputs for context
+             executed_context = ""
+             for cmd in self.executed_commands:
+                 executed_context += f"Command: {cmd['command']}\n"
+                 executed_context += f"Status: {cmd['status']}\n"
+                 if cmd['stdout']:
+                     executed_context += f"Stdout: {cmd['stdout'][:500]}...\n" if len(cmd['stdout']) > 500 else f"Stdout: {cmd['stdout']}\n"
+                 if cmd['stderr']:
+                     executed_context += f"Stderr: {cmd['stderr'][:500]}...\n" if len(cmd['stderr']) > 500 else f"Stderr: {cmd['stderr']}\n"
+                 executed_context += "\n"
+
+             # Prepare the prompt
+             prompt = f"""
+ I need you to analyze and optimize this command list. Some commands have been executed,
+ and some are still pending. Based on what has already been executed, I need you to:
+
+ 1. Identify any pending commands that are now redundant or unnecessary
+ 2. Identify any pending commands that should be modified based on previous command results
+ 3. Suggest any new commands that should be added
+
+ Current command list:
+ {commands_context}
+
+ Details of executed commands:
+ {executed_context}
+
+ For each pending command (starting from the next command to be executed), tell me if it should be:
+ 1. KEEP: Keep the command as is
+ 2. SKIP: Skip the command (mark as completed without running)
+ 3. MODIFY: Modify the command (provide the new command)
+ 4. ADD_AFTER: Add a new command after this one
+
+ Format your response as a JSON array of actions:
+ [
+   {{
+     "command_index": <index>,
+     "action": "KEEP|SKIP|MODIFY|ADD_AFTER",
+     "new_command": "<new command if MODIFY or ADD_AFTER>",
+     "reason": "<reason for this action>"
+   }},
+   ...
+ ]
+
+ Only include commands that need changes (SKIP, MODIFY, ADD_AFTER), not KEEP actions.
+ """
+
+             # Call OpenAI API
+             import openai
+             import json
+             client = openai.OpenAI(api_key=api_key)
+
+             print("🔍 Analyzing command list for optimizations...")
+
+             response = client.chat.completions.create(
+                 model="gpt-4o-mini", # Use a more capable model for this complex task
+                 messages=[
+                     {"role": "system", "content": "You are a helpful assistant that analyzes and optimizes command lists."},
+                     {"role": "user", "content": prompt}
+                 ],
+                 max_tokens=1000,
+                 temperature=0.2
+             )
+
+             response_text = response.choices[0].message.content.strip()
+
+             # Extract JSON from the response
+             try:
+                 # Find JSON array in the response
+                 json_match = re.search(r'\[\s*\{.*\}\s*\]', response_text, re.DOTALL)
+                 if json_match:
+                     json_str = json_match.group(0)
+                     actions = json.loads(json_str)
+                 else:
+                     # Try to parse the entire response as JSON
+                     actions = json.loads(response_text)
+
+                 if not isinstance(actions, list):
+                     print("❌ Invalid response format from LLM - not a list")
+                     return False
+
+                 # Apply the suggested changes
+                 changes_made = 0
+                 commands_added = 0
+
+                 # Process in reverse order to avoid index shifting issues
+                 for action in sorted(actions, key=lambda x: x.get('command_index', 0), reverse=True):
+                     cmd_idx = action.get('command_index')
+                     action_type = action.get('action')
+                     new_cmd = action.get('new_command', '')
+                     reason = action.get('reason', 'No reason provided')
+
+                     if cmd_idx is None or action_type is None:
+                         continue
+
+                     # Convert to 0-based index if needed
+                     if cmd_idx > 0: # Assume 1-based index from LLM
+                         cmd_idx -= 1
+
+                     # Skip if the command index is invalid
+                     if cmd_idx < 0 or cmd_idx >= len(self.commands):
+                         print(f"❌ Invalid command index: {cmd_idx}")
+                         continue
+
+                     # Skip if the command has already been executed
+                     if self.commands[cmd_idx]['status'] != 'pending':
+                         print(f"⚠️ Command {cmd_idx + 1} already executed, skipping action")
+                         continue
+
+                     if action_type == "SKIP":
+                         # Mark the command as successful without running it
+                         self.mark_command_executed(
+                             cmd_idx, 'main', True,
+                             f"Command skipped: {reason}",
+                             "", 0
+                         )
+                         print(f"🔄 Skipped command {cmd_idx + 1}: {reason}")
+                         changes_made += 1
+
+                     elif action_type == "MODIFY":
+                         if new_cmd:
+                             if self.replace_command(cmd_idx, new_cmd, reason):
+                                 changes_made += 1
+                         else:
+                             print(f"❌ No new command provided for MODIFY action on command {cmd_idx + 1}")
+
+                     elif action_type == "ADD_AFTER":
+                         if new_cmd:
+                             # Add new command after the current one
+                             insert_idx = cmd_idx + 1
+                             new_cmd_obj = {
+                                 'command': new_cmd,
+                                 'status': 'pending',
+                                 'index': insert_idx,
+                                 'stdout': '',
+                                 'stderr': '',
+                                 'execution_time': None,
+                                 'fix_attempts': 0,
+                                 'max_fix_attempts': 3,
+                                 'added_reason': reason
+                             }
+
+                             # Insert the new command
+                             self.commands.insert(insert_idx, new_cmd_obj)
+
+                             # Update indices for all commands after insertion
+                             for i in range(insert_idx + 1, len(self.commands)):
+                                 self.commands[i]['index'] = i
+
+                             print(f"➕ Added new command after {cmd_idx + 1}: '{new_cmd}'")
+                             print(f"🔍 Reason: {reason}")
+                             commands_added += 1
+                         else:
+                             print(f"❌ No new command provided for ADD_AFTER action on command {cmd_idx + 1}")
+
+                 # Update total commands count
+                 self.total_commands = len(self.commands)
+
+                 print(f"✅ Command list updated: {changes_made} changes made, {commands_added} commands added")
+                 return changes_made > 0 or commands_added > 0
+
+             except json.JSONDecodeError as e:
+                 print(f"❌ Failed to parse LLM response as JSON: {e}")
+                 print(f"Raw response: {response_text}")
+                 return False
+             except Exception as e:
+                 print(f"❌ Error updating command list: {e}")
+                 return False
+
+         except Exception as e:
+             print(f"⚠️ Error analyzing command list: {e}")
+             return False
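
Editor's note: the hunk above defines a work-queue API: commands are queued with add_commands(), pulled one at a time with get_next_command(), recorded with mark_command_executed(), and analyze_failed_commands_with_llm() then appends pending 'fix' entries that the same loop picks up. The driver loop below is a minimal sketch, not part of the package; run_in_sandbox is a hypothetical stand-in for the package's real sandbox executor, and the queued commands are illustrative.

    import time
    from command_manager import CommandListManager

    def run_in_sandbox(command):
        # Hypothetical executor: replace with the real sandbox call.
        # Returns (success, stdout, stderr).
        return True, "", ""

    manager = CommandListManager(["pip install -r requirements.txt", "pytest -q"])
    while manager.has_pending_commands():
        cmd, cmd_type = manager.get_next_command()
        if cmd is None:
            break
        # Main entries carry 'command'; fix entries carry 'fix_command'.
        text = cmd.get('command') or cmd.get('fix_command')
        started = time.time()
        ok, out, err = run_in_sandbox(text)
        manager.mark_command_executed(cmd['index'], cmd_type, ok, out, err,
                                      time.time() - started)

    manager.analyze_failed_commands_with_llm()  # queues LLM fixes as pending entries
    manager.print_status()

For reference, update_command_list_with_llm() parses the model reply as a JSON array of actions shaped like the following (values illustrative; note the code treats command_index as 1-based and converts it):

    [
      {"command_index": 3, "action": "SKIP", "reason": "Already satisfied by an earlier fix"},
      {"command_index": 4, "action": "MODIFY", "new_command": "pip install -e .", "reason": "Repo requires an editable install"}
    ]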