gitarsenal-cli 1.9.21 → 1.9.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/.venv_status.json +1 -1
  2. package/package.json +1 -1
  3. package/python/__pycache__/auth_manager.cpython-313.pyc +0 -0
  4. package/python/__pycache__/command_manager.cpython-313.pyc +0 -0
  5. package/python/__pycache__/fetch_modal_tokens.cpython-313.pyc +0 -0
  6. package/python/__pycache__/llm_debugging.cpython-313.pyc +0 -0
  7. package/python/__pycache__/modal_container.cpython-313.pyc +0 -0
  8. package/python/__pycache__/shell.cpython-313.pyc +0 -0
  9. package/python/api_integration.py +0 -0
  10. package/python/command_manager.py +605 -0
  11. package/python/credentials_manager.py +0 -0
  12. package/python/fetch_modal_tokens.py +0 -0
  13. package/python/fix_modal_token.py +0 -0
  14. package/python/fix_modal_token_advanced.py +0 -0
  15. package/python/gitarsenal.py +0 -0
  16. package/python/gitarsenal_proxy_client.py +0 -0
  17. package/python/llm_debugging.py +1061 -0
  18. package/python/modal_container.py +626 -0
  19. package/python/setup.py +15 -0
  20. package/python/setup_modal_token.py +0 -39
  21. package/python/shell.py +610 -0
  22. package/python/test_modalSandboxScript.py +2 -2
  23. package/scripts/postinstall.js +22 -23
  24. package/python/__pycache__/credentials_manager.cpython-313.pyc +0 -0
  25. package/python/__pycache__/test_modalSandboxScript.cpython-313.pyc +0 -0
  26. package/python/__pycache__/test_modalSandboxScript_stable.cpython-313.pyc +0 -0
  27. package/python/debug_delete.py +0 -167
  28. package/python/documentation.py +0 -76
  29. package/python/fix_setup_commands.py +0 -116
  30. package/python/modal_auth_patch.py +0 -178
  31. package/python/modal_proxy_service.py +0 -665
  32. package/python/modal_token_solution.py +0 -293
  33. package/python/test_dynamic_commands.py +0 -147
  34. package/test_modalSandboxScript.py +0 -5004
package/.venv_status.json CHANGED
@@ -1 +1 @@
- {"created":"2025-08-06T11:52:55.135Z","packages":["modal","gitingest","requests","anthropic"],"uv_version":"uv 0.8.4 (Homebrew 2025-07-30)"}
+ {"created":"2025-08-07T07:09:00.867Z","packages":["modal","gitingest","requests","anthropic"],"uv_version":"uv 0.8.4 (Homebrew 2025-07-30)"}
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "gitarsenal-cli",
-   "version": "1.9.21",
+   "version": "1.9.23",
    "description": "CLI tool for creating Modal sandboxes with GitHub repositories",
    "main": "index.js",
    "bin": {
package/python/command_manager.py ADDED
@@ -0,0 +1,605 @@
+ import os
+ import time
+ import requests
+ import re
+ import json
+ import re
+
+ class CommandListManager:
+     """Manages a dynamic list of setup commands with status tracking and LLM-suggested fixes."""
+
+     def __init__(self, initial_commands=None):
+         self.commands = []
+         self.executed_commands = []
+         self.failed_commands = []
+         self.suggested_fixes = []
+         self.current_index = 0
+         self.total_commands = 0
+
+         if initial_commands:
+             self.add_commands(initial_commands)
+
+     def add_commands(self, commands):
+         """Add new commands to the list."""
+         if isinstance(commands, str):
+             commands = [commands]
+
+         added_count = 0
+         for cmd in commands:
+             if cmd and cmd.strip():
+                 self.commands.append({
+                     'command': cmd.strip(),
+                     'status': 'pending',
+                     'index': len(self.commands),
+                     'stdout': '',
+                     'stderr': '',
+                     'execution_time': None,
+                     'fix_attempts': 0,
+                     'max_fix_attempts': 3
+                 })
+                 added_count += 1
+
+         self.total_commands = len(self.commands)
+         if added_count > 0:
+             print(f"📋 Added {added_count} commands to list. Total: {self.total_commands}")
+
+     def add_command_dynamically(self, command, priority='normal'):
+         """Add a single command dynamically during execution."""
+         if not command or not command.strip():
+             return False
+
+         new_command = {
+             'command': command.strip(),
+             'status': 'pending',
+             'index': len(self.commands),
+             'stdout': '',
+             'stderr': '',
+             'execution_time': None,
+             'fix_attempts': 0,
+             'max_fix_attempts': 3,
+             'priority': priority
+         }
+
+         if priority == 'high':
+             # Insert at the beginning of pending commands
+             self.commands.insert(self.current_index, new_command)
+             # Update indices for all commands after insertion
+             for i in range(self.current_index + 1, len(self.commands)):
+                 self.commands[i]['index'] = i
+         else:
+             # Add to the end
+             self.commands.append(new_command)
+
+         self.total_commands = len(self.commands)
+         print(f"📋 Added dynamic command: {command.strip()}")
+         return True
+
+     def add_suggested_fix(self, original_command, fix_command, reason=""):
+         """Add a LLM-suggested fix for a failed command."""
+         fix_entry = {
+             'original_command': original_command,
+             'fix_command': fix_command,
+             'reason': reason,
+             'status': 'pending',
+             'index': len(self.suggested_fixes),
+             'stdout': '',
+             'stderr': '',
+             'execution_time': None
+         }
+         self.suggested_fixes.append(fix_entry)
+         print(f"🔧 Added suggested fix: {fix_command}")
+         return len(self.suggested_fixes) - 1
+
+     def get_next_command(self):
+         """Get the next pending command to execute."""
+         # First, try to get a pending command from the main list
+         for i in range(self.current_index, len(self.commands)):
+             if self.commands[i]['status'] == 'pending':
+                 return self.commands[i], 'main'
+
+         # If no pending commands in main list, check suggested fixes
+         for fix in self.suggested_fixes:
+             if fix['status'] == 'pending':
+                 return fix, 'fix'
+
+         return None, None
+
+     def mark_command_executed(self, command_index, command_type='main', success=True, stdout='', stderr='', execution_time=None):
+         """Mark a command as executed with results."""
+         if command_type == 'main':
+             if 0 <= command_index < len(self.commands):
+                 self.commands[command_index].update({
+                     'status': 'success' if success else 'failed',
+                     'stdout': stdout,
+                     'stderr': stderr,
+                     'execution_time': execution_time
+                 })
+
+                 if success:
+                     self.executed_commands.append(self.commands[command_index])
+                     print(f"✅ Command {command_index + 1}/{self.total_commands} completed successfully")
+                 else:
+                     self.failed_commands.append(self.commands[command_index])
+                     print(f"❌ Command {command_index + 1}/{self.total_commands} failed")
+
+                 self.current_index = max(self.current_index, command_index + 1)
+
+         elif command_type == 'fix':
+             if 0 <= command_index < len(self.suggested_fixes):
+                 self.suggested_fixes[command_index].update({
+                     'status': 'success' if success else 'failed',
+                     'stdout': stdout,
+                     'stderr': stderr,
+                     'execution_time': execution_time
+                 })
+
+                 if success:
+                     print(f"✅ Fix command {command_index + 1} completed successfully")
+                 else:
+                     print(f"❌ Fix command {command_index + 1} failed")
+
+     def get_status_summary(self):
+         """Get a summary of command execution status."""
+         total_main = len(self.commands)
+         total_fixes = len(self.suggested_fixes)
+         executed_main = len([c for c in self.commands if c['status'] == 'success'])
+         failed_main = len([c for c in self.commands if c['status'] == 'failed'])
+         pending_main = len([c for c in self.commands if c['status'] == 'pending'])
+         executed_fixes = len([f for f in self.suggested_fixes if f['status'] == 'success'])
+         failed_fixes = len([f for f in self.suggested_fixes if f['status'] == 'failed'])
+
+         return {
+             'total_main_commands': total_main,
+             'executed_main_commands': executed_main,
+             'failed_main_commands': failed_main,
+             'pending_main_commands': pending_main,
+             'total_fix_commands': total_fixes,
+             'executed_fix_commands': executed_fixes,
+             'failed_fix_commands': failed_fixes,
+             'progress_percentage': (executed_main / total_main * 100) if total_main > 0 else 0
+         }
+
+     def print_status(self):
+         """Print current status of all commands."""
+         summary = self.get_status_summary()
+
+         print("\n" + "="*60)
+         print("📋 COMMAND EXECUTION STATUS")
+         print("="*60)
+
+         # Main commands status
+         print(f"📋 Main Commands: {summary['executed_main_commands']}/{summary['total_main_commands']} completed")
+         print(f" ✅ Successful: {summary['executed_main_commands']}")
+         print(f" ❌ Failed: {summary['failed_main_commands']}")
+         print(f" ⏳ Pending: {summary['pending_main_commands']}")
+
+         # Fix commands status
+         if summary['total_fix_commands'] > 0:
+             print(f"🔧 Fix Commands: {summary['executed_fix_commands']}/{summary['total_fix_commands']} completed")
+             print(f" ✅ Successful: {summary['executed_fix_commands']}")
+             print(f" ❌ Failed: {summary['failed_fix_commands']}")
+
+         # Progress bar
+         progress = summary['progress_percentage']
+         bar_length = 30
+         filled_length = int(bar_length * progress / 100)
+         bar = '█' * filled_length + '░' * (bar_length - filled_length)
+         print(f"📊 Progress: [{bar}] {progress:.1f}%")
+
+         # Show current command if any
+         next_cmd, cmd_type = self.get_next_command()
+         if next_cmd:
+             cmd_type_str = "main" if cmd_type == 'main' else "fix"
+             cmd_text = next_cmd.get('command', next_cmd.get('fix_command', 'Unknown command'))
+             print(f"🔄 Current: {cmd_type_str} command - {cmd_text[:50]}...")
+
+         print("="*60)
+
+     def get_failed_commands_for_llm(self):
+         """Get failed commands for LLM analysis."""
+         failed_commands = []
+
+         # Get failed main commands
+         for cmd in self.commands:
+             if cmd['status'] == 'failed':
+                 failed_commands.append({
+                     'command': cmd['command'],
+                     'stderr': cmd['stderr'],
+                     'stdout': cmd['stdout'],
+                     'type': 'main'
+                 })
+
+         # Get failed fix commands
+         for fix in self.suggested_fixes:
+             if fix['status'] == 'failed':
+                 failed_commands.append({
+                     'command': fix['fix_command'],
+                     'stderr': fix['stderr'],
+                     'stdout': fix['stdout'],
+                     'type': 'fix',
+                     'original_command': fix['original_command']
+                 })
+
+         return failed_commands
+
+     def has_pending_commands(self):
+         """Check if there are any pending commands."""
+         return any(cmd['status'] == 'pending' for cmd in self.commands) or \
+                any(fix['status'] == 'pending' for fix in self.suggested_fixes)
+
+     def get_all_commands(self):
+         """Get all commands (main + fixes) in execution order."""
+         all_commands = []
+
+         # Add main commands
+         for cmd in self.commands:
+             all_commands.append({
+                 **cmd,
+                 'type': 'main'
+             })
+
+         # Add fix commands
+         for fix in self.suggested_fixes:
+             all_commands.append({
+                 **fix,
+                 'type': 'fix'
+             })
+
+         return all_commands
+
+     def analyze_failed_commands_with_llm(self, api_key=None, current_dir=None, sandbox=None):
+         """Analyze all failed commands using LLM and add suggested fixes."""
+         failed_commands = self.get_failed_commands_for_llm()
+
+         if not failed_commands:
+             print("✅ No failed commands to analyze")
+             return []
+
+         print(f"🔍 Analyzing {len(failed_commands)} failed commands with LLM...")
+
+         # Use unified batch debugging for efficiency
+         fixes = call_llm_for_batch_debug(failed_commands, api_key, current_dir, sandbox)
+
+         # Add the fixes to the command list
+         added_fixes = []
+         for fix in fixes:
+             fix_index = self.add_suggested_fix(
+                 fix['original_command'],
+                 fix['fix_command'],
+                 fix['reason']
+             )
+             added_fixes.append(fix_index)
+
+         print(f"🔧 Added {len(added_fixes)} LLM-suggested fixes to command list")
+         return added_fixes
+
+     def should_skip_original_command(self, original_command, fix_command, fix_stdout, fix_stderr, api_key=None):
+         """
+         Use LLM to determine if the original command should be skipped after a successful fix.
+
+         Args:
+             original_command: The original command that failed
+             fix_command: The fix command that succeeded
+             fix_stdout: The stdout from the fix command
+             fix_stderr: The stderr from the fix command
+             api_key: OpenAI API key
+
+         Returns:
+             tuple: (should_skip, reason)
+         """
+         try:
+             # Get API key if not provided
+             if not api_key:
+                 api_key = os.environ.get("OPENAI_API_KEY")
+             if not api_key:
+                 # Try to load from saved file
+                 key_file = os.path.expanduser("~/.gitarsenal/openai_key")
+                 if os.path.exists(key_file):
+                     with open(key_file, "r") as f:
+                         api_key = f.read().strip()
+
+             if not api_key:
+                 print("⚠️ No OpenAI API key available for command list analysis")
+                 return False, "No API key available"
+
+             # Get all commands for context
+             all_commands = self.get_all_commands()
+             commands_context = "\n".join([f"{i+1}. {cmd['command']} - {cmd['status']}" for i, cmd in enumerate(all_commands)])
+
+             # Prepare the prompt
+             prompt = f"""
+             I need to determine if an original command should be skipped after a successful fix command.
+
+             Original command (failed): {original_command}
+             Fix command (succeeded): {fix_command}
+
+             Fix command stdout:
+             {fix_stdout}
+
+             Fix command stderr:
+             {fix_stderr}
+
+             Current command list:
+             {commands_context}
+
+             Based on this information, should I skip running the original command again?
+             Consider:
+             1. If the fix command already accomplished what the original command was trying to do
+             2. If running the original command again would be redundant or cause errors
+             3. If the original command is still necessary after the fix
+
+             Respond with ONLY:
+             SKIP: <reason>
+             or
+             RUN: <reason>
+             """
+
+             # Call OpenAI API
+             import openai
+             client = openai.OpenAI(api_key=api_key)
+
+             print("🔍 Analyzing if original command should be skipped...")
+
+             response = client.chat.completions.create(
+                 model="gpt-3.5-turbo",
+                 messages=[
+                     {"role": "system", "content": "You are a helpful assistant that analyzes command execution."},
+                     {"role": "user", "content": prompt}
+                 ],
+                 max_tokens=100,
+                 temperature=0.3
+             )
+
+             response_text = response.choices[0].message.content.strip()
+
+             # Parse the response
+             if response_text.startswith("SKIP:"):
+                 reason = response_text.replace("SKIP:", "").strip()
+                 print(f"🔍 LLM suggests skipping original command: {reason}")
+                 return True, reason
+             elif response_text.startswith("RUN:"):
+                 reason = response_text.replace("RUN:", "").strip()
+                 print(f"🔍 LLM suggests running original command: {reason}")
+                 return False, reason
+             else:
+                 # Try to interpret a free-form response
+                 if "skip" in response_text.lower() and "should" in response_text.lower():
+                     print(f"🔍 Interpreting response as SKIP: {response_text}")
+                     return True, response_text
+                 else:
+                     print(f"🔍 Interpreting response as RUN: {response_text}")
+                     return False, response_text
+
+         except Exception as e:
+             print(f"⚠️ Error analyzing command skip decision: {e}")
+             return False, f"Error: {e}"
+
+     def replace_command(self, command_index, new_command, reason=""):
+         """
+         Replace a command in the list with a new command.
+
+         Args:
+             command_index: The index of the command to replace
+             new_command: The new command to use
+             reason: The reason for the replacement
+
+         Returns:
+             bool: True if the command was replaced, False otherwise
+         """
+         if 0 <= command_index < len(self.commands):
+             old_command = self.commands[command_index]['command']
+             self.commands[command_index]['command'] = new_command
+             self.commands[command_index]['status'] = 'pending'  # Reset status
+             self.commands[command_index]['stdout'] = ''
+             self.commands[command_index]['stderr'] = ''
+             self.commands[command_index]['execution_time'] = None
+             self.commands[command_index]['replacement_reason'] = reason
+
+             print(f"🔄 Replaced command {command_index + 1}: '{old_command}' with '{new_command}'")
+             print(f"🔍 Reason: {reason}")
+             return True
+         else:
+             print(f"❌ Invalid command index for replacement: {command_index}")
+             return False
+
+     def update_command_list_with_llm(self, api_key=None):
+         """
+         Use LLM to analyze and update the entire command list.
+
+         Args:
+             api_key: OpenAI API key
+
+         Returns:
+             bool: True if the list was updated, False otherwise
+         """
+         try:
+             # Get API key if not provided
+             if not api_key:
+                 api_key = os.environ.get("OPENAI_API_KEY")
+             if not api_key:
+                 # Try to load from saved file
+                 key_file = os.path.expanduser("~/.gitarsenal/openai_key")
+                 if os.path.exists(key_file):
+                     with open(key_file, "r") as f:
+                         api_key = f.read().strip()
+
+             if not api_key:
+                 print("⚠️ No OpenAI API key available for command list analysis")
+                 return False
+
+             # Get all commands for context
+             all_commands = self.get_all_commands()
+             commands_context = "\n".join([f"{i+1}. {cmd['command']} - {cmd['status']}"
+                                           for i, cmd in enumerate(all_commands)])
+
+             # Get executed commands with their outputs for context
+             executed_context = ""
+             for cmd in self.executed_commands:
+                 executed_context += f"Command: {cmd['command']}\n"
+                 executed_context += f"Status: {cmd['status']}\n"
+                 if cmd['stdout']:
+                     executed_context += f"Stdout: {cmd['stdout'][:500]}...\n" if len(cmd['stdout']) > 500 else f"Stdout: {cmd['stdout']}\n"
+                 if cmd['stderr']:
+                     executed_context += f"Stderr: {cmd['stderr'][:500]}...\n" if len(cmd['stderr']) > 500 else f"Stderr: {cmd['stderr']}\n"
+                 executed_context += "\n"
+
+             # Prepare the prompt
+             prompt = f"""
+             I need you to analyze and optimize this command list. Some commands have been executed,
+             and some are still pending. Based on what has already been executed, I need you to:
+
+             1. Identify any pending commands that are now redundant or unnecessary
+             2. Identify any pending commands that should be modified based on previous command results
+             3. Suggest any new commands that should be added
+
+             Current command list:
+             {commands_context}
+
+             Details of executed commands:
+             {executed_context}
+
+             For each pending command (starting from the next command to be executed), tell me if it should be:
+             1. KEEP: Keep the command as is
+             2. SKIP: Skip the command (mark as completed without running)
+             3. MODIFY: Modify the command (provide the new command)
+             4. ADD_AFTER: Add a new command after this one
+
+             Format your response as a JSON array of actions:
+             [
+               {{
+                 "command_index": <index>,
+                 "action": "KEEP|SKIP|MODIFY|ADD_AFTER",
+                 "new_command": "<new command if MODIFY or ADD_AFTER>",
+                 "reason": "<reason for this action>"
+               }},
+               ...
+             ]
+
+             Only include commands that need changes (SKIP, MODIFY, ADD_AFTER), not KEEP actions.
+             """
+
+             # Call OpenAI API
+             import openai
+             import json
+             client = openai.OpenAI(api_key=api_key)
+
+             print("🔍 Analyzing command list for optimizations...")
+
+             response = client.chat.completions.create(
+                 model="gpt-4o-mini",  # Use a more capable model for this complex task
+                 messages=[
+                     {"role": "system", "content": "You are a helpful assistant that analyzes and optimizes command lists."},
+                     {"role": "user", "content": prompt}
+                 ],
+                 max_tokens=1000,
+                 temperature=0.2
+             )
+
+             response_text = response.choices[0].message.content.strip()
+
+             # Extract JSON from the response
+             try:
+                 # Find JSON array in the response
+                 json_match = re.search(r'\[\s*\{.*\}\s*\]', response_text, re.DOTALL)
+                 if json_match:
+                     json_str = json_match.group(0)
+                     actions = json.loads(json_str)
+                 else:
+                     # Try to parse the entire response as JSON
+                     actions = json.loads(response_text)
+
+                 if not isinstance(actions, list):
+                     print("❌ Invalid response format from LLM - not a list")
+                     return False
+
+                 # Apply the suggested changes
+                 changes_made = 0
+                 commands_added = 0
+
+                 # Process in reverse order to avoid index shifting issues
+                 for action in sorted(actions, key=lambda x: x.get('command_index', 0), reverse=True):
+                     cmd_idx = action.get('command_index')
+                     action_type = action.get('action')
+                     new_cmd = action.get('new_command', '')
+                     reason = action.get('reason', 'No reason provided')
+
+                     if cmd_idx is None or action_type is None:
+                         continue
+
+                     # Convert to 0-based index if needed
+                     if cmd_idx > 0:  # Assume 1-based index from LLM
+                         cmd_idx -= 1
+
+                     # Skip if the command index is invalid
+                     if cmd_idx < 0 or cmd_idx >= len(self.commands):
+                         print(f"❌ Invalid command index: {cmd_idx}")
+                         continue
+
+                     # Skip if the command has already been executed
+                     if self.commands[cmd_idx]['status'] != 'pending':
+                         print(f"⚠️ Command {cmd_idx + 1} already executed, skipping action")
+                         continue
+
+                     if action_type == "SKIP":
+                         # Mark the command as successful without running it
+                         self.mark_command_executed(
+                             cmd_idx, 'main', True,
+                             f"Command skipped: {reason}",
+                             "", 0
+                         )
+                         print(f"🔄 Skipped command {cmd_idx + 1}: {reason}")
+                         changes_made += 1
+
+                     elif action_type == "MODIFY":
+                         if new_cmd:
+                             if self.replace_command(cmd_idx, new_cmd, reason):
+                                 changes_made += 1
+                         else:
+                             print(f"❌ No new command provided for MODIFY action on command {cmd_idx + 1}")
+
+                     elif action_type == "ADD_AFTER":
+                         if new_cmd:
+                             # Add new command after the current one
+                             insert_idx = cmd_idx + 1
+                             new_cmd_obj = {
+                                 'command': new_cmd,
+                                 'status': 'pending',
+                                 'index': insert_idx,
+                                 'stdout': '',
+                                 'stderr': '',
+                                 'execution_time': None,
+                                 'fix_attempts': 0,
+                                 'max_fix_attempts': 3,
+                                 'added_reason': reason
+                             }
+
+                             # Insert the new command
+                             self.commands.insert(insert_idx, new_cmd_obj)
+
+                             # Update indices for all commands after insertion
+                             for i in range(insert_idx + 1, len(self.commands)):
+                                 self.commands[i]['index'] = i
+
+                             print(f"➕ Added new command after {cmd_idx + 1}: '{new_cmd}'")
+                             print(f"🔍 Reason: {reason}")
+                             commands_added += 1
+                         else:
+                             print(f"❌ No new command provided for ADD_AFTER action on command {cmd_idx + 1}")
+
+                 # Update total commands count
+                 self.total_commands = len(self.commands)
+
+                 print(f"✅ Command list updated: {changes_made} changes made, {commands_added} commands added")
+                 return changes_made > 0 or commands_added > 0
+
+             except json.JSONDecodeError as e:
+                 print(f"❌ Failed to parse LLM response as JSON: {e}")
+                 print(f"Raw response: {response_text}")
+                 return False
+             except Exception as e:
+                 print(f"❌ Error updating command list: {e}")
+                 return False
+
+         except Exception as e:
+             print(f"⚠️ Error analyzing command list: {e}")
+             return False
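For orientation, here is a minimal usage sketch of the new CommandListManager loop. It is not code shipped in the package: it assumes command_manager.py is importable from the working directory, the repository URL is a placeholder, and commands run via subprocess on the host, whereas the CLI itself executes them inside a Modal sandbox.

# Hypothetical usage sketch -- not part of the published package.
import subprocess
import time

from command_manager import CommandListManager

manager = CommandListManager(initial_commands=[
    "git clone https://github.com/example/repo.git",   # placeholder repository
    "pip install -r repo/requirements.txt",
])

while manager.has_pending_commands():
    entry, entry_type = manager.get_next_command()
    if entry is None:
        break
    # Fix entries store their text under 'fix_command' rather than 'command'.
    cmd_text = entry.get('command', entry.get('fix_command'))
    start = time.time()
    result = subprocess.run(cmd_text, shell=True, capture_output=True, text=True)
    manager.mark_command_executed(
        entry['index'], entry_type,
        success=(result.returncode == 0),
        stdout=result.stdout, stderr=result.stderr,
        execution_time=time.time() - start,
    )

manager.print_status()

After failures, analyze_failed_commands_with_llm() can append LLM-suggested fixes, which get_next_command() serves once the remaining main commands are exhausted.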