gitarsenal-cli 1.4.4 → 1.4.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "gitarsenal-cli",
3
- "version": "1.4.4",
3
+ "version": "1.4.5",
4
4
  "description": "CLI tool for creating Modal sandboxes with GitHub repositories",
5
5
  "main": "index.js",
6
6
  "bin": {
@@ -356,22 +356,48 @@ def call_openai_for_debug(command, error_output, api_key=None, current_dir=None,
356
356
  print("⚠️ Error output is empty. Cannot effectively debug the command.")
357
357
  print("⚠️ Skipping OpenAI debugging due to lack of error information.")
358
358
  return None
359
-
359
+
360
+ # Try to get API key from multiple sources
360
361
  if not api_key:
361
- # Try to get API key from environment
362
+ # First try environment variable
362
363
  api_key = os.environ.get("OPENAI_API_KEY")
363
364
 
364
- if not api_key:
365
- # Use the CredentialsManager to get the API key
366
- try:
367
- from credentials_manager import CredentialsManager
368
- credentials_manager = CredentialsManager()
369
- api_key = credentials_manager.get_openai_api_key()
370
- if not api_key:
371
- print("❌ No API key provided. Skipping debugging.")
372
- return None
373
- except ImportError:
374
- # Fall back to direct input if credentials_manager module is not available
365
+ # Store the API key in a persistent file if found
366
+ if api_key:
367
+ try:
368
+ os.makedirs(os.path.expanduser("~/.gitarsenal"), exist_ok=True)
369
+ with open(os.path.expanduser("~/.gitarsenal/openai_key"), "w") as f:
370
+ f.write(api_key)
371
+ print("✅ Saved OpenAI API key for future use")
372
+ except Exception as e:
373
+ print(f"⚠️ Could not save API key: {e}")
374
+
375
+ # Try to load from saved file if not in environment
376
+ if not api_key:
377
+ try:
378
+ key_file = os.path.expanduser("~/.gitarsenal/openai_key")
379
+ if os.path.exists(key_file):
380
+ with open(key_file, "r") as f:
381
+ api_key = f.read().strip()
382
+ if api_key:
383
+ print("✅ Loaded OpenAI API key from saved file")
384
+ # Also set in environment for this session
385
+ os.environ["OPENAI_API_KEY"] = api_key
386
+ except Exception as e:
387
+ print(f"⚠️ Could not load saved API key: {e}")
388
+
389
+ # Then try credentials manager
390
+ if not api_key:
391
+ try:
392
+ from credentials_manager import CredentialsManager
393
+ credentials_manager = CredentialsManager()
394
+ api_key = credentials_manager.get_openai_api_key()
395
+ except ImportError:
396
+ # credentials_manager module not available; continue — the interactive prompt below will ask the user
397
+ pass
398
+
399
+ # Finally, prompt the user if still no API key
400
+ if not api_key:
375
401
  print("\n" + "="*60)
376
402
  print("🔑 OPENAI API KEY REQUIRED FOR DEBUGGING")
377
403
  print("="*60)
@@ -386,6 +412,8 @@ def call_openai_for_debug(command, error_output, api_key=None, current_dir=None,
386
412
  print("❌ No API key provided. Skipping debugging.")
387
413
  return None
388
414
  print("✅ API key received successfully!")
415
+ # Save the API key to environment for future use in this session
416
+ os.environ["OPENAI_API_KEY"] = api_key
389
417
  except KeyboardInterrupt:
390
418
  print("\n❌ API key input cancelled by user.")
391
419
  return None
@@ -393,9 +421,11 @@ def call_openai_for_debug(command, error_output, api_key=None, current_dir=None,
393
421
  print(f"❌ Error getting API key: {e}")
394
422
  return None
395
423
 
396
- # Get current directory context
424
+ # Gather additional context to help with debugging
397
425
  directory_context = ""
398
426
  system_info = ""
427
+ command_history = ""
428
+ file_context = ""
399
429
 
400
430
  if sandbox:
401
431
  try:
@@ -409,6 +439,7 @@ def call_openai_for_debug(command, error_output, api_key=None, current_dir=None,
409
439
  uname -a
410
440
  echo -e "\nPython Information:"
411
441
  python --version
442
+ pip --version
412
443
  echo -e "\nPackage Manager:"
413
444
  which apt 2>/dev/null && echo "apt available" || echo "apt not available"
414
445
  which yum 2>/dev/null && echo "yum available" || echo "yum not available"
@@ -463,6 +494,72 @@ Directory contents:
463
494
  {parent_context}
464
495
  """
465
496
  print("✅ Directory context gathered successfully")
497
+
498
+ # Check for relevant files that might provide additional context
499
+ # For example, if error mentions a specific file, try to get its content
500
+ relevant_files = []
501
+ error_files = re.findall(r'(?:No such file or directory|cannot open|not found): ([^\s:]+)', error_output)
502
+ if error_files:
503
+ for file_path in error_files:
504
+ # Clean up the file path
505
+ file_path = file_path.strip("'\"")
506
+ if not os.path.isabs(file_path):
507
+ file_path = os.path.join(current_dir, file_path)
508
+
509
+ # Try to get the parent directory if the file doesn't exist
510
+ if '/' in file_path:
511
+ parent_file_dir = os.path.dirname(file_path)
512
+ relevant_files.append(parent_file_dir)
513
+
514
+ # Look for package.json, requirements.txt, etc.
515
+ common_config_files = ["package.json", "requirements.txt", "pyproject.toml", "setup.py",
516
+ "Pipfile", "Dockerfile", "docker-compose.yml", "Makefile"]
517
+
518
+ for config_file in common_config_files:
519
+ check_cmd = f"test -f {current_dir}/{config_file}"
520
+ check_result = sandbox.exec("bash", "-c", check_cmd)
521
+ check_result.wait()
522
+ if check_result.returncode == 0:
523
+ relevant_files.append(f"{current_dir}/{config_file}")
524
+
525
+ # Get content of relevant files
526
+ if relevant_files:
527
+ file_context = "\nRelevant file contents:\n"
528
+ for file_path in relevant_files[:2]: # Limit to 2 files to avoid too much context
529
+ try:
530
+ file_check_cmd = f"test -f {file_path}"
531
+ file_check = sandbox.exec("bash", "-c", file_check_cmd)
532
+ file_check.wait()
533
+
534
+ if file_check.returncode == 0:
535
+ # It's a file, get its content
536
+ cat_cmd = f"cat {file_path}"
537
+ cat_result = sandbox.exec("bash", "-c", cat_cmd)
538
+ file_content = ""
539
+ for line in cat_result.stdout:
540
+ file_content += _to_str(line)
541
+ cat_result.wait()
542
+
543
+ # Truncate if too long
544
+ if len(file_content) > 1000:
545
+ file_content = file_content[:1000] + "\n... (truncated)"
546
+
547
+ file_context += f"\n--- {file_path} ---\n{file_content}\n"
548
+ else:
549
+ # It's a directory, list its contents
550
+ ls_cmd = f"ls -la {file_path}"
551
+ ls_dir_result = sandbox.exec("bash", "-c", ls_cmd)
552
+ dir_content = ""
553
+ for line in ls_dir_result.stdout:
554
+ dir_content += _to_str(line)
555
+ ls_dir_result.wait()
556
+
557
+ file_context += f"\n--- Directory: {file_path} ---\n{dir_content}\n"
558
+ except Exception as e:
559
+ print(f"⚠️ Error getting content of {file_path}: {e}")
560
+
561
+ print(f"✅ Additional file context gathered from {len(relevant_files)} relevant files")
562
+
466
563
  except Exception as e:
467
564
  print(f"⚠️ Error getting directory context: {e}")
468
565
  directory_context = f"\nCurrent directory: {current_dir}\n"
@@ -494,6 +591,8 @@ But it failed with this error:
494
591
  ```
495
592
  {system_info}
496
593
  {directory_context}
594
+ {file_context}
595
+
497
596
  Please analyze the error and provide ONLY a single terminal command that would fix the issue.
498
597
  Consider the current directory, system information, and directory contents carefully before suggesting a solution.
499
598
 
@@ -507,29 +606,127 @@ Do not provide any explanations, just the exact command to run.
507
606
  """
508
607
 
509
608
  # Prepare the API request payload
510
- payload = {
511
- "model": "gpt-4.1",
512
- "messages": [
513
- {"role": "system", "content": "You are a debugging assistant. Provide only the terminal command to fix the issue, analyze the issue first understand why its happening and then provide the command to fix the issue. If you see missing pytest errors, suggest 'pip install pytest'. For wandb login issues, suggest 'wandb login YOUR_API_KEY' and the system will handle prompting for the actual key."},
514
- {"role": "user", "content": prompt}
515
- ],
516
- "temperature": 0.2,
517
- "max_tokens": 300
518
- }
609
+ # Build the list of candidate models (currently only gpt-4o-mini; OPENAI_MODEL env var can prepend a preferred model)
610
+ models_to_try = [
611
+ "gpt-4o-mini", # First choice: GPT-4o-mini (most widely available)
612
+ ]
519
613
 
520
- try:
521
- print("🤖 Calling OpenAI to debug the failed command...")
522
- response = requests.post(
523
- "https://api.openai.com/v1/chat/completions",
524
- headers=headers,
525
- json=payload,
526
- timeout=30
527
- )
614
+ # Check if we have a preferred model in environment
615
+ preferred_model = os.environ.get("OPENAI_MODEL")
616
+ if preferred_model:
617
+ # Insert the preferred model at the beginning of the list
618
+ models_to_try.insert(0, preferred_model)
619
+ print(f"✅ Using preferred model from environment: {preferred_model}")
528
620
 
529
- if response.status_code == 200:
530
- result = response.json()
621
+ # Remove duplicates while preserving order
622
+ models_to_try = list(dict.fromkeys(models_to_try))
623
+
624
+ # Function to make the API call with a specific model
625
+ def try_api_call(model_name, retries=2, backoff_factor=1.5):
626
+ payload = {
627
+ "model": model_name,
628
+ "messages": [
629
+ {"role": "system", "content": "You are a debugging assistant. Provide only the terminal command to fix the issue, analyze the issue first understand why its happening and then provide the command to fix the issue. If you see missing pytest errors, suggest 'pip install pytest'. For wandb login issues, suggest 'wandb login YOUR_API_KEY' and the system will handle prompting for the actual key."},
630
+ {"role": "user", "content": prompt}
631
+ ],
632
+ "temperature": 0.2,
633
+ "max_tokens": 300
634
+ }
635
+
636
+ # Add specific handling for common errors
637
+ last_error = None
638
+ for attempt in range(retries + 1):
639
+ try:
640
+ if attempt > 0:
641
+ # Exponential backoff
642
+ wait_time = backoff_factor * (2 ** (attempt - 1))
643
+ print(f"⏱️ Retrying in {wait_time:.1f} seconds... (attempt {attempt+1}/{retries+1})")
644
+ time.sleep(wait_time)
645
+
646
+ print(f"🤖 Calling OpenAI with {model_name} model to debug the failed command...")
647
+ response = requests.post(
648
+ "https://api.openai.com/v1/chat/completions",
649
+ headers=headers,
650
+ json=payload,
651
+ timeout=45 # Increased timeout for reliability
652
+ )
653
+
654
+ # Handle specific status codes
655
+ if response.status_code == 200:
656
+ return response.json(), None
657
+ elif response.status_code == 401:
658
+ error_msg = "Authentication error: Invalid API key"
659
+ print(f"❌ {error_msg}")
660
+ # Don't retry auth errors
661
+ return None, error_msg
662
+ elif response.status_code == 429:
663
+ error_msg = "Rate limit exceeded or quota reached"
664
+ print(f"⚠️ {error_msg}")
665
+ # Always retry rate limit errors with increasing backoff
666
+ last_error = error_msg
667
+ continue
668
+ elif response.status_code == 500:
669
+ error_msg = "OpenAI server error"
670
+ print(f"⚠️ {error_msg}")
671
+ # Retry server errors
672
+ last_error = error_msg
673
+ continue
674
+ else:
675
+ error_msg = f"Status code: {response.status_code}, Response: {response.text}"
676
+ print(f"⚠️ OpenAI API error: {error_msg}")
677
+ last_error = error_msg
678
+ # Only retry if we have attempts left
679
+ if attempt < retries:
680
+ continue
681
+ return None, error_msg
682
+ except requests.exceptions.Timeout:
683
+ error_msg = "Request timed out"
684
+ print(f"⚠️ {error_msg}")
685
+ last_error = error_msg
686
+ # Always retry timeouts
687
+ continue
688
+ except requests.exceptions.ConnectionError:
689
+ error_msg = "Connection error"
690
+ print(f"⚠️ {error_msg}")
691
+ last_error = error_msg
692
+ # Always retry connection errors
693
+ continue
694
+ except Exception as e:
695
+ error_msg = str(e)
696
+ print(f"⚠️ Unexpected error: {error_msg}")
697
+ last_error = error_msg
698
+ # Only retry if we have attempts left
699
+ if attempt < retries:
700
+ continue
701
+ return None, error_msg
702
+
703
+ # If we get here, all retries failed
704
+ return None, last_error
705
+
706
+ # Try each model in sequence until one works
707
+ result = None
708
+ last_error = None
709
+
710
+ for model in models_to_try:
711
+ result, error = try_api_call(model)
712
+ if result:
713
+ print(f"✅ Successfully got response from {model}")
714
+ break
715
+ else:
716
+ print(f"⚠️ Failed to get response from {model}: {error}")
717
+ last_error = error
718
+
719
+ if not result:
720
+ print(f"❌ All model attempts failed. Last error: {last_error}")
721
+ return None
722
+
723
+ # Process the response
724
+ try:
531
725
  fix_command = result["choices"][0]["message"]["content"].strip()
532
726
 
727
+ # Save the original response for debugging
728
+ original_response = fix_command
729
+
533
730
  # Extract just the command if it's wrapped in backticks or explanation
534
731
  if "```" in fix_command:
535
732
  # Extract content between backticks
@@ -537,21 +734,73 @@ Do not provide any explanations, just the exact command to run.
537
734
  code_blocks = re.findall(r'```(?:bash|sh)?\s*(.*?)\s*```', fix_command, re.DOTALL)
538
735
  if code_blocks:
539
736
  fix_command = code_blocks[0].strip()
737
+ print(f"✅ Extracted command from code block: {fix_command}")
540
738
 
541
739
  # If the response still has explanatory text, try to extract just the command
542
740
  if len(fix_command.split('\n')) > 1:
543
- # Take the shortest non-empty line as it's likely the command
544
- lines = [line.strip() for line in fix_command.split('\n') if line.strip()]
545
- if lines:
546
- fix_command = min(lines, key=len)
741
+ # First try to find lines that look like commands (start with common command prefixes)
742
+ command_prefixes = ['sudo', 'apt', 'pip', 'npm', 'yarn', 'git', 'cd', 'mv', 'cp', 'rm', 'mkdir', 'touch',
743
+ 'chmod', 'chown', 'echo', 'cat', 'python', 'python3', 'node', 'export',
744
+ 'curl', 'wget', 'docker', 'make', 'gcc', 'g++', 'javac', 'java',
745
+ 'conda', 'uv', 'poetry', 'nvm', 'rbenv', 'pyenv', 'rustup']
746
+
747
+ # Check for lines that start with common command prefixes
748
+ command_lines = [line.strip() for line in fix_command.split('\n')
749
+ if any(line.strip().startswith(prefix) for prefix in command_prefixes)]
750
+
751
+ if command_lines:
752
+ # Use the first command line found
753
+ fix_command = command_lines[0]
754
+ print(f"✅ Identified command by prefix: {fix_command}")
755
+ else:
756
+ # Try to find lines that look like commands (contain common shell patterns)
757
+ shell_patterns = [' | ', ' > ', ' >> ', ' && ', ' || ', ' ; ', '$(', '`', ' -y ', ' --yes ']
758
+ command_lines = [line.strip() for line in fix_command.split('\n')
759
+ if any(pattern in line for pattern in shell_patterns)]
760
+
761
+ if command_lines:
762
+ # Use the first command line found
763
+ fix_command = command_lines[0]
764
+ print(f"✅ Identified command by shell pattern: {fix_command}")
765
+ else:
766
+ # Fall back to the shortest non-empty line as it's likely the command
767
+ lines = [line.strip() for line in fix_command.split('\n') if line.strip()]
768
+ if lines:
769
+ # Exclude very short lines that are likely not commands
770
+ valid_lines = [line for line in lines if len(line) > 5]
771
+ if valid_lines:
772
+ fix_command = min(valid_lines, key=len)
773
+ else:
774
+ fix_command = min(lines, key=len)
775
+ print(f"✅ Selected shortest line as command: {fix_command}")
776
+
777
+ # Clean up the command - remove any trailing periods or quotes
778
+ fix_command = fix_command.rstrip('.;"\'')
779
+
780
+ # Remove common prefixes that LLMs sometimes add
781
+ prefixes_to_remove = [
782
+ "Run: ", "Execute: ", "Try: ", "Command: ", "Fix: ", "Solution: ",
783
+ "You should run: ", "You can run: ", "You need to run: "
784
+ ]
785
+ for prefix in prefixes_to_remove:
786
+ if fix_command.startswith(prefix):
787
+ fix_command = fix_command[len(prefix):].strip()
788
+ print(f"✅ Removed prefix: {prefix}")
789
+ break
790
+
791
+ # If the command is still multi-line or very long, it might not be a valid command
792
+ if len(fix_command.split('\n')) > 1 or len(fix_command) > 500:
793
+ print("⚠️ Extracted command appears invalid (multi-line or too long)")
794
+ print("🔍 Original response from LLM:")
795
+ print("-" * 60)
796
+ print(original_response)
797
+ print("-" * 60)
798
+ print("⚠️ Using best guess for command")
547
799
 
548
800
  print(f"🔧 Suggested fix: {fix_command}")
549
801
  return fix_command
550
- else:
551
- print(f"❌ OpenAI API error: {response.status_code} - {response.text}")
552
- return None
553
802
  except Exception as e:
554
- print(f"❌ Error calling OpenAI API: {e}")
803
+ print(f"❌ Error processing OpenAI response: {e}")
555
804
  return None
556
805
 
557
806
  def prompt_for_hf_token():
@@ -1,8 +0,0 @@
1
- {
2
- "token_id": "[REDACTED — leaked Modal token ID; rotate immediately]",
3
- "token_secret": "[REDACTED — leaked Modal token secret; rotate immediately]",
4
- "openai_api_key": "[REDACTED — leaked OpenAI API key; rotate immediately]",
5
- "modalTokenId": "[REDACTED — leaked Modal token ID; rotate immediately]",
6
- "modalTokenSecret": "[REDACTED — leaked Modal token secret; rotate immediately]",
7
- "openaiApiKey": "[REDACTED — leaked OpenAI API key; rotate immediately]",
8
- }
@@ -1,4 +0,0 @@
1
- {
2
- "token_id": "[REDACTED — leaked Modal token ID; rotate immediately]",
3
- "token_secret": "[REDACTED — leaked Modal token secret; rotate immediately]"
4
- }