gitarsenal-cli 1.9.16 → 1.9.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -15,7 +15,7 @@ import uuid
15
15
  import signal
16
16
  from pathlib import Path
17
17
  import modal
18
- import anthropic
18
+
19
19
  # Import authentication manager
20
20
  try:
21
21
  from auth_manager import AuthManager
@@ -31,7 +31,6 @@ parser.add_argument('--gpu', default='A10G', help='GPU type to use')
31
31
  parser.add_argument('--repo-url', help='Repository URL')
32
32
  parser.add_argument('--volume-name', help='Volume name')
33
33
  parser.add_argument('--use-api', action='store_true', help='Use API to fetch setup commands')
34
- parser.add_argument('--yes', action='store_true', help='Skip confirmation prompts')
35
34
 
36
35
  # Parse only known args to avoid conflicts with other arguments
37
36
  args, unknown = parser.parse_known_args()
@@ -516,57 +515,21 @@ class PersistentShell:
516
515
  REMOVE_COMMAND: <reason>
517
516
  """
518
517
 
519
- # Try OpenAI API first
520
- try:
521
- import openai
522
- client = openai.OpenAI(api_key=api_key)
523
-
524
- response = client.chat.completions.create(
525
- model="gpt-4.1",
526
- messages=[
527
- {"role": "system", "content": "You are a helpful assistant that suggests alternative commands that don't require user input."},
528
- {"role": "user", "content": prompt}
529
- ],
530
- max_tokens=150,
531
- temperature=0.7
532
- )
533
-
534
- response_text = response.choices[0].message.content.strip()
535
-
536
- except Exception as e:
537
- print(f"⚠️ OpenAI API call failed: {e}")
538
-
539
- # Try Claude as fallback if available
540
- if anthropic is not None:
541
- print("🔄 Trying Claude-4-Sonnet as fallback for alternative command suggestion...")
542
- try:
543
- # Get Anthropic API key
544
- anthropic_api_key = os.environ.get("ANTHROPIC_API_KEY")
545
- if not anthropic_api_key:
546
- print("⚠️ No ANTHROPIC_API_KEY found in environment")
547
- return None
548
-
549
- # Create Anthropic client
550
- client = anthropic.Anthropic(api_key=anthropic_api_key)
551
-
552
- print("🤖 Calling Claude-4-Sonnet for alternative command suggestion...")
553
- message = client.messages.create(
554
- model="claude-3-5-sonnet-20241022",
555
- max_tokens=150,
556
- messages=[
557
- {"role": "user", "content": prompt}
558
- ]
559
- )
560
-
561
- response_text = message.content[0].text.strip()
562
- print("✅ Claude alternative command suggestion completed")
563
-
564
- except Exception as claude_error:
565
- print(f"❌ Claude API call failed: {claude_error}")
566
- return None
567
- else:
568
- print("⚠️ Claude fallback not available (anthropic library not installed)")
569
- return None
518
+ # Call OpenAI API
519
+ import openai
520
+ client = openai.OpenAI(api_key=api_key)
521
+
522
+ response = client.chat.completions.create(
523
+ model="gpt-4o-mini",
524
+ messages=[
525
+ {"role": "system", "content": "You are a helpful assistant that suggests alternative commands that don't require user input."},
526
+ {"role": "user", "content": prompt}
527
+ ],
528
+ max_tokens=150,
529
+ temperature=0.7
530
+ )
531
+
532
+ response_text = response.choices[0].message.content.strip()
570
533
 
571
534
  # Check if the response suggests removing the command
572
535
  if response_text.startswith("REMOVE_COMMAND:"):
@@ -610,10 +573,20 @@ class PersistentShell:
610
573
  return output.strip()
611
574
  return self.working_dir
612
575
 
576
+ def get_virtual_env(self):
577
+ """Get the currently activated virtual environment path."""
578
+ return self.virtual_env_path
579
+
613
580
  def is_in_venv(self):
614
581
  """Check if we're currently in a virtual environment."""
615
582
  return self.virtual_env_path is not None and self.virtual_env_path != ""
616
583
 
584
+ def get_venv_name(self):
585
+ """Get the name of the current virtual environment if active."""
586
+ if self.is_in_venv():
587
+ return os.path.basename(self.virtual_env_path)
588
+ return None
589
+
617
590
  def exec(self, *args, **kwargs):
618
591
  """Compatibility method to make PersistentShell work with call_openai_for_debug."""
619
592
  # Convert exec call to execute method
@@ -1003,59 +976,23 @@ class CommandListManager:
1003
976
  RUN: <reason>
1004
977
  """
1005
978
 
1006
- # Try OpenAI API first
1007
- try:
1008
- import openai
1009
- client = openai.OpenAI(api_key=api_key)
1010
-
1011
- print("🔍 Analyzing if original command should be skipped...")
1012
-
1013
- response = client.chat.completions.create(
1014
- model="gpt-3.5-turbo",
1015
- messages=[
1016
- {"role": "system", "content": "You are a helpful assistant that analyzes command execution."},
1017
- {"role": "user", "content": prompt}
1018
- ],
1019
- max_tokens=100,
1020
- temperature=0.3
1021
- )
1022
-
1023
- response_text = response.choices[0].message.content.strip()
1024
-
1025
- except Exception as e:
1026
- print(f"⚠️ OpenAI API call failed: {e}")
1027
-
1028
- # Try Claude as fallback if available
1029
- if anthropic is not None:
1030
- print("🔄 Trying Claude-4-Sonnet as fallback for command skip analysis...")
1031
- try:
1032
- # Get Anthropic API key
1033
- anthropic_api_key = os.environ.get("ANTHROPIC_API_KEY")
1034
- if not anthropic_api_key:
1035
- print("⚠️ No ANTHROPIC_API_KEY found in environment")
1036
- return False, "No API key available"
1037
-
1038
- # Create Anthropic client
1039
- client = anthropic.Anthropic(api_key=anthropic_api_key)
1040
-
1041
- print("🤖 Calling Claude-4-Sonnet for command skip analysis...")
1042
- message = client.messages.create(
1043
- model="claude-3-5-sonnet-20241022",
1044
- max_tokens=100,
1045
- messages=[
1046
- {"role": "user", "content": prompt}
1047
- ]
1048
- )
1049
-
1050
- response_text = message.content[0].text.strip()
1051
- print("✅ Claude command skip analysis completed")
1052
-
1053
- except Exception as claude_error:
1054
- print(f"❌ Claude API call failed: {claude_error}")
1055
- return False, f"Error: {claude_error}"
1056
- else:
1057
- print("⚠️ Claude fallback not available (anthropic library not installed)")
1058
- return False, "No API key available"
979
+ # Call OpenAI API
980
+ import openai
981
+ client = openai.OpenAI(api_key=api_key)
982
+
983
+ print("🔍 Analyzing if original command should be skipped...")
984
+
985
+ response = client.chat.completions.create(
986
+ model="gpt-3.5-turbo",
987
+ messages=[
988
+ {"role": "system", "content": "You are a helpful assistant that analyzes command execution."},
989
+ {"role": "user", "content": prompt}
990
+ ],
991
+ max_tokens=100,
992
+ temperature=0.3
993
+ )
994
+
995
+ response_text = response.choices[0].message.content.strip()
1059
996
 
1060
997
  # Parse the response
1061
998
  if response_text.startswith("SKIP:"):
@@ -1183,60 +1120,24 @@ class CommandListManager:
1183
1120
  Only include commands that need changes (SKIP, MODIFY, ADD_AFTER), not KEEP actions.
1184
1121
  """
1185
1122
 
1186
- # Try OpenAI API first
1187
- try:
1188
- import openai
1189
- import json
1190
- client = openai.OpenAI(api_key=api_key)
1191
-
1192
- print("🔍 Analyzing command list for optimizations...")
1193
-
1194
- response = client.chat.completions.create(
1195
- model="gpt-4.1", # Use a more capable model for this complex task
1196
- messages=[
1197
- {"role": "system", "content": "You are a helpful assistant that analyzes and optimizes command lists."},
1198
- {"role": "user", "content": prompt}
1199
- ],
1200
- max_tokens=1000,
1201
- temperature=0.2
1202
- )
1203
-
1204
- response_text = response.choices[0].message.content.strip()
1205
-
1206
- except Exception as e:
1207
- print(f"⚠️ OpenAI API call failed: {e}")
1208
-
1209
- # Try Claude as fallback if available
1210
- if anthropic is not None:
1211
- print("🔄 Trying Claude-4-Sonnet as fallback for command list analysis...")
1212
- try:
1213
- # Get Anthropic API key
1214
- anthropic_api_key = os.environ.get("ANTHROPIC_API_KEY")
1215
- if not anthropic_api_key:
1216
- print("⚠️ No ANTHROPIC_API_KEY found in environment")
1217
- return False
1218
-
1219
- # Create Anthropic client
1220
- client = anthropic.Anthropic(api_key=anthropic_api_key)
1221
-
1222
- print("🤖 Calling Claude-4-Sonnet for command list analysis...")
1223
- message = client.messages.create(
1224
- model="claude-3-5-sonnet-20241022",
1225
- max_tokens=1000,
1226
- messages=[
1227
- {"role": "user", "content": prompt}
1228
- ]
1229
- )
1230
-
1231
- response_text = message.content[0].text.strip()
1232
- print("✅ Claude command list analysis completed")
1233
-
1234
- except Exception as claude_error:
1235
- print(f"❌ Claude API call failed: {claude_error}")
1236
- return False
1237
- else:
1238
- print("⚠️ Claude fallback not available (anthropic library not installed)")
1239
- return False
1123
+ # Call OpenAI API
1124
+ import openai
1125
+ import json
1126
+ client = openai.OpenAI(api_key=api_key)
1127
+
1128
+ print("🔍 Analyzing command list for optimizations...")
1129
+
1130
+ response = client.chat.completions.create(
1131
+ model="gpt-4o-mini", # gpt-4o-mini balances cost and capability for this complex task
1132
+ messages=[
1133
+ {"role": "system", "content": "You are a helpful assistant that analyzes and optimizes command lists."},
1134
+ {"role": "user", "content": prompt}
1135
+ ],
1136
+ max_tokens=1000,
1137
+ temperature=0.2
1138
+ )
1139
+
1140
+ response_text = response.choices[0].message.content.strip()
1240
1141
 
1241
1142
  # Extract JSON from the response
1242
1143
  try:
@@ -1345,6 +1246,7 @@ class CommandListManager:
1345
1246
  print(f"⚠️ Error analyzing command list: {e}")
1346
1247
  return False
1347
1248
 
1249
+
1348
1250
  # Import the fetch_modal_tokens module
1349
1251
  # print("🔄 Fetching tokens from proxy server...")
1350
1252
  from fetch_modal_tokens import get_tokens
@@ -1764,9 +1666,13 @@ IMPORTANT GUIDELINES:
1764
1666
 
1765
1667
  Do not provide any explanations, just the exact command to run.
1766
1668
  """
1767
-
1669
+
1670
+ # Prepare the API request payload
1671
+ # print("🔍 DEBUG: Preparing API request...")
1672
+
1673
+ # Try the default model first, but fall back to other models if needed
1768
1674
  models_to_try = [
1769
- "gpt-4.1", # First choice: GPT-4o (most widely available)
1675
+ "gpt-4o-mini", # First choice: GPT-4o mini (most widely available)
1770
1676
  ]
1771
1677
 
1772
1678
  # Check if we have a preferred model in environment
@@ -1774,12 +1680,19 @@ Do not provide any explanations, just the exact command to run.
1774
1680
  if preferred_model:
1775
1681
  # Insert the preferred model at the beginning of the list
1776
1682
  models_to_try.insert(0, preferred_model)
1683
+ # print(f"✅ Using preferred model from environment: {preferred_model}")
1777
1684
 
1778
1685
  # Remove duplicates while preserving order
1779
1686
  models_to_try = list(dict.fromkeys(models_to_try))
1687
+ # print(f"🔍 DEBUG: Models to try: {models_to_try}")
1780
1688
 
1781
1689
  # Function to make the API call with a specific model
1782
1690
  def try_api_call(model_name, retries=2, backoff_factor=1.5):
1691
+ # print(f"🔍 DEBUG: Attempting API call with model: {model_name}")
1692
+ # print(f"🔍 DEBUG: API key available: {'Yes' if api_key else 'No'}")
1693
+ # if api_key:
1694
+ # print(f"🔍 DEBUG: API key length: {len(api_key)}")
1695
+ # print(f"🔍 DEBUG: API key starts with: {api_key[:10]}...")
1783
1696
 
1784
1697
  payload = {
1785
1698
  "model": model_name,
@@ -1882,192 +1795,15 @@ Do not provide any explanations, just the exact command to run.
1882
1795
  for model in models_to_try:
1883
1796
  result, error = try_api_call(model)
1884
1797
  if result:
1798
+ # print(f"✅ Successfully got response from {model}")
1885
1799
  break
1886
1800
  else:
1887
1801
  print(f"⚠️ Failed to get response from {model}: {error}")
1888
1802
  last_error = error
1889
1803
 
1890
1804
  if not result:
1891
- print(f"❌ All OpenAI model attempts failed. Last error: {last_error}")
1892
-
1893
- # Try Claude as fallback if available
1894
- if anthropic is not None:
1895
- print("🔄 Trying Claude-4-Sonnet as fallback...")
1896
- try:
1897
- # Get Anthropic API key
1898
- anthropic_api_key = os.environ.get("ANTHROPIC_API_KEY")
1899
- if not anthropic_api_key:
1900
- print("⚠️ No ANTHROPIC_API_KEY found in environment")
1901
- return None
1902
-
1903
- # Create Anthropic client
1904
- client = anthropic.Anthropic(api_key=anthropic_api_key)
1905
-
1906
- # Prepare the same prompt for Claude
1907
- claude_prompt = f"""
1908
- I'm trying to run the following command in a Linux environment:
1909
-
1910
- ```
1911
- {command}
1912
- ```
1913
-
1914
- But it failed with this error:
1915
-
1916
- ```
1917
- {error_output}
1918
- ```
1919
- {system_info}
1920
- {directory_context}
1921
- {file_context}
1922
-
1923
- AVAILABLE CREDENTIALS:
1924
- {auth_context}
1925
-
1926
- Please analyze the error and provide ONLY a single terminal command that would fix the issue.
1927
- Consider the current directory, system information, directory contents, and available credentials carefully before suggesting a solution.
1928
-
1929
- IMPORTANT GUIDELINES:
1930
- 1. For any commands that might ask for yes/no confirmation, use the appropriate non-interactive flag:
1931
- - For apt/apt-get: use -y or --yes
1932
- - For rm: use -f or --force
1933
-
1934
- 2. If the error indicates a file is not found:
1935
- - FIRST try to search for the file using: find . -name "filename" -type f 2>/dev/null
1936
- - If found, navigate to that directory using: cd /path/to/directory
1937
- - If not found, then consider creating the file or installing missing packages
1938
-
1939
- 3. For missing packages or dependencies:
1940
- - Use pip install for Python packages
1941
- - Use apt-get install -y for system packages
1942
- - Use npm install for Node.js packages
1943
-
1944
- 4. For authentication issues:
1945
- - Analyze the error to determine what type of authentication is needed
1946
- - ALWAYS use the actual credential values from the AVAILABLE CREDENTIALS section above (NOT placeholders)
1947
- - Look for the specific API key or token needed in the auth_context and use its exact value
1948
- - Common patterns:
1949
- * wandb errors: use wandb login with the actual WANDB_API_KEY value from auth_context
1950
- * huggingface errors: use huggingface-cli login with the actual HF_TOKEN or HUGGINGFACE_TOKEN value from auth_context
1951
- * github errors: configure git credentials with the actual GITHUB_TOKEN value from auth_context
1952
- * kaggle errors: create ~/.kaggle/kaggle.json with the actual KAGGLE_USERNAME and KAGGLE_KEY values from auth_context
1953
- * API errors: export the appropriate API key as environment variable using the actual value from auth_context
1954
-
1955
- 5. Environment variable exports:
1956
- - Use export commands for API keys that need to be in environment
1957
- - ALWAYS use the actual credential values from auth_context, never use placeholders like "YOUR_API_KEY"
1958
- - Example: export OPENAI_API_KEY="sk-..." (using the actual key from auth_context)
1959
-
1960
- 6. CRITICAL: When using any API key, token, or credential:
1961
- - Find the exact value in the AVAILABLE CREDENTIALS section
1962
- - Use that exact value in your command
1963
- - Do not use generic placeholders or dummy values
1964
- - The auth_context contains real, usable credentials
1965
-
1966
- 7. For Git SSH authentication failures:
1967
- - If the error contains "Host key verification failed" or "Could not read from remote repository"
1968
- - ALWAYS convert SSH URLs to HTTPS URLs for public repositories
1969
- - Replace git@github.com:username/repo.git with https://github.com/username/repo.git
1970
- - This works for public repositories without authentication
1971
- - Example: git clone https://github.com/xg-chu/ARTalk.git
1972
-
1973
- Do not provide any explanations, just the exact command to run.
1974
- """
1975
-
1976
- print("🤖 Calling Claude-4-Sonnet to debug the failed command...")
1977
- message = client.messages.create(
1978
- model="claude-3-5-sonnet-20241022",
1979
- max_tokens=300,
1980
- messages=[
1981
- {"role": "user", "content": claude_prompt}
1982
- ]
1983
- )
1984
-
1985
- fix_command = message.content[0].text.strip()
1986
- print(f"🔍 DEBUG: Raw Claude response content: {fix_command}")
1987
-
1988
- # Process the response similar to OpenAI
1989
- original_response = fix_command
1990
-
1991
- # Extract just the command if it's wrapped in backticks or explanation
1992
- if "```" in fix_command:
1993
- # Extract content between backticks
1994
- import re
1995
- code_blocks = re.findall(r'```(?:bash|sh)?\s*(.*?)\s*```', fix_command, re.DOTALL)
1996
- if code_blocks:
1997
- fix_command = code_blocks[0].strip()
1998
- print(f"✅ Extracted command from code block: {fix_command}")
1999
-
2000
- # If the response still has explanatory text, try to extract just the command
2001
- if len(fix_command.split('\n')) > 1:
2002
- # First try to find lines that look like commands (start with common command prefixes)
2003
- command_prefixes = ['sudo', 'apt', 'pip', 'npm', 'yarn', 'git', 'cd', 'mv', 'cp', 'rm', 'mkdir', 'touch',
2004
- 'chmod', 'chown', 'echo', 'cat', 'python', 'python3', 'node', 'export',
2005
- 'curl', 'wget', 'docker', 'make', 'gcc', 'g++', 'javac', 'java',
2006
- 'conda', 'uv', 'poetry', 'nvm', 'rbenv', 'pyenv', 'rustup']
2007
-
2008
- # Check for lines that start with common command prefixes
2009
- command_lines = [line.strip() for line in fix_command.split('\n')
2010
- if any(line.strip().startswith(prefix) for prefix in command_prefixes)]
2011
-
2012
- if command_lines:
2013
- # Use the first command line found
2014
- fix_command = command_lines[0]
2015
- print(f"✅ Identified command by prefix: {fix_command}")
2016
- else:
2017
- # Try to find lines that look like commands (contain common shell patterns)
2018
- shell_patterns = [' | ', ' > ', ' >> ', ' && ', ' || ', ' ; ', '$(', '`', ' -y ', ' --yes ']
2019
- command_lines = [line.strip() for line in fix_command.split('\n')
2020
- if any(pattern in line for pattern in shell_patterns)]
2021
-
2022
- if command_lines:
2023
- # Use the first command line found
2024
- fix_command = command_lines[0]
2025
- print(f"✅ Identified command by shell pattern: {fix_command}")
2026
- else:
2027
- # Fall back to the shortest non-empty line as it's likely the command
2028
- lines = [line.strip() for line in fix_command.split('\n') if line.strip()]
2029
- if lines:
2030
- # Exclude very short lines that are likely not commands
2031
- valid_lines = [line for line in lines if len(line) > 5]
2032
- if valid_lines:
2033
- fix_command = min(valid_lines, key=len)
2034
- else:
2035
- fix_command = min(lines, key=len)
2036
- print(f"✅ Selected shortest line as command: {fix_command}")
2037
-
2038
- # Clean up the command - remove any trailing periods or quotes
2039
- fix_command = fix_command.rstrip('.;"\'')
2040
-
2041
- # Remove common prefixes that LLMs sometimes add
2042
- prefixes_to_remove = [
2043
- "Run: ", "Execute: ", "Try: ", "Command: ", "Fix: ", "Solution: ",
2044
- "You should run: ", "You can run: ", "You need to run: "
2045
- ]
2046
- for prefix in prefixes_to_remove:
2047
- if fix_command.startswith(prefix):
2048
- fix_command = fix_command[len(prefix):].strip()
2049
- print(f"✅ Removed prefix: {prefix}")
2050
- break
2051
-
2052
- # If the command is still multi-line or very long, it might not be a valid command
2053
- if len(fix_command.split('\n')) > 1 or len(fix_command) > 500:
2054
- print("⚠️ Extracted command appears invalid (multi-line or too long)")
2055
- print("🔍 Original response from Claude:")
2056
- print("-" * 60)
2057
- print(original_response)
2058
- print("-" * 60)
2059
- print("⚠️ Using best guess for command")
2060
-
2061
- print(f"🔧 Claude suggested fix: {fix_command}")
2062
- print(f"🔍 DEBUG: Returning Claude fix command: {fix_command}")
2063
- return fix_command
2064
-
2065
- except Exception as e:
2066
- print(f"❌ Claude API call failed: {e}")
2067
- return None
2068
- else:
2069
- print("⚠️ Claude fallback not available (anthropic library not installed)")
2070
- return None
1805
+ print(f"❌ All model attempts failed. Last error: {last_error}")
1806
+ return None
2071
1807
 
2072
1808
  # Process the response
2073
1809
  try:
@@ -2223,7 +1959,7 @@ Provide fixes for all {len(failed_commands)} failed commands:"""
2223
1959
  }
2224
1960
 
2225
1961
  payload = {
2226
- "model": "gpt-4.1", # Use a more capable model for batch analysis
1962
+ "model": "gpt-4o-mini", # gpt-4o-mini balances cost and capability for batch analysis
2227
1963
  "messages": [
2228
1964
  {"role": "system", "content": "You are a debugging assistant. Analyze failed commands and provide specific fix commands. Return only the fix commands and reasons in the specified format."},
2229
1965
  {"role": "user", "content": prompt}
@@ -2274,70 +2010,49 @@ Provide fixes for all {len(failed_commands)} failed commands:"""
2274
2010
  return fixes
2275
2011
  else:
2276
2012
  print(f"❌ OpenAI API error: {response.status_code} - {response.text}")
2277
-
2278
- # Try Claude as fallback if available
2279
- if anthropic is not None:
2280
- print("🔄 Trying Claude-4-Sonnet as fallback for batch debugging...")
2281
- try:
2282
- # Get Anthropic API key
2283
- anthropic_api_key = os.environ.get("ANTHROPIC_API_KEY")
2284
- if not anthropic_api_key:
2285
- print("⚠️ No ANTHROPIC_API_KEY found in environment")
2286
- return []
2287
-
2288
- # Create Anthropic client
2289
- client = anthropic.Anthropic(api_key=anthropic_api_key)
2290
-
2291
- print("🤖 Calling Claude-4-Sonnet for batch debugging...")
2292
- message = client.messages.create(
2293
- model="claude-3-5-sonnet-20241022",
2294
- max_tokens=1000,
2295
- messages=[
2296
- {"role": "user", "content": prompt}
2297
- ]
2298
- )
2299
-
2300
- content = message.content[0].text
2301
- print(f"✅ Claude batch analysis completed")
2302
-
2303
- # Parse the response to extract fix commands
2304
- fixes = []
2305
- for i in range(1, len(failed_commands) + 1):
2306
- fix_pattern = f"FIX_COMMAND_{i}: (.+)"
2307
- reason_pattern = f"REASON_{i}: (.+)"
2308
-
2309
- fix_match = re.search(fix_pattern, content, re.MULTILINE)
2310
- reason_match = re.search(reason_pattern, content, re.MULTILINE)
2311
-
2312
- if fix_match:
2313
- fix_command = fix_match.group(1).strip()
2314
- reason = reason_match.group(1).strip() if reason_match else "Claude suggested fix"
2315
-
2316
- # Clean up the fix command
2317
- if fix_command.startswith('`') and fix_command.endswith('`'):
2318
- fix_command = fix_command[1:-1]
2319
-
2320
- fixes.append({
2321
- 'original_command': failed_commands[i-1]['command'],
2322
- 'fix_command': fix_command,
2323
- 'reason': reason,
2324
- 'command_index': i-1
2325
- })
2326
-
2327
- print(f"🔧 Generated {len(fixes)} fix commands from Claude batch analysis")
2328
- return fixes
2329
-
2330
- except Exception as e:
2331
- print(f"❌ Claude API call failed: {e}")
2332
- return []
2333
- else:
2334
- print("⚠️ Claude fallback not available (anthropic library not installed)")
2335
- return []
2013
+ return []
2336
2014
 
2337
2015
  except Exception as e:
2338
2016
  print(f"❌ Error during batch debugging: {e}")
2339
2017
  return []
2340
2018
 
2019
+ def prompt_for_hf_token():
2020
+ """Prompt user for Hugging Face token when needed"""
2021
+ # Try to use credentials manager first
2022
+ try:
2023
+ from credentials_manager import CredentialsManager
2024
+ credentials_manager = CredentialsManager()
2025
+ token = credentials_manager.get_huggingface_token()
2026
+ if token:
2027
+ return token
2028
+ except ImportError:
2029
+ # Fall back to direct input if credentials_manager is not available
2030
+ pass
2031
+
2032
+ # Traditional direct input method as fallback
2033
+ print("\n" + "="*60)
2034
+ print("🔑 HUGGING FACE TOKEN REQUIRED")
2035
+ print("="*60)
2036
+ print("The training script requires a valid Hugging Face token.")
2037
+ print("You can get your token from: https://huggingface.co/settings/tokens")
2038
+ print("📝 Please paste your Hugging Face token below:")
2039
+ print(" (Your input will be hidden for security)")
2040
+ print("-" * 60)
2041
+
2042
+ try:
2043
+ token = getpass.getpass("HF Token: ").strip()
2044
+ if not token:
2045
+ print("❌ No token provided.")
2046
+ return None
2047
+ print("✅ Token received successfully!")
2048
+ return token
2049
+ except KeyboardInterrupt:
2050
+ print("\n❌ Token input cancelled by user.")
2051
+ return None
2052
+ except Exception as e:
2053
+ print(f"❌ Error getting token: {e}")
2054
+ return None
2055
+
2341
2056
  def generate_random_password(length=16):
2342
2057
  """Generate a random password for SSH access"""
2343
2058
  alphabet = string.ascii_letters + string.digits + "!@#$%^&*"
@@ -2352,6 +2067,11 @@ def create_modal_ssh_container(gpu_type, repo_url=None, repo_name=None, setup_co
2352
2067
 
2353
2068
  # Use interactive mode if specified
2354
2069
  if interactive:
2070
+ # If GPU type is not specified, prompt for it
2071
+ if not gpu_type:
2072
+ gpu_type = prompt_for_gpu()
2073
+ else:
2074
+ print(f"✅ Using provided GPU type: {gpu_type}")
2355
2075
 
2356
2076
  # If repo URL is not specified, prompt for it
2357
2077
  if not repo_url:
@@ -3028,6 +2748,7 @@ def fetch_setup_commands_from_api(repo_url):
3028
2748
  print(f"⚠️ GitIngest CLI failed with exit code {result.returncode}")
3029
2749
  print(f"⚠️ Error output: {result.stderr}")
3030
2750
  print("Falling back to basic analysis")
2751
+ gitingest_data = generate_basic_repo_analysis_from_url(repo_url)
3031
2752
  else:
3032
2753
  print(f"✅ GitIngest analysis completed successfully")
3033
2754
 
@@ -3111,13 +2832,13 @@ def fetch_setup_commands_from_api(repo_url):
3111
2832
  print(f"📁 Processed GitIngest data saved to: {processed_file}")
3112
2833
  except FileNotFoundError:
3113
2834
  print(f"⚠️ Output file not found at {output_file}")
3114
-
2835
+ gitingest_data = generate_basic_repo_analysis_from_url(repo_url)
3115
2836
  except Exception as e:
3116
2837
  print(f"⚠️ Error reading GitIngest output: {e}")
3117
-
2838
+ gitingest_data = generate_basic_repo_analysis_from_url(repo_url)
3118
2839
  else:
3119
2840
  # Fall back to basic analysis if gitingest CLI is not available
3120
- gitingest_data = "{}"
2841
+ gitingest_data = generate_basic_repo_analysis_from_url(repo_url)
3121
2842
 
3122
2843
  # Prepare the request payload with GitIngest data
3123
2844
  payload = {
@@ -3351,6 +3072,171 @@ def generate_fallback_commands(gitingest_data):
3351
3072
 
3352
3073
  return fixed_commands
3353
3074
 
3075
+ def generate_basic_repo_analysis_from_url(repo_url):
3076
+ """Generate basic repository analysis data from a repository URL."""
3077
+ import tempfile
3078
+ import subprocess
3079
+ import os
3080
+ import shutil
3081
+
3082
+ # Create a temporary directory for cloning
3083
+ temp_dir = tempfile.mkdtemp(prefix="repo_basic_analysis_")
3084
+
3085
+ try:
3086
+ print(f"📥 Cloning repository to {temp_dir} for basic analysis...")
3087
+ clone_result = subprocess.run(
3088
+ ["git", "clone", "--depth", "1", repo_url, temp_dir],
3089
+ capture_output=True,
3090
+ text=True
3091
+ )
3092
+
3093
+ if clone_result.returncode != 0:
3094
+ print(f"❌ Failed to clone repository: {clone_result.stderr}")
3095
+ return {
3096
+ "system_info": {
3097
+ "platform": "linux",
3098
+ "python_version": "3.10",
3099
+ "detected_language": "Unknown",
3100
+ "detected_technologies": [],
3101
+ "file_count": 0,
3102
+ "repo_stars": 0,
3103
+ "repo_forks": 0,
3104
+ "primary_package_manager": "Unknown",
3105
+ "complexity_level": "low"
3106
+ },
3107
+ "repository_analysis": {
3108
+ "summary": f"Repository analysis for {repo_url}",
3109
+ "tree": "Failed to clone repository",
3110
+ "content_preview": "No content available"
3111
+ },
3112
+ "success": False
3113
+ }
3114
+
3115
+ print(f"✅ Repository cloned successfully for basic analysis")
3116
+
3117
+ # Use the existing generate_basic_repo_analysis function
3118
+ return generate_basic_repo_analysis(temp_dir)
3119
+ finally:
3120
+ # Clean up the temporary directory
3121
+ print(f"🧹 Cleaning up temporary directory for basic analysis...")
3122
+ shutil.rmtree(temp_dir, ignore_errors=True)
3123
+
3124
+ def generate_basic_repo_analysis(repo_dir):
3125
+ """Generate basic repository analysis when GitIngest is not available."""
3126
+ import os
3127
+ import subprocess
3128
+
3129
+ # Detect language and technologies based on file extensions
3130
+ file_extensions = {}
3131
+ file_count = 0
3132
+
3133
+ for root, _, files in os.walk(repo_dir):
3134
+ for file in files:
3135
+ file_count += 1
3136
+ ext = os.path.splitext(file)[1].lower()
3137
+ if ext:
3138
+ file_extensions[ext] = file_extensions.get(ext, 0) + 1
3139
+
3140
+ # Determine primary language
3141
+ language_map = {
3142
+ '.py': 'Python',
3143
+ '.js': 'JavaScript',
3144
+ '.ts': 'TypeScript',
3145
+ '.jsx': 'JavaScript',
3146
+ '.tsx': 'TypeScript',
3147
+ '.java': 'Java',
3148
+ '.cpp': 'C++',
3149
+ '.c': 'C',
3150
+ '.go': 'Go',
3151
+ '.rs': 'Rust',
3152
+ '.rb': 'Ruby',
3153
+ '.php': 'PHP',
3154
+ '.swift': 'Swift',
3155
+ '.kt': 'Kotlin',
3156
+ '.cs': 'C#'
3157
+ }
3158
+
3159
+ # Count files by language
3160
+ language_counts = {}
3161
+ for ext, count in file_extensions.items():
3162
+ if ext in language_map:
3163
+ lang = language_map[ext]
3164
+ language_counts[lang] = language_counts.get(lang, 0) + count
3165
+
3166
+ # Determine primary language
3167
+ primary_language = max(language_counts.items(), key=lambda x: x[1])[0] if language_counts else "Unknown"
3168
+
3169
+ # Detect package managers
3170
+ package_managers = []
3171
+ package_files = {
3172
+ 'requirements.txt': 'pip',
3173
+ 'setup.py': 'pip',
3174
+ 'pyproject.toml': 'pip',
3175
+ 'package.json': 'npm',
3176
+ 'yarn.lock': 'yarn',
3177
+ 'pnpm-lock.yaml': 'pnpm',
3178
+ 'Cargo.toml': 'cargo',
3179
+ 'go.mod': 'go',
3180
+ 'Gemfile': 'bundler',
3181
+ 'pom.xml': 'maven',
3182
+ 'build.gradle': 'gradle',
3183
+ 'composer.json': 'composer'
3184
+ }
3185
+
3186
+ for file, manager in package_files.items():
3187
+ if os.path.exists(os.path.join(repo_dir, file)):
3188
+ package_managers.append(manager)
3189
+
3190
+ primary_package_manager = package_managers[0] if package_managers else "Unknown"
3191
+
3192
+ # Get README content
3193
+ readme_content = ""
3194
+ for readme_name in ['README.md', 'README', 'README.txt', 'readme.md']:
3195
+ readme_path = os.path.join(repo_dir, readme_name)
3196
+ if os.path.exists(readme_path):
3197
+ with open(readme_path, 'r', encoding='utf-8', errors='ignore') as f:
3198
+ readme_content = f.read()
3199
+ break
3200
+
3201
+ # Try to get repository info
3202
+ repo_info = {}
3203
+ try:
3204
+ # Get remote origin URL
3205
+ cmd = ["git", "config", "--get", "remote.origin.url"]
3206
+ result = subprocess.run(cmd, cwd=repo_dir, capture_output=True, text=True)
3207
+ if result.returncode == 0:
3208
+ repo_info["url"] = result.stdout.strip()
3209
+
3210
+ # Get commit count as a proxy for activity
3211
+ cmd = ["git", "rev-list", "--count", "HEAD"]
3212
+ result = subprocess.run(cmd, cwd=repo_dir, capture_output=True, text=True)
3213
+ if result.returncode == 0:
3214
+ repo_info["commit_count"] = int(result.stdout.strip())
3215
+ except Exception:
3216
+ pass
3217
+
3218
+ # Build the analysis data
3219
+ return {
3220
+ "system_info": {
3221
+ "platform": "linux", # Assuming Linux for container environment
3222
+ "python_version": "3.10", # Common Python version
3223
+ "detected_language": primary_language,
3224
+ "detected_technologies": list(language_counts.keys()),
3225
+ "file_count": file_count,
3226
+ "repo_stars": repo_info.get("stars", 0),
3227
+ "repo_forks": repo_info.get("forks", 0),
3228
+ "primary_package_manager": primary_package_manager,
3229
+ "complexity_level": "medium" # Default assumption
3230
+ },
3231
+ "repository_analysis": {
3232
+ "summary": f"Repository analysis for {repo_dir}",
3233
+ "readme_content": readme_content[:5000] if readme_content else "No README found",
3234
+ "package_managers": package_managers,
3235
+ "file_extensions": list(file_extensions.keys())
3236
+ },
3237
+ "success": True
3238
+ }
3239
+
3354
3240
  def fix_setup_commands(commands):
3355
3241
  """Fix setup commands by removing placeholders and comments."""
3356
3242
  fixed_commands = []
@@ -3860,6 +3746,152 @@ def get_setup_commands_from_gitingest(repo_url):
3860
3746
  print("❌ All API endpoints failed")
3861
3747
  return generate_fallback_commands(gitingest_data)
3862
3748
 
3749
def prompt_for_gpu():
    """
    Interactively ask the user to select a GPU type.

    Tries an arrow-key menu first, which requires a POSIX terminal with the
    ``tty``/``termios`` modules and a TTY on stdin.  When raw-mode input is
    unavailable (Windows, piped stdin, odd terminals) it falls back to a plain
    numbered text prompt instead of crashing.

    Returns:
        str: The selected GPU type key (e.g. ``"A10G"``).  The text fallback
        returns ``"A10G"`` on empty or invalid input.

    Exits:
        Calls ``sys.exit(1)`` when the user cancels with Ctrl+C.
    """
    import sys

    print("\n🔧 GPU Selection Required")
    print("No GPU type was specified with --gpu flag.")
    print("Please select a GPU type for your container:")

    # Available GPU types and their VRAM sizes.
    gpu_specs = {
        'T4': {'gpu': 'T4', 'memory': '16GB'},
        'L4': {'gpu': 'L4', 'memory': '24GB'},
        'A10G': {'gpu': 'A10G', 'memory': '24GB'},
        'A100-40': {'gpu': 'A100-40GB', 'memory': '40GB'},
        'A100-80': {'gpu': 'A100-80GB', 'memory': '80GB'},
        'L40S': {'gpu': 'L40S', 'memory': '48GB'},
        'H100': {'gpu': 'H100', 'memory': '80GB'},
        'H200': {'gpu': 'H200', 'memory': '141GB'},
        'B200': {'gpu': 'B200', 'memory': '141GB'},
    }
    options = list(gpu_specs.keys())
    selected_index = options.index('A10G')  # default selection

    def fallback_prompt():
        """Plain numbered text prompt used when raw-mode input is unavailable."""
        try:
            print("\n📊 Available GPU Options:")
            for i, gpu_type in enumerate(options, 1):
                specs = gpu_specs[gpu_type]
                print(f"  {i}. {gpu_type} ({specs['memory']})")
            print("  Default: A10G")

            choice = input("\n🔍 Select GPU type (number or name, default is A10G): ").strip()
            if not choice:
                print("✅ Using default GPU: A10G")
                return "A10G"
            if choice.isdigit():
                index = int(choice) - 1
                if 0 <= index < len(options):
                    selected = options[index]
                    print(f"✅ Selected GPU: {selected}")
                    return selected
                print("⚠️ Invalid number. Using default: A10G")
                return "A10G"
            if choice in options:
                print(f"✅ Selected GPU: {choice}")
                return choice
            print(f"⚠️ Invalid choice '{choice}'. Using default: A10G")
            return "A10G"
        except KeyboardInterrupt:
            print("\n🛑 Selection cancelled.")
            sys.exit(1)
        except Exception as fallback_error:
            print(f"❌ Error in fallback input: {fallback_error}")
            print("✅ Using default GPU: A10G")
            return "A10G"

    # Raw-mode keyboard input needs a POSIX terminal.  On Windows (no termios)
    # the original unguarded `import termios` raised an uncaught ImportError;
    # route that case — and a non-TTY stdin — to the text fallback instead.
    try:
        import tty
        import termios
    except ImportError:
        print("⚠️ Interactive menu not supported on this platform.")
        print("🔄 Falling back to simple text input...")
        return fallback_prompt()
    if not sys.stdin.isatty():
        print("⚠️ stdin is not a terminal.")
        print("🔄 Falling back to simple text input...")
        return fallback_prompt()

    def get_key():
        """Read one keypress in raw mode, decoding arrow-key escape sequences."""
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
            if ch == '\x1b':  # Escape sequence (arrow keys are ESC [ A / ESC [ B)
                if sys.stdin.read(1) == '[':
                    ch3 = sys.stdin.read(1)
                    if ch3 == 'A':
                        return 'UP'
                    if ch3 == 'B':
                        return 'DOWN'
            elif ch == '\r' or ch == '\n':
                return 'ENTER'
            elif ch == '\x03':  # Ctrl+C
                return 'CTRL_C'
            return ch
        finally:
            # Always restore the terminal, even if a read fails mid-sequence.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)

    def display_menu():
        """Render the GPU table with the current selection highlighted."""
        print("\n📊 Available GPU Options:")
        print("┌──────────────┬─────────┐")
        print("│ GPU Type     │ VRAM    │")
        print("├──────────────┼─────────┤")
        for i, gpu_type in enumerate(options):
            specs = gpu_specs[gpu_type]
            number_part = f"{i+1}."
            if i == selected_index:
                prefix, suffix = "> ", " ←"
            else:
                prefix, suffix = "  ", ""
            # Fixed-width GPU column keeps the table borders aligned.
            gpu_display = f"{prefix}{number_part} {gpu_type}"
            gpu_padded = f"{gpu_display:<12}"
            print(f"│ {gpu_padded} │ {specs['memory']:<7} │{suffix}")
        print("└──────────────┴─────────┘")
        print("Use ↑/↓ arrows to select, Enter to confirm, Ctrl+C to cancel")

    # Clear screen and draw the initial menu.
    print("\033[2J\033[H", end="")
    display_menu()

    while True:
        try:
            key = get_key()
            if key == 'UP':
                selected_index = (selected_index - 1) % len(options)
                print("\033[2J\033[H", end="")  # redraw from a clean screen
                display_menu()
            elif key == 'DOWN':
                selected_index = (selected_index + 1) % len(options)
                print("\033[2J\033[H", end="")
                display_menu()
            elif key == 'ENTER':
                selected_gpu = options[selected_index]
                print(f"\n✅ Selected GPU: {selected_gpu}")
                return selected_gpu
            elif key == 'CTRL_C':
                print("\n🛑 Selection cancelled.")
                sys.exit(1)
        except KeyboardInterrupt:
            print("\n🛑 Selection cancelled.")
            sys.exit(1)
        except Exception as e:
            # termios/read errors on unusual terminals land here; degrade
            # gracefully to the same text fallback as the import-failure path.
            print(f"\n❌ Error with interactive menu: {e}")
            print("🔄 Falling back to simple text input...")
            return fallback_prompt()
3893
+
3894
+
3863
3895
 
3864
3896
  def preprocess_commands_with_llm(setup_commands, stored_credentials, api_key=None):
3865
3897
  """
@@ -3909,57 +3941,21 @@ Return only the JSON array, no other text.
3909
3941
  print("⚠️ No OpenAI API key available for command preprocessing")
3910
3942
  return setup_commands
3911
3943
 
3912
- # Try OpenAI API first
3913
- try:
3914
- import openai
3915
- client = openai.OpenAI(api_key=api_key)
3916
-
3917
- response = client.chat.completions.create(
3918
- model="gpt-3.5-turbo",
3919
- messages=[
3920
- {"role": "system", "content": "You are a command preprocessing assistant that modifies setup commands to use available credentials and make them non-interactive."},
3921
- {"role": "user", "content": prompt}
3922
- ],
3923
- temperature=0.1,
3924
- max_tokens=2000
3925
- )
3926
-
3927
- result = response.choices[0].message.content.strip()
3928
-
3929
- except Exception as e:
3930
- print(f"⚠️ OpenAI API call failed: {e}")
3931
-
3932
- # Try Claude as fallback if available
3933
- if anthropic is not None:
3934
- print("🔄 Trying Claude-4-Sonnet as fallback for command preprocessing...")
3935
- try:
3936
- # Get Anthropic API key
3937
- anthropic_api_key = os.environ.get("ANTHROPIC_API_KEY")
3938
- if not anthropic_api_key:
3939
- print("⚠️ No ANTHROPIC_API_KEY found in environment")
3940
- return fallback_preprocess_commands(setup_commands, stored_credentials)
3941
-
3942
- # Create Anthropic client
3943
- client = anthropic.Anthropic(api_key=anthropic_api_key)
3944
-
3945
- print("🤖 Calling Claude-4-Sonnet for command preprocessing...")
3946
- message = client.messages.create(
3947
- model="claude-3-5-sonnet-20241022",
3948
- max_tokens=2000,
3949
- messages=[
3950
- {"role": "user", "content": prompt}
3951
- ]
3952
- )
3953
-
3954
- result = message.content[0].text.strip()
3955
- print("✅ Claude preprocessing completed")
3956
-
3957
- except Exception as claude_error:
3958
- print(f"❌ Claude API call failed: {claude_error}")
3959
- return fallback_preprocess_commands(setup_commands, stored_credentials)
3960
- else:
3961
- print("⚠️ Claude fallback not available (anthropic library not installed)")
3962
- return fallback_preprocess_commands(setup_commands, stored_credentials)
3944
+ # Call OpenAI API
3945
+ import openai
3946
+ client = openai.OpenAI(api_key=api_key)
3947
+
3948
+ response = client.chat.completions.create(
3949
+ model="gpt-3.5-turbo",
3950
+ messages=[
3951
+ {"role": "system", "content": "You are a command preprocessing assistant that modifies setup commands to use available credentials and make them non-interactive."},
3952
+ {"role": "user", "content": prompt}
3953
+ ],
3954
+ temperature=0.1,
3955
+ max_tokens=2000
3956
+ )
3957
+
3958
+ result = response.choices[0].message.content.strip()
3963
3959
 
3964
3960
  # Debug: Print the raw response
3965
3961
  print(f"🔍 LLM Response: {result[:200]}...")
@@ -4202,6 +4198,7 @@ if __name__ == "__main__":
4202
4198
  parser = argparse.ArgumentParser()
4203
4199
  parser.add_argument('--gpu', type=str, help='GPU type (e.g., A10G, T4, A100-80GB). If not provided, will prompt for GPU selection.')
4204
4200
  parser.add_argument('--repo-url', type=str, help='Repository URL to clone')
4201
+ parser.add_argument('--repo-name', type=str, help='Repository name override')
4205
4202
  parser.add_argument('--setup-commands', type=str, nargs='+', help='Setup commands to run (deprecated)')
4206
4203
  parser.add_argument('--setup-commands-json', type=str, help='Setup commands as JSON array')
4207
4204
  parser.add_argument('--commands-file', type=str, help='Path to file containing setup commands (one per line)')
@@ -4238,6 +4235,11 @@ if __name__ == "__main__":
4238
4235
  _handle_auth_commands(auth_manager, args)
4239
4236
  sys.exit(0)
4240
4237
 
4238
+ # If --list-gpus is specified, just show GPU options and exit
4239
+ if args.list_gpus:
4240
+ prompt_for_gpu()
4241
+ sys.exit(0)
4242
+
4241
4243
  # If no arguments or only --show-examples is provided, show usage examples
4242
4244
  if len(sys.argv) == 1 or args.show_examples:
4243
4245
  show_usage_examples()
@@ -4296,6 +4298,7 @@ if __name__ == "__main__":
4296
4298
  else:
4297
4299
  print("\n📋 No GPU type specified with --gpu flag.")
4298
4300
  print("🔄 Prompting for GPU selection...")
4301
+ gpu_type = prompt_for_gpu()
4299
4302
  args.gpu = gpu_type
4300
4303
 
4301
4304
  # Display configuration after GPU selection
@@ -4309,7 +4312,16 @@ if __name__ == "__main__":
4309
4312
  print(f"Setup Commands: {len(args.setup_commands)} custom commands")
4310
4313
  else:
4311
4314
  print("Setup Commands: Auto-detect from repository")
4312
-
4315
+
4316
+ # Confirm settings
4317
+ try:
4318
+ proceed = input("Proceed with these settings? (Y/n): ").strip().lower()
4319
+ if proceed in ('n', 'no'):
4320
+ print("🛑 Operation cancelled by user.")
4321
+ sys.exit(0)
4322
+ except KeyboardInterrupt:
4323
+ print("\n🛑 Operation cancelled by user.")
4324
+ sys.exit(0)
4313
4325
 
4314
4326
  # Interactive mode or missing required arguments
4315
4327
  if args.interactive or not args.repo_url or not args.volume_name: