gitarsenal-cli 1.8.3 → 1.8.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "gitarsenal-cli",
3
- "version": "1.8.3",
3
+ "version": "1.8.4",
4
4
  "description": "CLI tool for creating Modal sandboxes with GitHub repositories",
5
5
  "main": "index.js",
6
6
  "bin": {
@@ -1616,7 +1616,6 @@ Consider the current directory, system information, directory contents, and avai
1616
1616
  IMPORTANT GUIDELINES:
1617
1617
  1. For any commands that might ask for yes/no confirmation, use the appropriate non-interactive flag:
1618
1618
  - For apt/apt-get: use -y or --yes
1619
- - For pip: use --no-input
1620
1619
  - For rm: use -f or --force
1621
1620
 
1622
1621
  2. If the error indicates a file is not found:
@@ -3860,6 +3859,152 @@ def prompt_for_gpu():
3860
3859
  print("✅ Using default GPU: A10G")
3861
3860
  return "A10G"
3862
3861
 
3862
+
3863
+
3864
def preprocess_commands_with_llm(setup_commands, stored_credentials, api_key=None):
    """Use an LLM to rewrite setup commands so they are non-interactive and
    use the available stored credentials.

    Args:
        setup_commands: List of shell command strings to preprocess.
        stored_credentials: Mapping of credential name -> secret value.
        api_key: OpenAI API key; when missing, commands are returned as-is.

    Returns:
        List of processed command strings. On any failure this falls back to
        fallback_preprocess_commands() so callers always get a usable list.
    """
    if not setup_commands or not stored_credentials:
        return setup_commands

    # Check the key before doing any prompt-building work.
    if not api_key:
        print("⚠️ No OpenAI API key available for command preprocessing")
        return setup_commands

    try:
        # List which credentials exist WITHOUT leaking any part of their
        # values (the previous version printed the first 8 characters, which
        # exposes secret prefixes in logs and in the LLM prompt).
        credentials_info = "\n".join(f"- {key}: <available>" for key in stored_credentials)

        prompt = f"""
You are a command preprocessing assistant. Your task is to modify setup commands to use available credentials and make them non-interactive.

AVAILABLE CREDENTIALS:
{credentials_info}

ORIGINAL COMMANDS:
{chr(10).join(f"{i+1}. {cmd}" for i, cmd in enumerate(setup_commands))}

INSTRUCTIONS:
1. Replace any authentication commands with token-based versions using available credentials
2. Make all commands non-interactive (add --yes, --no-input, -y flags where needed)
3. Use environment variables or direct token injection where appropriate
4. Skip commands that cannot be made non-interactive due to missing credentials
5. Add any necessary environment variable exports

Return the modified commands as a JSON array of strings. If a command should be skipped, prefix it with "# SKIPPED: ".

Example transformations:
- "huggingface-cli login" → "huggingface-cli login --token $HUGGINGFACE_TOKEN"
- "npm install" → "npm install --yes"

Return only the JSON array, no other text.
"""

        # Imported lazily so environments without the openai package can
        # still use the fallback path.
        import openai
        client = openai.OpenAI(api_key=api_key)

        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "You are a command preprocessing assistant that modifies setup commands to use available credentials and make them non-interactive."},
                {"role": "user", "content": prompt},
            ],
            temperature=0.1,
            max_tokens=2000,
        )

        result = response.choices[0].message.content.strip()

        # Debug: Print the raw response
        print(f"🔍 LLM Response: {result[:200]}...")

        # Models frequently wrap JSON in markdown code fences despite being
        # told not to; strip fence lines before parsing.
        if result.startswith("```"):
            result = "\n".join(
                line for line in result.splitlines() if not line.startswith("```")
            ).strip()

        # Parse the JSON response
        import json
        try:
            processed_commands = json.loads(result)
            if isinstance(processed_commands, list):
                print(f"🔧 LLM preprocessed {len(processed_commands)} commands")
                # zip() guards against the LLM returning a different number of
                # commands than it was given (indexing setup_commands[i]
                # directly could raise IndexError).
                for i, (old, new) in enumerate(zip(setup_commands, processed_commands)):
                    if old != new:
                        print(f" {i+1}. {old} → {new}")
                return processed_commands
            else:
                print("⚠️ LLM returned invalid format, using fallback preprocessing")
                return fallback_preprocess_commands(setup_commands, stored_credentials)
        except json.JSONDecodeError as e:
            print(f"⚠️ Failed to parse LLM response: {e}")
            print("🔄 Using fallback preprocessing...")
            return fallback_preprocess_commands(setup_commands, stored_credentials)

    except Exception as e:
        print(f"⚠️ LLM preprocessing failed: {e}")
        print("🔄 Using fallback preprocessing...")
        return fallback_preprocess_commands(setup_commands, stored_credentials)
3953
+
3954
def fallback_preprocess_commands(setup_commands, stored_credentials):
    """Manually inject credentials and non-interactive flags into setup commands.

    Used when LLM preprocessing is unavailable or fails.

    Args:
        setup_commands: List of shell command strings.
        stored_credentials: Mapping of credential name -> secret value.

    Returns:
        List of processed command strings, same length and order as the input.
    """
    if not setup_commands or not stored_credentials:
        return setup_commands

    processed_commands = []

    for i, command in enumerate(setup_commands):
        processed_command = command
        # Whole-token view for flag checks: the previous substring tests
        # ('-y' in command) wrongly skipped commands whose package names
        # merely contain the flag text, e.g. "apt-get install python-yaml".
        tokens = command.split()

        # Hugging Face login: inject the token, or skip when unavailable.
        if 'huggingface-cli login' in command and '--token' not in command:
            if 'HUGGINGFACE_TOKEN' in stored_credentials:
                processed_command = "huggingface-cli login --token $HUGGINGFACE_TOKEN"
                print(f"🔧 Fallback: Injected HF token into command {i+1}")
            else:
                processed_command = f"# SKIPPED: {command} (no HF token available)"
                print(f"🔧 Fallback: Skipped command {i+1} (no HF token)")

        # OpenAI-related commands: export the key for child processes.
        # NOTE(review): `export OPENAI_API_KEY=$OPENAI_API_KEY` only helps if
        # the shell variable is already set but not exported — confirm the
        # caller seeds it into the environment.
        elif 'openai' in command.lower() and 'api_key' not in command.lower():
            if 'OPENAI_API_KEY' in stored_credentials:
                processed_command = f"export OPENAI_API_KEY=$OPENAI_API_KEY && {command}"
                print(f"🔧 Fallback: Added OpenAI API key export to command {i+1}")

        # npm install: add --yes unless an opt-out flag is already present.
        elif 'npm install' in command and '--yes' not in tokens and '--no-interactive' not in tokens:
            processed_command = command.replace('npm install', 'npm install --yes')
            print(f"🔧 Fallback: Made npm install non-interactive in command {i+1}")

        # git clone: shallow-clone to speed up setup (skip if any --depth
        # was already requested).
        elif command.strip().startswith('git clone') and '--depth' not in tokens:
            processed_command = command.replace('git clone', 'git clone --depth 1')
            print(f"🔧 Fallback: Made git clone shallow (--depth 1) in command {i+1}")

        # apt-get install: add -y for unattended installs.
        elif 'apt-get install' in command and '-y' not in tokens and '--yes' not in tokens:
            processed_command = command.replace('apt-get install', 'apt-get install -y')
            print(f"🔧 Fallback: Made apt-get install non-interactive in command {i+1}")

        processed_commands.append(processed_command)

    print(f"🔧 Fallback preprocessing completed: {len(processed_commands)} commands")
    return processed_commands
4007
+
3863
4008
  # Replace the existing GPU argument parsing in the main section
3864
4009
  if __name__ == "__main__":
3865
4010
  # Parse command line arguments when script is run directly
@@ -4145,89 +4290,3 @@ if __name__ == "__main__":
4145
4290
  # print("🧹 Cleaning up resources...")
4146
4291
  cleanup_modal_token()
4147
4292
  sys.exit(1)
4148
-
4149
- def preprocess_commands_with_llm(setup_commands, stored_credentials, api_key=None):
4150
- """
4151
- Use LLM to preprocess setup commands and inject available credentials.
4152
-
4153
- Args:
4154
- setup_commands: List of setup commands
4155
- stored_credentials: Dictionary of stored credentials
4156
- api_key: OpenAI API key for LLM calls
4157
-
4158
- Returns:
4159
- List of processed commands with credentials injected
4160
- """
4161
- if not setup_commands or not stored_credentials:
4162
- return setup_commands
4163
-
4164
- try:
4165
- # Create context for the LLM
4166
- credentials_info = "\n".join([f"- {key}: {value[:8]}..." for key, value in stored_credentials.items()])
4167
-
4168
- prompt = f"""
4169
- You are a command preprocessing assistant. Your task is to modify setup commands to use available credentials and make them non-interactive.
4170
-
4171
- AVAILABLE CREDENTIALS:
4172
- {credentials_info}
4173
-
4174
- ORIGINAL COMMANDS:
4175
- {chr(10).join([f"{i+1}. {cmd}" for i, cmd in enumerate(setup_commands)])}
4176
-
4177
- INSTRUCTIONS:
4178
- 1. Replace any authentication commands with token-based versions using available credentials
4179
- 2. Make all commands non-interactive (add --yes, --no-input, -y flags where needed)
4180
- 3. Use environment variables or direct token injection where appropriate
4181
- 4. Skip commands that cannot be made non-interactive due to missing credentials
4182
- 5. Add any necessary environment variable exports
4183
-
4184
- Return the modified commands as a JSON array of strings. If a command should be skipped, prefix it with "# SKIPPED: ".
4185
-
4186
- Example transformations:
4187
- - "huggingface-cli login" → "huggingface-cli login --token $HUGGINGFACE_TOKEN"
4188
- - "npm install" → "npm install --yes"
4189
- - "pip install package" → "pip install package --no-input"
4190
-
4191
- Return only the JSON array, no other text.
4192
- """
4193
-
4194
- if not api_key:
4195
- print("⚠️ No OpenAI API key available for command preprocessing")
4196
- return setup_commands
4197
-
4198
- # Call OpenAI API
4199
- import openai
4200
- client = openai.OpenAI(api_key=api_key)
4201
-
4202
- response = client.chat.completions.create(
4203
- model="gpt-3.5-turbo",
4204
- messages=[
4205
- {"role": "system", "content": "You are a command preprocessing assistant that modifies setup commands to use available credentials and make them non-interactive."},
4206
- {"role": "user", "content": prompt}
4207
- ],
4208
- temperature=0.1,
4209
- max_tokens=2000
4210
- )
4211
-
4212
- result = response.choices[0].message.content.strip()
4213
-
4214
- # Parse the JSON response
4215
- import json
4216
- try:
4217
- processed_commands = json.loads(result)
4218
- if isinstance(processed_commands, list):
4219
- print(f"🔧 LLM preprocessed {len(processed_commands)} commands")
4220
- for i, cmd in enumerate(processed_commands):
4221
- if cmd != setup_commands[i]:
4222
- print(f" {i+1}. {setup_commands[i]} → {cmd}")
4223
- return processed_commands
4224
- else:
4225
- print("⚠️ LLM returned invalid format, using original commands")
4226
- return setup_commands
4227
- except json.JSONDecodeError:
4228
- print("⚠️ Failed to parse LLM response, using original commands")
4229
- return setup_commands
4230
-
4231
- except Exception as e:
4232
- print(f"⚠️ LLM preprocessing failed: {e}")
4233
- return setup_commands
@@ -15,42 +15,94 @@ const pythonScriptPath = path.join(pythonScriptDir, 'test_modalSandboxScript.py'
15
15
  // Path to the original Python script
16
16
  const originalScriptPath = path.join(__dirname, '..', '..', '..', 'mcp-server', 'src', 'utils', 'test_modalSandboxScript.py');
17
17
 
18
// Check that the `uv` package manager is available, installing it with a
// series of best-effort methods if not. Returns true when `uv` is usable.
async function checkAndInstallUv() {
  try {
    // Already installed?
    await execAsync('uv --version');
    console.log(chalk.green('✅ uv is already installed'));
    return true;
  } catch (error) {
    console.log(chalk.yellow('⚠️ uv not found. Attempting to install...'));

    // Candidate installation methods, tried in order.
    // NOTE(review): `cargo install uv` is unlikely to build astral-sh/uv
    // (it is not published under that crate name) — confirm before relying
    // on it; kept last as a best-effort attempt.
    const installMethods = [
      'curl -LsSf https://astral.sh/uv/install.sh | sh',
      'pip install uv',
      'pip3 install uv',
      'cargo install uv'
    ];

    for (const method of installMethods) {
      try {
        console.log(chalk.gray(`🔄 Trying to install uv with: ${method}`));

        if (method.includes('curl')) {
          // Run the shell-script installer. Only the environment is
          // customized here: `stdio` is a spawn() option and is not
          // accepted by promisified exec(), so passing it was a no-op.
          await execAsync(method, {
            env: { ...process.env, SHELL: '/bin/bash' }
          });
        } else {
          await execAsync(method);
        }

        // Verify the binary is now reachable.
        // NOTE(review): the curl installer drops uv into ~/.local/bin,
        // which may not be on this process's PATH — verification can fail
        // even after a successful install; confirm PATH handling.
        await execAsync('uv --version');
        console.log(chalk.green('✅ uv installed successfully!'));
        return true;
      } catch (installError) {
        console.log(chalk.gray(`⚠️ ${method} failed, trying next...`));
      }
    }

    console.log(chalk.yellow('⚠️ Could not install uv automatically'));
    console.log(chalk.yellow('💡 Please install uv manually:'));
    console.log(chalk.yellow(' curl -LsSf https://astral.sh/uv/install.sh | sh'));
    console.log(chalk.yellow(' or: pip install uv'));
    console.log(chalk.yellow(' or: cargo install uv'));
    return false;
  }
}
67
+
18
68
// Install the Python packages the CLI depends on, preferring uv and falling
// back through the common pip invocations. Returns true on success.
async function installPythonPackages() {
  const packages = ['modal', 'gitingest', 'requests'];

  console.log(chalk.yellow(`📦 Installing Python packages: ${packages.join(', ')}`));

  // Try uv first, then fall back to pip commands.
  // Plain `uv pip install` refuses to run outside an active virtualenv, so
  // try `--system` first — that targets the interpreter on PATH, matching
  // what the pip fallbacks below do.
  const installCommands = [
    'uv pip install --system',
    'uv pip install',
    'pip3 install',
    'pip install',
    'python -m pip install',
    'py -m pip install'
  ];

  for (const installCommand of installCommands) {
    try {
      // Cheap existence check for the underlying binary before using it.
      const [cmd] = installCommand.split(' ');
      await execAsync(`${cmd} --version`);

      console.log(chalk.gray(`🔄 Using: ${installCommand}`));

      await execAsync(`${installCommand} ${packages.join(' ')}`, {
        env: { ...process.env, PYTHONIOENCODING: 'utf-8' }
      });

      console.log(chalk.green('✅ Python packages installed successfully!'));
      return true;
    } catch (error) {
      console.log(chalk.gray(`⚠️ ${installCommand} failed, trying next...`));
    }
  }

  console.log(chalk.red('❌ Failed to install Python packages'));
  console.log(chalk.yellow('💡 Please run manually:'));
  console.log(chalk.yellow(' uv pip install modal gitingest requests'));
  console.log(chalk.yellow(' or: pip install modal gitingest requests'));
  console.log(chalk.yellow(' or: pip3 install modal gitingest requests'));
  return false;
}
@@ -105,6 +157,10 @@ async function postinstall() {
105
157
  console.log(chalk.blue('🔍 Checking Git installation...'));
106
158
  await checkGit();
107
159
 
160
+ // Check and install uv if needed
161
+ console.log(chalk.blue('🔍 Checking for uv package manager...'));
162
+ await checkAndInstallUv();
163
+
108
164
  // Install Python packages
109
165
  console.log(chalk.blue('🔍 Installing Python dependencies...'));
110
166
  await installPythonPackages();
@@ -1616,7 +1616,6 @@ Consider the current directory, system information, directory contents, and avai
1616
1616
  IMPORTANT GUIDELINES:
1617
1617
  1. For any commands that might ask for yes/no confirmation, use the appropriate non-interactive flag:
1618
1618
  - For apt/apt-get: use -y or --yes
1619
- - For pip: use --no-input
1620
1619
  - For rm: use -f or --force
1621
1620
 
1622
1621
  2. If the error indicates a file is not found:
@@ -3860,6 +3859,152 @@ def prompt_for_gpu():
3860
3859
  print("✅ Using default GPU: A10G")
3861
3860
  return "A10G"
3862
3861
 
3862
+
3863
+
3864
def preprocess_commands_with_llm(setup_commands, stored_credentials, api_key=None):
    """Use an LLM to rewrite setup commands so they are non-interactive and
    use the available stored credentials.

    Args:
        setup_commands: List of shell command strings to preprocess.
        stored_credentials: Mapping of credential name -> secret value.
        api_key: OpenAI API key; when missing, commands are returned as-is.

    Returns:
        List of processed command strings. On any failure this falls back to
        fallback_preprocess_commands() so callers always get a usable list.
    """
    if not setup_commands or not stored_credentials:
        return setup_commands

    # Check the key before doing any prompt-building work.
    if not api_key:
        print("⚠️ No OpenAI API key available for command preprocessing")
        return setup_commands

    try:
        # List which credentials exist WITHOUT leaking any part of their
        # values (the previous version printed the first 8 characters, which
        # exposes secret prefixes in logs and in the LLM prompt).
        credentials_info = "\n".join(f"- {key}: <available>" for key in stored_credentials)

        prompt = f"""
You are a command preprocessing assistant. Your task is to modify setup commands to use available credentials and make them non-interactive.

AVAILABLE CREDENTIALS:
{credentials_info}

ORIGINAL COMMANDS:
{chr(10).join(f"{i+1}. {cmd}" for i, cmd in enumerate(setup_commands))}

INSTRUCTIONS:
1. Replace any authentication commands with token-based versions using available credentials
2. Make all commands non-interactive (add --yes, --no-input, -y flags where needed)
3. Use environment variables or direct token injection where appropriate
4. Skip commands that cannot be made non-interactive due to missing credentials
5. Add any necessary environment variable exports

Return the modified commands as a JSON array of strings. If a command should be skipped, prefix it with "# SKIPPED: ".

Example transformations:
- "huggingface-cli login" → "huggingface-cli login --token $HUGGINGFACE_TOKEN"
- "npm install" → "npm install --yes"

Return only the JSON array, no other text.
"""

        # Imported lazily so environments without the openai package can
        # still use the fallback path.
        import openai
        client = openai.OpenAI(api_key=api_key)

        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "You are a command preprocessing assistant that modifies setup commands to use available credentials and make them non-interactive."},
                {"role": "user", "content": prompt},
            ],
            temperature=0.1,
            max_tokens=2000,
        )

        result = response.choices[0].message.content.strip()

        # Debug: Print the raw response
        print(f"🔍 LLM Response: {result[:200]}...")

        # Models frequently wrap JSON in markdown code fences despite being
        # told not to; strip fence lines before parsing.
        if result.startswith("```"):
            result = "\n".join(
                line for line in result.splitlines() if not line.startswith("```")
            ).strip()

        # Parse the JSON response
        import json
        try:
            processed_commands = json.loads(result)
            if isinstance(processed_commands, list):
                print(f"🔧 LLM preprocessed {len(processed_commands)} commands")
                # zip() guards against the LLM returning a different number of
                # commands than it was given (indexing setup_commands[i]
                # directly could raise IndexError).
                for i, (old, new) in enumerate(zip(setup_commands, processed_commands)):
                    if old != new:
                        print(f" {i+1}. {old} → {new}")
                return processed_commands
            else:
                print("⚠️ LLM returned invalid format, using fallback preprocessing")
                return fallback_preprocess_commands(setup_commands, stored_credentials)
        except json.JSONDecodeError as e:
            print(f"⚠️ Failed to parse LLM response: {e}")
            print("🔄 Using fallback preprocessing...")
            return fallback_preprocess_commands(setup_commands, stored_credentials)

    except Exception as e:
        print(f"⚠️ LLM preprocessing failed: {e}")
        print("🔄 Using fallback preprocessing...")
        return fallback_preprocess_commands(setup_commands, stored_credentials)
3953
+
3954
def fallback_preprocess_commands(setup_commands, stored_credentials):
    """Manually inject credentials and non-interactive flags into setup commands.

    Used when LLM preprocessing is unavailable or fails.

    Args:
        setup_commands: List of shell command strings.
        stored_credentials: Mapping of credential name -> secret value.

    Returns:
        List of processed command strings, same length and order as the input.
    """
    if not setup_commands or not stored_credentials:
        return setup_commands

    processed_commands = []

    for i, command in enumerate(setup_commands):
        processed_command = command
        # Whole-token view for flag checks: the previous substring tests
        # ('-y' in command) wrongly skipped commands whose package names
        # merely contain the flag text, e.g. "apt-get install python-yaml".
        tokens = command.split()

        # Hugging Face login: inject the token, or skip when unavailable.
        if 'huggingface-cli login' in command and '--token' not in command:
            if 'HUGGINGFACE_TOKEN' in stored_credentials:
                processed_command = "huggingface-cli login --token $HUGGINGFACE_TOKEN"
                print(f"🔧 Fallback: Injected HF token into command {i+1}")
            else:
                processed_command = f"# SKIPPED: {command} (no HF token available)"
                print(f"🔧 Fallback: Skipped command {i+1} (no HF token)")

        # OpenAI-related commands: export the key for child processes.
        # NOTE(review): `export OPENAI_API_KEY=$OPENAI_API_KEY` only helps if
        # the shell variable is already set but not exported — confirm the
        # caller seeds it into the environment.
        elif 'openai' in command.lower() and 'api_key' not in command.lower():
            if 'OPENAI_API_KEY' in stored_credentials:
                processed_command = f"export OPENAI_API_KEY=$OPENAI_API_KEY && {command}"
                print(f"🔧 Fallback: Added OpenAI API key export to command {i+1}")

        # npm install: add --yes unless an opt-out flag is already present.
        elif 'npm install' in command and '--yes' not in tokens and '--no-interactive' not in tokens:
            processed_command = command.replace('npm install', 'npm install --yes')
            print(f"🔧 Fallback: Made npm install non-interactive in command {i+1}")

        # git clone: shallow-clone to speed up setup (skip if any --depth
        # was already requested).
        elif command.strip().startswith('git clone') and '--depth' not in tokens:
            processed_command = command.replace('git clone', 'git clone --depth 1')
            print(f"🔧 Fallback: Made git clone shallow (--depth 1) in command {i+1}")

        # apt-get install: add -y for unattended installs.
        elif 'apt-get install' in command and '-y' not in tokens and '--yes' not in tokens:
            processed_command = command.replace('apt-get install', 'apt-get install -y')
            print(f"🔧 Fallback: Made apt-get install non-interactive in command {i+1}")

        processed_commands.append(processed_command)

    print(f"🔧 Fallback preprocessing completed: {len(processed_commands)} commands")
    return processed_commands
4007
+
3863
4008
  # Replace the existing GPU argument parsing in the main section
3864
4009
  if __name__ == "__main__":
3865
4010
  # Parse command line arguments when script is run directly
@@ -4145,89 +4290,3 @@ if __name__ == "__main__":
4145
4290
  # print("🧹 Cleaning up resources...")
4146
4291
  cleanup_modal_token()
4147
4292
  sys.exit(1)
4148
-
4149
- def preprocess_commands_with_llm(setup_commands, stored_credentials, api_key=None):
4150
- """
4151
- Use LLM to preprocess setup commands and inject available credentials.
4152
-
4153
- Args:
4154
- setup_commands: List of setup commands
4155
- stored_credentials: Dictionary of stored credentials
4156
- api_key: OpenAI API key for LLM calls
4157
-
4158
- Returns:
4159
- List of processed commands with credentials injected
4160
- """
4161
- if not setup_commands or not stored_credentials:
4162
- return setup_commands
4163
-
4164
- try:
4165
- # Create context for the LLM
4166
- credentials_info = "\n".join([f"- {key}: {value[:8]}..." for key, value in stored_credentials.items()])
4167
-
4168
- prompt = f"""
4169
- You are a command preprocessing assistant. Your task is to modify setup commands to use available credentials and make them non-interactive.
4170
-
4171
- AVAILABLE CREDENTIALS:
4172
- {credentials_info}
4173
-
4174
- ORIGINAL COMMANDS:
4175
- {chr(10).join([f"{i+1}. {cmd}" for i, cmd in enumerate(setup_commands)])}
4176
-
4177
- INSTRUCTIONS:
4178
- 1. Replace any authentication commands with token-based versions using available credentials
4179
- 2. Make all commands non-interactive (add --yes, --no-input, -y flags where needed)
4180
- 3. Use environment variables or direct token injection where appropriate
4181
- 4. Skip commands that cannot be made non-interactive due to missing credentials
4182
- 5. Add any necessary environment variable exports
4183
-
4184
- Return the modified commands as a JSON array of strings. If a command should be skipped, prefix it with "# SKIPPED: ".
4185
-
4186
- Example transformations:
4187
- - "huggingface-cli login" → "huggingface-cli login --token $HUGGINGFACE_TOKEN"
4188
- - "npm install" → "npm install --yes"
4189
- - "pip install package" → "pip install package --no-input"
4190
-
4191
- Return only the JSON array, no other text.
4192
- """
4193
-
4194
- if not api_key:
4195
- print("⚠️ No OpenAI API key available for command preprocessing")
4196
- return setup_commands
4197
-
4198
- # Call OpenAI API
4199
- import openai
4200
- client = openai.OpenAI(api_key=api_key)
4201
-
4202
- response = client.chat.completions.create(
4203
- model="gpt-3.5-turbo",
4204
- messages=[
4205
- {"role": "system", "content": "You are a command preprocessing assistant that modifies setup commands to use available credentials and make them non-interactive."},
4206
- {"role": "user", "content": prompt}
4207
- ],
4208
- temperature=0.1,
4209
- max_tokens=2000
4210
- )
4211
-
4212
- result = response.choices[0].message.content.strip()
4213
-
4214
- # Parse the JSON response
4215
- import json
4216
- try:
4217
- processed_commands = json.loads(result)
4218
- if isinstance(processed_commands, list):
4219
- print(f"🔧 LLM preprocessed {len(processed_commands)} commands")
4220
- for i, cmd in enumerate(processed_commands):
4221
- if cmd != setup_commands[i]:
4222
- print(f" {i+1}. {setup_commands[i]} → {cmd}")
4223
- return processed_commands
4224
- else:
4225
- print("⚠️ LLM returned invalid format, using original commands")
4226
- return setup_commands
4227
- except json.JSONDecodeError:
4228
- print("⚠️ Failed to parse LLM response, using original commands")
4229
- return setup_commands
4230
-
4231
- except Exception as e:
4232
- print(f"⚠️ LLM preprocessing failed: {e}")
4233
- return setup_commands