gitarsenal-cli 1.5.4 → 1.5.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/python/test_modalSandboxScript.py +3 -7
- package/install.sh +0 -30
- package/python/MODAL_PROXY_README.md +0 -145
- package/python/README.md +0 -68
- package/python/fix_commands.py +0 -71
- package/python/fixed_function.py +0 -58
- package/python/manage_credentials.py +0 -119
- package/python/modal_logs_patch.py +0 -86
- package/python/patch_modal_script.py +0 -75
- package/python/run_with_modal_token.py +0 -47
- package/python/test_import.py +0 -55
- package/python/test_llm_debug.py +0 -120
- package/python/test_modalSandboxScript.py.bak +0 -3672
- package/python/test_modal_auth.py +0 -90
- package/python/test_token_cleanup.py +0 -256
- package/python/verify_env_vars.py +0 -64
package/python/run_with_modal_token.py
DELETED
@@ -1,47 +0,0 @@
-#!/usr/bin/env python3
-"""
-Run With Modal Token - Wrapper Script
-
-This script sets up the Modal token in the environment and then runs a specified command.
-It's useful for running commands that need Modal authentication without modifying them.
-
-Usage:
-    python run_with_modal_token.py <command> [args...]
-
-Example:
-    python run_with_modal_token.py python modal_proxy_service.py
-"""
-
-import os
-import sys
-import subprocess
-from setup_modal_token import setup_modal_token
-
-def main():
-    # Set up Modal token (should always succeed with built-in token)
-    setup_modal_token()
-    print("✅ Using built-in Modal token for freemium service")
-
-    # Check if a command was provided
-    if len(sys.argv) < 2:
-        print("❌ No command provided")
-        print(f"Usage: {sys.argv[0]} <command> [args...]")
-        sys.exit(1)
-
-    # Get the command and arguments
-    cmd = sys.argv[1]
-    args = sys.argv[2:]
-
-    # Print what we're about to run
-    print(f"🚀 Running command with Modal token: {cmd} {' '.join(args)}")
-
-    # Run the command
-    try:
-        result = subprocess.run([cmd] + args)
-        sys.exit(result.returncode)
-    except Exception as e:
-        print(f"❌ Error running command: {e}")
-        sys.exit(1)
-
-if __name__ == "__main__":
-    main()
package/python/test_import.py
DELETED
@@ -1,55 +0,0 @@
-#!/usr/bin/env python3
-"""
-Test importing and using get_tokens from fetch_modal_tokens
-"""
-
-import os
-import sys
-
-def main():
-    """Main function to test importing and using get_tokens"""
-    print("🔍 Testing import of get_tokens from fetch_modal_tokens...")
-
-    # Set a test API key for the script to use
-    os.environ["GITARSENAL_API_KEY"] = "test_key"
-
-    try:
-        # Import the function
-        from fetch_modal_tokens import get_tokens
-        print("✅ Successfully imported get_tokens from fetch_modal_tokens")
-
-        # Call the function
-        print("🔄 Calling get_tokens()...")
-        token_id, token_secret = get_tokens()
-
-        # Check the results
-        print("✅ Successfully called get_tokens()")
-        print(f"   token_id: {token_id[:5]}...{token_id[-5:]}")
-        print(f"   token_secret: {token_secret[:5]}...{token_secret[-5:]}")
-
-        # Check if environment variables were set
-        if os.environ.get("MODAL_TOKEN_ID") == token_id:
-            print("✅ MODAL_TOKEN_ID environment variable set correctly")
-        else:
-            print("❌ MODAL_TOKEN_ID environment variable not set correctly")
-
-        if os.environ.get("MODAL_TOKEN_SECRET") == token_secret:
-            print("✅ MODAL_TOKEN_SECRET environment variable set correctly")
-        else:
-            print("❌ MODAL_TOKEN_SECRET environment variable not set correctly")
-
-        # Check if OPENAI_API_KEY was set
-        if os.environ.get("OPENAI_API_KEY"):
-            print("✅ OPENAI_API_KEY environment variable set")
-            print(f"   length: {len(os.environ.get('OPENAI_API_KEY'))}")
-        else:
-            print("❌ OPENAI_API_KEY environment variable not set")
-
-        return True
-    except Exception as e:
-        print(f"❌ Error: {e}")
-        return False
-
-if __name__ == "__main__":
-    success = main()
-    sys.exit(0 if success else 1)
package/python/test_llm_debug.py
DELETED
@@ -1,120 +0,0 @@
-#!/usr/bin/env python3
-"""
-Test script to verify LLM debugging functionality
-"""
-
-import os
-import sys
-import requests
-import getpass
-
-def test_openai_connection():
-    """Test if we can connect to OpenAI API"""
-    print("🔍 Testing OpenAI API connection...")
-
-    # Try to get API key
-    api_key = os.environ.get("OPENAI_API_KEY")
-    if not api_key:
-        print("❌ No OPENAI_API_KEY environment variable found")
-        print("💡 Please set your OpenAI API key:")
-        print("   export OPENAI_API_KEY='your-api-key-here'")
-        return False
-
-    print(f"✅ Found API key (length: {len(api_key)})")
-
-    # Test API connection
-    headers = {
-        "Content-Type": "application/json",
-        "Authorization": f"Bearer {api_key}"
-    }
-
-    payload = {
-        "model": "gpt-4o-mini",
-        "messages": [
-            {"role": "user", "content": "Say 'Hello, LLM debugging is working!'"}
-        ],
-        "max_tokens": 50
-    }
-
-    try:
-        print("🤖 Testing API call...")
-        response = requests.post(
-            "https://api.openai.com/v1/chat/completions",
-            headers=headers,
-            json=payload,
-            timeout=30
-        )
-
-        if response.status_code == 200:
-            result = response.json()
-            message = result["choices"][0]["message"]["content"]
-            print(f"✅ API test successful: {message}")
-            return True
-        else:
-            print(f"❌ API test failed with status code: {response.status_code}")
-            print(f"Response: {response.text}")
-            return False
-
-    except Exception as e:
-        print(f"❌ API test failed with exception: {e}")
-        return False
-
-def test_llm_debug_function():
-    """Test the LLM debug function from the main script"""
-    print("\n🔍 Testing LLM debug function...")
-
-    # Import the function from the main script
-    try:
-        # Add the current directory to Python path
-        sys.path.insert(0, os.path.dirname(__file__))
-
-        # Import the function
-        from test_modalSandboxScript import call_openai_for_debug
-
-        # Test with a simple command failure
-        test_command = "ls /nonexistent/directory"
-        test_error = "ls: cannot access '/nonexistent/directory': No such file or directory"
-
-        print(f"🧪 Testing with command: {test_command}")
-        print(f"🧪 Error: {test_error}")
-
-        result = call_openai_for_debug(test_command, test_error)
-
-        if result:
-            print(f"✅ LLM debug function returned: {result}")
-            return True
-        else:
-            print("❌ LLM debug function returned None")
-            return False
-
-    except Exception as e:
-        print(f"❌ Error testing LLM debug function: {e}")
-        return False
-
-if __name__ == "__main__":
-    print("🧪 Testing LLM debugging functionality...")
-    print("=" * 60)
-
-    # Test 1: OpenAI API connection
-    api_ok = test_openai_connection()
-
-    # Test 2: LLM debug function
-    if api_ok:
-        debug_ok = test_llm_debug_function()
-    else:
-        debug_ok = False
-
-    print("\n" + "=" * 60)
-    print("📊 Test Results:")
-    print(f"   OpenAI API Connection: {'✅ PASS' if api_ok else '❌ FAIL'}")
-    print(f"   LLM Debug Function: {'✅ PASS' if debug_ok else '❌ FAIL'}")
-
-    if api_ok and debug_ok:
-        print("\n🎉 All tests passed! LLM debugging should work.")
-    else:
-        print("\n⚠️ Some tests failed. LLM debugging may not work properly.")
-        if not api_ok:
-            print("💡 To fix OpenAI API issues:")
-            print("   1. Get an API key from https://platform.openai.com/api-keys")
-            print("   2. Set it as environment variable: export OPENAI_API_KEY='your-key'")
-            print("   3. Run this test again")