gitarsenal-cli 1.5.4 → 1.5.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,120 +0,0 @@
-#!/usr/bin/env python3
-"""
-Test script to verify LLM debugging functionality
-"""
-
-import os
-import sys
-import requests
-import getpass
-
-def test_openai_connection():
-    """Test if we can connect to OpenAI API"""
-    print("🔍 Testing OpenAI API connection...")
-
-    # Try to get API key
-    api_key = os.environ.get("OPENAI_API_KEY")
-    if not api_key:
-        print("❌ No OPENAI_API_KEY environment variable found")
-        print("💡 Please set your OpenAI API key:")
-        print(" export OPENAI_API_KEY='your-api-key-here'")
-        return False
-
-    print(f"✅ Found API key (length: {len(api_key)})")
-
-    # Test API connection
-    headers = {
-        "Content-Type": "application/json",
-        "Authorization": f"Bearer {api_key}"
-    }
-
-    payload = {
-        "model": "gpt-4o-mini",
-        "messages": [
-            {"role": "user", "content": "Say 'Hello, LLM debugging is working!'"}
-        ],
-        "max_tokens": 50
-    }
-
-    try:
-        print("🤖 Testing API call...")
-        response = requests.post(
-            "https://api.openai.com/v1/chat/completions",
-            headers=headers,
-            json=payload,
-            timeout=30
-        )
-
-        if response.status_code == 200:
-            result = response.json()
-            message = result["choices"][0]["message"]["content"]
-            print(f"✅ API test successful: {message}")
-            return True
-        else:
-            print(f"❌ API test failed with status code: {response.status_code}")
-            print(f"Response: {response.text}")
-            return False
-
-    except Exception as e:
-        print(f"❌ API test failed with exception: {e}")
-        return False
-
-def test_llm_debug_function():
-    """Test the LLM debug function from the main script"""
-    print("\n🔍 Testing LLM debug function...")
-
-    # Import the function from the main script
-    try:
-        # Add the current directory to Python path
-        sys.path.insert(0, os.path.dirname(__file__))
-
-        # Import the function
-        from test_modalSandboxScript import call_openai_for_debug
-
-        # Test with a simple command failure
-        test_command = "ls /nonexistent/directory"
-        test_error = "ls: cannot access '/nonexistent/directory': No such file or directory"
-
-        print(f"🧪 Testing with command: {test_command}")
-        print(f"🧪 Error: {test_error}")
-
-        result = call_openai_for_debug(test_command, test_error)
-
-        if result:
-            print(f"✅ LLM debug function returned: {result}")
-            return True
-        else:
-            print("❌ LLM debug function returned None")
-            return False
-
-    except Exception as e:
-        print(f"❌ Error testing LLM debug function: {e}")
-        return False
-
-if __name__ == "__main__":
-    print("🧪 Testing LLM debugging functionality...")
-    print("=" * 60)
-
-    # Test 1: OpenAI API connection
-    api_ok = test_openai_connection()
-
-    # Test 2: LLM debug function
-    if api_ok:
-        debug_ok = test_llm_debug_function()
-    else:
-        debug_ok = False
-
-    print("\n" + "=" * 60)
-    print("📊 Test Results:")
-    print(f" OpenAI API Connection: {'✅ PASS' if api_ok else '❌ FAIL'}")
-    print(f" LLM Debug Function: {'✅ PASS' if debug_ok else '❌ FAIL'}")
-
-    if api_ok and debug_ok:
-        print("\n🎉 All tests passed! LLM debugging should work.")
-    else:
-        print("\n⚠️ Some tests failed. LLM debugging may not work properly.")
-        if not api_ok:
-            print("💡 To fix OpenAI API issues:")
-            print(" 1. Get an API key from https://platform.openai.com/api-keys")
-            print(" 2. Set it as environment variable: export OPENAI_API_KEY='your-key'")
-            print(" 3. Run this test again")