issuer 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. checksums.yaml +7 -0
  2. data/.rspec +3 -0
  3. data/.vale/config/vocabularies/issuer/accept.txt +63 -0
  4. data/.vale/config/vocabularies/issuer/reject.txt +21 -0
  5. data/.vale.ini +42 -0
  6. data/Dockerfile +43 -0
  7. data/LICENSE +21 -0
  8. data/README.adoc +539 -0
  9. data/Rakefile +70 -0
  10. data/bin/console +0 -0
  11. data/bin/issuer +13 -0
  12. data/bin/setup +0 -0
  13. data/examples/README.adoc +56 -0
  14. data/examples/advanced-stub-example.yml +50 -0
  15. data/examples/basic-example.yml +33 -0
  16. data/examples/minimal-example.yml +9 -0
  17. data/examples/new-project-issues.yml +162 -0
  18. data/examples/validation-test.yml +8 -0
  19. data/exe/issuer +5 -0
  20. data/issuer.gemspec +43 -0
  21. data/lib/issuer/apis/github/client.rb +124 -0
  22. data/lib/issuer/cache.rb +197 -0
  23. data/lib/issuer/cli.rb +241 -0
  24. data/lib/issuer/issue.rb +393 -0
  25. data/lib/issuer/ops.rb +281 -0
  26. data/lib/issuer/sites/base.rb +109 -0
  27. data/lib/issuer/sites/factory.rb +31 -0
  28. data/lib/issuer/sites/github.rb +248 -0
  29. data/lib/issuer/version.rb +21 -0
  30. data/lib/issuer.rb +238 -0
  31. data/scripts/build.sh +40 -0
  32. data/scripts/lint-docs.sh +64 -0
  33. data/scripts/manage-runs.rb +175 -0
  34. data/scripts/pre-commit-template.sh +54 -0
  35. data/scripts/publish.sh +92 -0
  36. data/scripts/setup-vale.sh +59 -0
  37. data/specs/tests/README.adoc +451 -0
  38. data/specs/tests/check-github-connectivity.sh +130 -0
  39. data/specs/tests/cleanup-github-tests.sh +374 -0
  40. data/specs/tests/github-api/01-auth-connection.yml +21 -0
  41. data/specs/tests/github-api/02-basic-issues.yml +90 -0
  42. data/specs/tests/github-api/03-milestone-tests.yml +58 -0
  43. data/specs/tests/github-api/04-label-tests.yml +98 -0
  44. data/specs/tests/github-api/05-assignment-tests.yml +55 -0
  45. data/specs/tests/github-api/06-automation-tests.yml +102 -0
  46. data/specs/tests/github-api/07-error-tests.yml +29 -0
  47. data/specs/tests/github-api/08-complex-tests.yml +197 -0
  48. data/specs/tests/github-api/config.yml.example +17 -0
  49. data/specs/tests/rspec/cli_spec.rb +127 -0
  50. data/specs/tests/rspec/issue_spec.rb +184 -0
  51. data/specs/tests/rspec/issuer_spec.rb +5 -0
  52. data/specs/tests/rspec/ops_spec.rb +124 -0
  53. data/specs/tests/rspec/spec_helper.rb +54 -0
  54. data/specs/tests/run-github-api-tests.sh +424 -0
  55. metadata +200 -0
@@ -0,0 +1,124 @@
1
# frozen_string_literal: true

require_relative "spec_helper"

RSpec.describe Issuer::Ops do
  describe '.process_issues_data' do
    it 'handles scalar string issues' do
      raw_issues = ['String issue 1', 'String issue 2']
      shared_defaults = { 'vrsn' => '1.0.0' }

      built = described_class.process_issues_data(raw_issues, shared_defaults)

      expect(built.length).to eq(2)
      expect(built.map(&:summ)).to eq(['String issue 1', 'String issue 2'])
      expect(built.first.vrsn).to eq('1.0.0')
    end

    it 'handles mixed scalar and hash issues' do
      raw_issues = [
        'String issue',
        { 'summ' => 'Hash issue', 'body' => 'Custom body' }
      ]
      shared_defaults = { 'user' => 'testuser' }

      built = described_class.process_issues_data(raw_issues, shared_defaults)

      expect(built.length).to eq(2)
      expect(built.first.summ).to eq('String issue')
      expect(built.last.summ).to eq('Hash issue')
      expect(built.last.body).to eq('Custom body')
    end
  end

  describe '.apply_tag_logic' do
    # Three representative shapes: no tags, explicit tags, and an
    # append-style ("+"-prefixed) tag.
    let(:issues) do
      [
        Issuer::Issue.new({ 'summ' => 'Issue with no tags' }, {}),
        Issuer::Issue.new({ 'summ' => 'Issue with tags', 'tags' => ['bug'] }, {}),
        Issuer::Issue.new({ 'summ' => 'Issue with append tag', 'tags' => ['+critical'] }, {})
      ]
    end

    it 'applies append tags to all issues' do
      tagged = described_class.apply_tag_logic(issues, '+urgent,docs')

      # "+urgent" is an append tag, so every issue receives it
      expect(tagged.map(&:tags)).to all(include('urgent'))
    end

    it 'applies default tags only to issues without explicit tags' do
      tagged = described_class.apply_tag_logic(issues, '+urgent,docs')

      # Issue with no tags should get default tags
      expect(tagged[0].tags).to include('docs')

      # Issue with explicit tags should not get default tags
      expect(tagged[1].tags).not_to include('docs')
      expect(tagged[1].tags).to include('bug')
    end

    it 'processes + prefix correctly in existing tags' do
      tagged = described_class.apply_tag_logic(issues, 'docs')

      # Issue with +critical should have critical (without +) and docs
      expect(tagged[2].tags).to include('critical', 'docs')
      expect(tagged[2].tags).not_to include('+critical')
    end
  end

  describe '.apply_stub_logic' do
    let(:defaults) do
      {
        'stub' => true,
        'head' => 'HEADER',
        'body' => 'DEFAULT BODY',
        'tail' => 'FOOTER'
      }
    end

    it 'applies stub components when stub is true' do
      stubbed_issues = [Issuer::Issue.new({ 'summ' => 'Test', 'stub' => true }, defaults)]

      result = described_class.apply_stub_logic(stubbed_issues, defaults)

      expect(result.first.body).to eq("HEADER\nDEFAULT BODY\nFOOTER")
    end

    it 'does not apply stub components when stub is false' do
      plain_issues = [Issuer::Issue.new({ 'summ' => 'Test', 'body' => 'Custom', 'stub' => false }, defaults)]

      result = described_class.apply_stub_logic(plain_issues, defaults)

      expect(result.first.body).to eq('Custom')
    end

    it 'uses default stub setting when issue-level stub is not specified' do
      inherit_issues = [Issuer::Issue.new({ 'summ' => 'Test' }, defaults)]

      result = described_class.apply_stub_logic(inherit_issues, defaults)

      expect(result.first.body).to eq("HEADER\nDEFAULT BODY\nFOOTER")
    end

    it 'handles missing stub components gracefully' do
      minimal_defaults = { 'stub' => true, 'body' => 'BODY ONLY' }
      minimal_issues = [Issuer::Issue.new({ 'summ' => 'Test' }, minimal_defaults)]

      result = described_class.apply_stub_logic(minimal_issues, minimal_defaults)

      expect(result.first.body).to eq('BODY ONLY')
    end
  end
end
@@ -0,0 +1,54 @@
1
+ require "bundler/setup"
2
+ require "issuer"
3
+ require "yaml"
4
+ require "tempfile"
5
+
6
+ RSpec.configure do |config|
7
+ # Enable flags like --only-failures and --next-failure
8
+ config.example_status_persistence_file_path = ".rspec_status"
9
+
10
+ # Disable RSpec exposing methods globally on `main`
11
+ config.disable_monkey_patching!
12
+
13
+ config.expect_with :rspec do |c|
14
+ c.syntax = :expect
15
+ end
16
+ end
17
+
18
+ # Helper methods for tests
19
+ def create_temp_yaml_file content
20
+ file = Tempfile.new(['test', '.yml'])
21
+ if content.is_a?(Hash)
22
+ file.write(YAML.dump(content))
23
+ else
24
+ file.write(content)
25
+ end
26
+ file.close
27
+ file.path
28
+ end
29
+
30
+ def sample_imyml_content
31
+ {
32
+ '$meta' => {
33
+ 'proj' => 'test/repo',
34
+ 'defaults' => {
35
+ 'vrsn' => '1.0.0',
36
+ 'user' => 'testuser',
37
+ 'tags' => ['enhancement', '+automated'],
38
+ 'stub' => true,
39
+ 'head' => 'HEADER TEXT',
40
+ 'body' => 'DEFAULT BODY',
41
+ 'tail' => 'FOOTER TEXT'
42
+ }
43
+ },
44
+ 'issues' => [
45
+ {
46
+ 'summ' => 'Test issue 1',
47
+ 'body' => 'Test description',
48
+ 'tags' => ['bug'],
49
+ 'stub' => false
50
+ },
51
+ 'Simple string issue'
52
+ ]
53
+ }
54
+ end
@@ -0,0 +1,424 @@
1
#!/usr/bin/env zsh
# GitHub API Integration Test Runner
# Comprehensive testing suite for issuer CLI GitHub integration

set -e

# ANSI color codes used by the print_* helpers
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

# Paths and per-run metadata
TEST_DIR="$(dirname "$0")"
TESTS_DIR="${TEST_DIR}/github-api"
CONFIG_FILE="${TESTS_DIR}/config.yml"
RESULTS_DIR="${TEST_DIR}/results"
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
RESULTS_FILE="${RESULTS_DIR}/test_results_${TIMESTAMP}.log"

# Default configuration (override with config.yml)
TEST_REPO=""
TEST_USERNAME=""
DRY_RUN_FIRST=true
CLEANUP_AFTER_TESTS=false
VERBOSE=false
INTERACTIVE=true

# Counters maintained by run_single_test
TOTAL_TESTS=0
PASSED_TESTS=0
FAILED_TESTS=0
SKIPPED_TESTS=0

# Functions
39
# One-time banner shown at startup.
print_header() {
  echo
  echo -e "${BLUE}===========================================${NC}"
  echo -e "${BLUE} 🧪 GitHub API Integration Test Suite ${NC}"
  echo -e "${BLUE}===========================================${NC}"
  echo
}

# Section heading with an underline of 50 '=' characters.
print_section() {
  echo
  echo -e "${PURPLE}📋 $1${NC}"
  echo -e "${PURPLE}$(printf '=%.0s' {1..50})${NC}"
}

# Announces the test about to run.
print_test() {
  echo -e "${CYAN}🔍 Test: $1${NC}"
}

# Green success line.
print_success() {
  echo -e "${GREEN}✅ $1${NC}"
}

# Red failure line.
print_error() {
  echo -e "${RED}❌ $1${NC}"
}

# Yellow warning line.
print_warning() {
  echo -e "${YELLOW}⚠️  $1${NC}"
}

# Blue informational line.
print_info() {
  echo -e "${BLUE}ℹ️  $1${NC}"
}
72
+
73
# Load settings from CONFIG_FILE, a flat "key: value" YAML subset.
# Improvements over the bare read loop: blank lines and full-line comments
# are skipped, and inline comments plus surrounding quotes are stripped
# from values — previously `test_repo: foo  # note` would keep the comment
# text inside TEST_REPO.
load_config() {
  if [[ ! -f "$CONFIG_FILE" ]]; then
    print_warning "No config file found at $CONFIG_FILE"
    print_info "Run: cp ${TESTS_DIR}/config.yml.example ${CONFIG_FILE}"
    print_info "Then edit the config file with your test repository details"
    return 0
  fi

  print_info "Loading configuration from $CONFIG_FILE"
  # Parse YAML config (simple parsing for our needs)
  while IFS=': ' read -r key value; do
    # Skip blank lines and full-line comments
    if [[ -z "$key" || "$key" == \#* ]]; then
      continue
    fi
    # Drop any trailing inline comment, then quotes and stray spaces
    value="${value%%\#*}"
    value=$(echo "$value" | tr -d '"' | tr -d "'" | tr -d ' ')
    case "$key" in
      "test_repo")
        TEST_REPO="$value"
        ;;
      "test_username")
        TEST_USERNAME="$value"
        ;;
      "dry_run_first")
        DRY_RUN_FIRST="$value"
        ;;
      "cleanup_after_tests")
        CLEANUP_AFTER_TESTS="$value"
        ;;
      "verbose_output")
        VERBOSE="$value"
        ;;
    esac
  done < "$CONFIG_FILE"
}
102
+
103
# Verify that the required settings and a GitHub API token are present.
# Returns the number of problems found (0 = OK), so callers can use
# `if ! validate_config` to detect any failure.
validate_config() {
  local problems=0

  if [[ -z "$TEST_REPO" ]]; then
    print_error "TEST_REPO not configured"
    problems=$((problems + 1))
  fi

  if [[ -z "$TEST_USERNAME" ]]; then
    print_error "TEST_USERNAME not configured"
    problems=$((problems + 1))
  fi

  # Any one of these token variables is sufficient
  if [[ -z "$GITHUB_TOKEN" && -z "$GITHUB_ACCESS_TOKEN" && -z "$ISSUER_API_TOKEN" && -z "$ISSUER_GITHUB_TOKEN" ]]; then
    print_error "No GitHub token found in environment variables"
    print_info "Set one of: GITHUB_TOKEN, GITHUB_ACCESS_TOKEN, ISSUER_API_TOKEN, ISSUER_GITHUB_TOKEN"
    problems=$((problems + 1))
  fi

  return $problems
}
125
+
126
# Rewrite the placeholder repo/username in every test YAML file.
# Each file is first copied to *.bak so restore_test_files can undo this.
# Edits go through a temp file instead of `sed -i`: the in-place flag's
# syntax differs between GNU sed (`-i`) and BSD/macOS sed (`-i ''`), so
# the original form failed on macOS.
update_test_files() {
  print_section "Updating test files with configuration"

  local test_file tmp_file
  for test_file in "${TESTS_DIR}"/*.yml; do
    if [[ -f "$test_file" ]]; then
      # Create a backup for restore_test_files
      cp "$test_file" "${test_file}.bak"

      # Replace the repo placeholder before the bare username placeholder:
      # the former contains the latter as a prefix, so order matters.
      tmp_file="${test_file}.tmp"
      sed -e "s|your-username/issuer-test-repo|${TEST_REPO}|g" \
          -e "s|your-username|${TEST_USERNAME}|g" \
          "$test_file" > "$tmp_file"
      mv "$tmp_file" "$test_file"

      print_info "Updated $(basename "$test_file")"
    fi
  done
}
145
+
146
# Undo update_test_files: move every *.yml.bak back over its original.
restore_test_files() {
  print_info "Restoring original test files"
  local backup
  for backup in "${TESTS_DIR}"/*.yml.bak; do
    if [[ -f "$backup" ]]; then
      mv "$backup" "${backup%.bak}"
    fi
  done
}
155
+
156
# Run one issuer invocation and record pass/fail.
#
#   $1  path to the .yml test file
#   $2  display name used in logs
#   $3  extra CLI flags, space separated (may be empty)
#   $4  "true" if the command is expected to succeed; anything else means
#       the test passes only when the command fails (error-path tests)
run_single_test() {
  local test_file="$1"
  local test_name="$2"
  local flags="$3"
  local should_succeed="$4"
  local exit_code=0

  TOTAL_TESTS=$((TOTAL_TESTS + 1))

  print_test "Running $test_name"

  # Create results directory if it doesn't exist
  mkdir -p "$RESULTS_DIR"

  # Per-test output capture
  local output_file="${RESULTS_DIR}/$(basename "$test_file" .yml)_${TIMESTAMP}.log"

  if [[ "$VERBOSE" == "true" ]]; then
    echo "Command: bundle exec ruby -I lib exe/issuer \"$test_file\" $flags"
  fi

  # ${=flags} forces word splitting on the flag string: zsh (unlike
  # bash/sh) does not split unquoted parameter expansions, so a value like
  # "--auto-versions --auto-tags" would otherwise reach the CLI as one
  # single argument. The 120s timeout guards against hangs.
  if timeout 120 bundle exec ruby -I lib exe/issuer "$test_file" ${=flags} > "$output_file" 2>&1; then
    exit_code=0
  else
    exit_code=$?
    if [[ $exit_code -eq 124 ]]; then
      # 124 is timeout(1)'s status for an expired deadline
      echo "Test timed out after 120 seconds" >> "$output_file"
      print_error "$test_name timed out (120s timeout)"
    fi
  fi

  # Compare the outcome against the expectation
  if [[ "$should_succeed" == "true" ]]; then
    if [[ $exit_code -eq 0 ]]; then
      print_success "$test_name passed"
      PASSED_TESTS=$((PASSED_TESTS + 1))
      echo "✅ PASS: $test_name" >> "$RESULTS_FILE"
    else
      print_error "$test_name failed (exit code: $exit_code)"
      FAILED_TESTS=$((FAILED_TESTS + 1))
      echo "❌ FAIL: $test_name (exit code: $exit_code)" >> "$RESULTS_FILE"
      if [[ "$VERBOSE" == "true" ]]; then
        echo "Output:"
        cat "$output_file"
      fi
    fi
  else
    # Test should fail (e.g., error tests)
    if [[ $exit_code -ne 0 ]]; then
      print_success "$test_name failed as expected"
      PASSED_TESTS=$((PASSED_TESTS + 1))
      echo "✅ PASS: $test_name (failed as expected)" >> "$RESULTS_FILE"
    else
      print_error "$test_name should have failed but succeeded"
      FAILED_TESTS=$((FAILED_TESTS + 1))
      echo "❌ FAIL: $test_name (should have failed)" >> "$RESULTS_FILE"
    fi
  fi

  # Save detailed output
  echo "=== $test_name ===" >> "$RESULTS_FILE"
  cat "$output_file" >> "$RESULTS_FILE"
  echo >> "$RESULTS_FILE"
}
220
+
221
# Execute the full ordered test sequence. Most files run a --dry pass
# first when DRY_RUN_FIRST=true; error tests (07) are expected to fail.
run_test_suite() {
  print_section "Running Test Suite"

  # Initialize the results log with a run header
  mkdir -p "$RESULTS_DIR"
  {
    echo "GitHub API Integration Test Results"
    echo "Timestamp: $(date)"
    echo "Test Repository: $TEST_REPO"
    echo "Test Username: $TEST_USERNAME"
    echo "======================================"
    echo
  } > "$RESULTS_FILE"

  # Test 1: Authentication and Connection
  if [[ "$DRY_RUN_FIRST" == "true" ]]; then
    run_single_test "${TESTS_DIR}/01-auth-connection.yml" "01-auth-connection (dry-run)" "--dry" true
  fi
  run_single_test "${TESTS_DIR}/01-auth-connection.yml" "01-auth-connection" "" true

  # Test 2: Basic Issues
  if [[ "$DRY_RUN_FIRST" == "true" ]]; then
    run_single_test "${TESTS_DIR}/02-basic-issues.yml" "02-basic-issues (dry-run)" "--dry" true
  fi
  run_single_test "${TESTS_DIR}/02-basic-issues.yml" "02-basic-issues" "" true

  # Test 3: Milestones — manual creation, then automated
  if [[ "$DRY_RUN_FIRST" == "true" ]]; then
    run_single_test "${TESTS_DIR}/03-milestone-tests.yml" "03-milestone-tests (dry-run)" "--dry" true
  fi
  run_single_test "${TESTS_DIR}/03-milestone-tests.yml" "03-milestone-tests (manual)" "" true
  run_single_test "${TESTS_DIR}/03-milestone-tests.yml" "03-milestone-tests (auto)" "--auto-metadata" true

  # Test 4: Labels — manual creation, then automated
  if [[ "$DRY_RUN_FIRST" == "true" ]]; then
    run_single_test "${TESTS_DIR}/04-label-tests.yml" "04-label-tests (dry-run)" "--dry" true
  fi
  run_single_test "${TESTS_DIR}/04-label-tests.yml" "04-label-tests (manual)" "" true
  run_single_test "${TESTS_DIR}/04-label-tests.yml" "04-label-tests (auto)" "--auto-metadata" true

  # Test 5: Assignments
  if [[ "$DRY_RUN_FIRST" == "true" ]]; then
    run_single_test "${TESTS_DIR}/05-assignment-tests.yml" "05-assignment-tests (dry-run)" "--dry" true
  fi
  run_single_test "${TESTS_DIR}/05-assignment-tests.yml" "05-assignment-tests" "" true

  # Test 6: Automation flag combinations
  if [[ "$DRY_RUN_FIRST" == "true" ]]; then
    run_single_test "${TESTS_DIR}/06-automation-tests.yml" "06-automation-tests (dry-run)" "--dry" true
  fi
  run_single_test "${TESTS_DIR}/06-automation-tests.yml" "06-automation-tests (--auto-metadata)" "--auto-metadata" true
  run_single_test "${TESTS_DIR}/06-automation-tests.yml" "06-automation-tests (--auto-versions --auto-tags)" "--auto-versions --auto-tags" true
  run_single_test "${TESTS_DIR}/06-automation-tests.yml" "06-automation-tests (--auto-milestones --auto-labels)" "--auto-milestones --auto-labels" true

  # Test 7: Error paths — these invocations are SUPPOSED to fail
  run_single_test "${TESTS_DIR}/07-error-tests.yml" "07-error-tests (should fail)" "" false
  run_single_test "${TESTS_DIR}/07-error-tests.yml" "07-error-tests (dry-run should fail)" "--dry" false

  # Test 8: Complex combined scenarios
  if [[ "$DRY_RUN_FIRST" == "true" ]]; then
    run_single_test "${TESTS_DIR}/08-complex-tests.yml" "08-complex-tests (dry-run)" "--dry" true
  fi
  run_single_test "${TESTS_DIR}/08-complex-tests.yml" "08-complex-tests (auto)" "--auto-metadata" true
}
283
+
284
# Print the aggregate pass/fail numbers and append them to the log.
# Guards the success-rate computation: the original divided by
# TOTAL_TESTS unconditionally, which crashes under `set -e` when the
# suite ran zero tests.
print_summary() {
  print_section "Test Results Summary"

  echo -e "${BLUE}Total Tests: ${TOTAL_TESTS}${NC}"
  echo -e "${GREEN}Passed: ${PASSED_TESTS}${NC}"
  echo -e "${RED}Failed: ${FAILED_TESTS}${NC}"
  echo -e "${YELLOW}Skipped: ${SKIPPED_TESTS}${NC}"
  echo

  # Avoid a division-by-zero error when no tests were run
  local success_rate=0
  if [[ $TOTAL_TESTS -gt 0 ]]; then
    success_rate=$((PASSED_TESTS * 100 / TOTAL_TESTS))
  fi
  echo -e "${BLUE}Success Rate: ${success_rate}%${NC}"
  echo

  if [[ $FAILED_TESTS -eq 0 ]]; then
    print_success "All tests passed! 🎉"
  else
    print_error "Some tests failed. Check the results file: $RESULTS_FILE"
  fi

  echo
  print_info "Detailed results saved to: $RESULTS_FILE"

  # Add summary to results file
  {
    echo
    echo "=== SUMMARY ==="
    echo "Total Tests: $TOTAL_TESTS"
    echo "Passed: $PASSED_TESTS"
    echo "Failed: $FAILED_TESTS"
    echo "Success Rate: ${success_rate}%"
  } >> "$RESULTS_FILE"
}
314
+
315
# Post-run cleanup hook; currently only advises manual cleanup.
cleanup_tests() {
  if [[ "$CLEANUP_AFTER_TESTS" != "true" ]]; then
    return 0
  fi
  print_section "Cleaning up test artifacts"
  print_warning "Cleanup functionality not yet implemented"
  print_info "You may want to manually close/delete test issues in the repository"
}
322
+
323
# Entry point: parse CLI options, load/validate configuration, confirm
# interactively, then run the suite. The EXIT trap is installed BEFORE
# update_test_files (the original set it afterwards), so the *.yml.bak
# backups are restored even if the update step itself dies under `set -e`.
main() {
  # Handle command line arguments
  while [[ $# -gt 0 ]]; do
    case $1 in
      --help|-h)
        echo "GitHub API Integration Test Runner"
        echo
        echo "Usage: $0 [options]"
        echo
        echo "Options:"
        echo "  --help, -h          Show this help message"
        echo "  --config FILE       Use specific config file"
        echo "  --verbose, -v       Verbose output"
        echo "  --no-dry-run        Skip dry-run tests"
        echo "  --cleanup           Clean up after tests"
        echo "  --non-interactive   Run without prompts"
        echo
        echo "Environment Variables:"
        echo "  GITHUB_TOKEN         GitHub personal access token"
        echo "  GITHUB_ACCESS_TOKEN  Alternative token variable"
        echo "  ISSUER_API_TOKEN     Issuer-specific token variable"
        echo
        exit 0
        ;;
      --config)
        CONFIG_FILE="$2"
        shift 2
        ;;
      --verbose|-v)
        VERBOSE=true
        shift
        ;;
      --no-dry-run)
        DRY_RUN_FIRST=false
        shift
        ;;
      --cleanup)
        CLEANUP_AFTER_TESTS=true
        shift
        ;;
      --non-interactive)
        INTERACTIVE=false
        shift
        ;;
      *)
        print_error "Unknown option: $1"
        exit 1
        ;;
    esac
  done

  print_header

  # Load and validate configuration
  load_config

  if ! validate_config; then
    print_error "Configuration validation failed"
    print_info "Please check your configuration and try again"
    exit 1
  fi

  print_info "Test Repository: $TEST_REPO"
  print_info "Test Username: $TEST_USERNAME"
  print_info "Dry-run first: $DRY_RUN_FIRST"
  print_info "Cleanup after: $CLEANUP_AFTER_TESTS"

  # Interactive confirmation
  if [[ "$INTERACTIVE" == "true" ]]; then
    echo
    echo -n "Proceed with running tests? [y/N] "
    read -r response
    if [[ ! "$response" =~ ^[Yy]$ ]]; then
      print_info "Tests cancelled by user"
      exit 0
    fi
  fi

  # Install the restore trap BEFORE mutating the test files so backups
  # are put back even if update_test_files fails part-way through.
  trap restore_test_files EXIT

  # Update test files with configuration
  update_test_files

  # Run the test suite
  run_test_suite

  # Print summary
  print_summary

  # Cleanup if requested
  cleanup_tests

  # Exit with appropriate code
  if [[ $FAILED_TESTS -eq 0 ]]; then
    exit 0
  else
    exit 1
  fi
}

main "$@"