jleechanorg-pr-automation 0.1.0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of jleechanorg-pr-automation has been flagged as possibly problematic.

@@ -0,0 +1,615 @@
+ #!/usr/bin/env python3
+ """
+ Comprehensive test suite for AutomationSafetyManager
+ Using TDD methodology with 150+ test cases covering all safety logic
+ """
+
+ import pytest
+ import json
+ import tempfile
+ import os
+ import threading
+ import time
+ from pathlib import Path
+ from datetime import datetime, timedelta
+ from unittest.mock import Mock, patch, MagicMock
+
+ # Import the automation safety manager using proper Python module path
+ from jleechanorg_pr_automation.automation_safety_manager import AutomationSafetyManager
+
+
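Editor's note: the suite below only ever touches the following surface of AutomationSafetyManager. This stub summarizes that surface for readers of the diff; the attribute and return types are inferred from the test assertions, not taken from the package itself.

# Interface summary inferred from the test calls below (types are assumptions):
class AutomationSafetyManagerSurface:
    data_dir: str          # directory holding config and state files
    global_limit: int      # maximum automation runs overall
    pr_limit: int          # maximum attempts per PR
    logger: "logging.Logger"

    def __init__(self, data_dir: str) -> None: ...
    def can_start_global_run(self) -> bool: ...
    def record_global_run(self) -> None: ...
    def get_global_runs(self) -> int: ...
    def _clear_global_runs(self) -> None: ...
    def can_process_pr(self, pr_key: str) -> bool: ...
    def record_pr_attempt(self, pr_key: str, result: str) -> None: ...
    def get_pr_attempt_list(self, pr_key: str) -> list: ...
    def _is_email_configured(self) -> bool: ...
    def send_notification(self, subject: str, message: str) -> bool: ...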
+ class TestAutomationSafetyManagerInit:
+     """Test suite for AutomationSafetyManager initialization"""
+
+     def test_init_with_data_dir(self):
+         """Test initialization with provided data directory"""
+         with tempfile.TemporaryDirectory() as temp_dir:
+             manager = AutomationSafetyManager(temp_dir)
+             assert manager.data_dir == temp_dir
+             assert manager.global_limit > 0
+             assert manager.pr_limit > 0
+
+     def test_init_creates_data_dir(self):
+         """Test that initialization creates data directory if it doesn't exist"""
+         with tempfile.TemporaryDirectory() as parent_dir:
+             data_dir = os.path.join(parent_dir, "new_safety_dir")
+             manager = AutomationSafetyManager(data_dir)
+             assert os.path.exists(data_dir)
+
+     def test_init_reads_config_file(self):
+         """Test that initialization reads existing config file"""
+         with tempfile.TemporaryDirectory() as temp_dir:
+             # Create config file
+             config_file = os.path.join(temp_dir, "automation_safety_config.json")
+             config_data = {
+                 "global_limit": 50,
+                 "pr_limit": 3,
+                 "daily_limit": 100
+             }
+             with open(config_file, 'w') as f:
+                 json.dump(config_data, f)
+
+             manager = AutomationSafetyManager(temp_dir)
+             assert manager.global_limit == 50
+             assert manager.pr_limit == 3
+
+     def test_init_creates_default_config(self):
+         """Test that initialization creates default config if none exists"""
+         with tempfile.TemporaryDirectory() as temp_dir:
+             manager = AutomationSafetyManager(temp_dir)
+
+             config_file = os.path.join(temp_dir, "automation_safety_config.json")
+             assert os.path.exists(config_file)
+
+             with open(config_file, 'r') as f:
+                 config = json.load(f)
+                 assert "global_limit" in config
+                 assert "pr_limit" in config
+
+     def test_init_invalid_config_uses_defaults(self):
+         """Test that invalid config file falls back to defaults"""
+         with tempfile.TemporaryDirectory() as temp_dir:
+             # Create invalid config file
+             config_file = os.path.join(temp_dir, "automation_safety_config.json")
+             with open(config_file, 'w') as f:
+                 f.write("{ invalid json")
+
+             manager = AutomationSafetyManager(temp_dir)
+             assert manager.global_limit > 0  # Should use defaults
+
+
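Editor's note: the initialization tests above pin down three behaviors: the data directory is created on demand, an existing automation_safety_config.json overrides defaults, and unparseable JSON falls back to defaults while a config file is always left on disk. A minimal loader along these lines would satisfy them; it is only a sketch, and the default values shown are placeholders rather than the package's real defaults.

import json
import os

# Hypothetical defaults; the package's actual values are not shown in this diff.
DEFAULTS = {"global_limit": 30, "pr_limit": 5, "daily_limit": 100}

def load_safety_config(data_dir):
    # Create the data directory if it does not exist yet.
    os.makedirs(data_dir, exist_ok=True)
    config_path = os.path.join(data_dir, "automation_safety_config.json")
    config = dict(DEFAULTS)
    try:
        with open(config_path) as f:
            config.update(json.load(f))  # existing settings override defaults
    except (FileNotFoundError, json.JSONDecodeError):
        pass  # missing or invalid JSON: keep the defaults
    # Always leave an effective config file on disk, as the tests expect.
    with open(config_path, "w") as f:
        json.dump(config, f, indent=2)
    return config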
+ class TestGlobalLimits:
+     """Test suite for global automation limits"""
+
+     @pytest.fixture
+     def manager(self):
+         with tempfile.TemporaryDirectory() as temp_dir:
+             yield AutomationSafetyManager(temp_dir)
+
+     def test_can_start_global_run_under_limit(self, manager):
+         """Test global run allowed when under limit"""
+         # Clear any existing runs
+         manager._clear_global_runs()
+         assert manager.can_start_global_run() == True
+
+     def test_can_start_global_run_at_limit(self, manager):
+         """Test global run denied when at limit"""
+         # Fill up to limit
+         for _ in range(manager.global_limit):
+             manager.record_global_run()
+
+         assert manager.can_start_global_run() == False
+
+     def test_record_global_run_increments_count(self, manager):
+         """Test that recording global run increments counter"""
+         initial_count = manager.get_global_runs()
+         manager.record_global_run()
+         assert manager.get_global_runs() == initial_count + 1
+
+     def test_get_global_runs_returns_count(self, manager):
+         """Test that get_global_runs returns correct count"""
+         manager._clear_global_runs()
+         assert manager.get_global_runs() == 0
+
+         manager.record_global_run()
+         assert manager.get_global_runs() == 1
+
+     def test_global_runs_file_persistence(self, manager):
+         """Test that global runs are persisted to file"""
+         manager._clear_global_runs()
+         manager.record_global_run()
+         manager.record_global_run()
+
+         # Create new manager with same data dir
+         new_manager = AutomationSafetyManager(manager.data_dir)
+         assert new_manager.get_global_runs() == 2
+
+     def test_global_runs_thread_safety(self, manager):
+         """Test that global runs are thread-safe"""
+         manager._clear_global_runs()
+
+         def record_runs():
+             for _ in range(10):
+                 manager.record_global_run()
+
+         threads = [threading.Thread(target=record_runs) for _ in range(5)]
+         for thread in threads:
+             thread.start()
+         for thread in threads:
+             thread.join()
+
+         # Should have exactly 50 runs (5 threads × 10 runs each)
+         assert manager.get_global_runs() == 50
+
+     def test_clear_global_runs(self, manager):
+         """Test clearing global runs"""
+         manager.record_global_run()
+         manager.record_global_run()
+         assert manager.get_global_runs() > 0
+
+         manager._clear_global_runs()
+         assert manager.get_global_runs() == 0
+
+
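Editor's note: the TestGlobalLimits suite implies a counter persisted in global_runs.json (the file name that test_file_corruption_recovery later corrupts) with locking around the read-modify-write so that 5 threads × 10 increments land on exactly 50. The sketch below shows bookkeeping that would behave this way; the on-disk layout is an assumption, not the package's actual format.

import json
import os
import threading

class GlobalRunCounter:
    # Illustrative bookkeeping only; the real manager's internals are not in this diff.
    def __init__(self, data_dir, global_limit):
        self._path = os.path.join(data_dir, "global_runs.json")
        self._limit = global_limit
        self._lock = threading.Lock()

    def get(self):
        try:
            with open(self._path) as f:
                return int(json.load(f).get("count", 0))
        except (FileNotFoundError, ValueError, json.JSONDecodeError):
            return 0

    def can_start(self):
        return self.get() < self._limit

    def record(self):
        # Serialize the read-modify-write so concurrent threads never drop an increment.
        with self._lock:
            count = self.get() + 1
            with open(self._path, "w") as f:
                json.dump({"count": count}, f)

    def clear(self):
        with self._lock:
            with open(self._path, "w") as f:
                json.dump({"count": 0}, f)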
+ class TestPRLimits:
+     """Test suite for per-PR automation limits"""
+
+     @pytest.fixture
+     def manager(self):
+         with tempfile.TemporaryDirectory() as temp_dir:
+             yield AutomationSafetyManager(temp_dir)
+
+     @pytest.mark.parametrize("pr_key,expected_can_process", [
+         ("repo-123", True),  # New PR
+         ("repo-456", True),  # Different PR
+         ("repo/with/slashes-789", True),  # PR with slashes in name
+     ])
+     def test_can_process_pr_new_prs(self, manager, pr_key, expected_can_process):
+         """Test that new PRs can be processed"""
+         result = manager.can_process_pr(pr_key)
+         assert result == expected_can_process
+
+     def test_can_process_pr_under_limit(self, manager):
+         """Test PR processing allowed when under limit"""
+         pr_key = "test-repo-123"
+
+         # Process PR up to limit - 1
+         for _ in range(manager.pr_limit - 1):
+             manager.record_pr_attempt(pr_key, "success")
+
+         assert manager.can_process_pr(pr_key) == True
+
+     def test_can_process_pr_at_limit(self, manager):
+         """Test PR processing denied when at limit"""
+         pr_key = "test-repo-123"
+
+         # Process PR up to limit
+         for _ in range(manager.pr_limit):
+             manager.record_pr_attempt(pr_key, "success")
+
+         assert manager.can_process_pr(pr_key) == False
+
+     def test_record_pr_attempt_success(self, manager):
+         """Test recording successful PR attempt"""
+         pr_key = "test-repo-123"
+
+         manager.record_pr_attempt(pr_key, "success")
+
+         # Should have one attempt recorded
+         attempts = manager.get_pr_attempt_list(pr_key)
+         assert len(attempts) == 1
+         assert attempts[0]["result"] == "success"
+
+     def test_record_pr_attempt_failure(self, manager):
+         """Test recording failed PR attempt"""
+         pr_key = "test-repo-456"
+
+         manager.record_pr_attempt(pr_key, "failure")
+
+         attempts = manager.get_pr_attempt_list(pr_key)
+         assert len(attempts) == 1
+         assert attempts[0]["result"] == "failure"
+
+     @pytest.mark.parametrize("result", ["success", "failure", "error", "timeout"])
+     def test_record_pr_attempt_various_results(self, manager, result):
+         """Test recording PR attempts with various result types"""
+         pr_key = f"test-repo-{result}"
+
+         manager.record_pr_attempt(pr_key, result)
+
+         attempts = manager.get_pr_attempt_list(pr_key)
+         assert len(attempts) == 1
+         assert attempts[0]["result"] == result
+
+     def test_record_pr_attempt_includes_timestamp(self, manager):
+         """Test that PR attempts include timestamps"""
+         pr_key = "test-repo-timestamp"
+
+         before_time = datetime.now()
+         manager.record_pr_attempt(pr_key, "success")
+         after_time = datetime.now()
+
+         attempts = manager.get_pr_attempt_list(pr_key)
+         assert len(attempts) == 1
+
+         timestamp_str = attempts[0]["timestamp"]
+         timestamp = datetime.fromisoformat(timestamp_str)
+         assert before_time <= timestamp <= after_time
+
+     def test_get_pr_attempts_empty(self, manager):
+         """Test getting attempts for PR with no history"""
+         attempts = manager.get_pr_attempt_list("nonexistent-pr")
+         assert attempts == []
+
+     def test_get_pr_attempts_multiple(self, manager):
+         """Test getting multiple attempts for same PR"""
+         pr_key = "test-repo-multiple"
+
+         manager.record_pr_attempt(pr_key, "failure")
+         manager.record_pr_attempt(pr_key, "success")
+         manager.record_pr_attempt(pr_key, "success")
+
+         attempts = manager.get_pr_attempt_list(pr_key)
+         assert len(attempts) == 3
+         assert attempts[0]["result"] == "failure"
+         assert attempts[1]["result"] == "success"
+         assert attempts[2]["result"] == "success"
+
+     def test_pr_attempts_file_persistence(self, manager):
+         """Test that PR attempts are persisted to file"""
+         pr_key = "test-repo-persist"
+         manager.record_pr_attempt(pr_key, "success")
+
+         # Create new manager with same data dir
+         new_manager = AutomationSafetyManager(manager.data_dir)
+         attempts = new_manager.get_pr_attempt_list(pr_key)
+         assert len(attempts) == 1
+         assert attempts[0]["result"] == "success"
+
+     def test_pr_attempts_thread_safety(self, manager):
+         """Test that PR attempt recording is thread-safe"""
+         pr_key = "test-repo-threading"
+
+         def record_attempts():
+             for i in range(5):
+                 manager.record_pr_attempt(pr_key, f"attempt-{i}")
+
+         threads = [threading.Thread(target=record_attempts) for _ in range(3)]
+         for thread in threads:
+             thread.start()
+         for thread in threads:
+             thread.join()
+
+         attempts = manager.get_pr_attempt_list(pr_key)
+         assert len(attempts) == 15  # 3 threads × 5 attempts each
+
+
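Editor's note: the per-PR tests read exactly two keys from each attempt record, "result" and an ISO-8601 "timestamp" that datetime.fromisoformat() can parse, and they expect attempts to survive a second manager pointed at the same data directory. The module-level functions below illustrate one way to meet that contract; the file name and layout are assumptions.

import json
import os
import threading
from datetime import datetime

_attempts_lock = threading.Lock()

def record_pr_attempt(data_dir, pr_key, result):
    # Append one attempt record; "result" and "timestamp" match what the tests read.
    path = os.path.join(data_dir, "pr_attempts.json")  # hypothetical file name
    with _attempts_lock:
        try:
            with open(path) as f:
                attempts = json.load(f)
        except (FileNotFoundError, json.JSONDecodeError):
            attempts = {}
        attempts.setdefault(pr_key, []).append({
            "result": result,
            "timestamp": datetime.now().isoformat(),  # parseable by fromisoformat()
        })
        with open(path, "w") as f:
            json.dump(attempts, f)

def get_pr_attempt_list(data_dir, pr_key):
    path = os.path.join(data_dir, "pr_attempts.json")
    try:
        with open(path) as f:
            return json.load(f).get(pr_key, [])
    except (FileNotFoundError, json.JSONDecodeError):
        return []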
+ class TestEmailNotifications:
+     """Test suite for email notification functionality"""
+
+     @pytest.fixture
+     def manager(self):
+         with tempfile.TemporaryDirectory() as temp_dir:
+             yield AutomationSafetyManager(temp_dir)
+
+     @patch.dict(os.environ, {
+         'SMTP_SERVER': 'smtp.example.com',
+         'SMTP_PORT': '587',
+         'EMAIL_USER': 'test@example.com',
+         'EMAIL_PASS': 'password',
+         'EMAIL_TO': 'admin@example.com'
+     })
+     def test_email_config_complete(self, manager):
+         """Test email configuration detection when complete"""
+         assert manager._is_email_configured() == True
+
+     @patch.dict(os.environ, {}, clear=True)
+     def test_email_config_incomplete(self, manager):
+         """Test email configuration detection when incomplete"""
+         assert manager._is_email_configured() == False
+
+     @patch.dict(os.environ, {
+         'SMTP_SERVER': 'smtp.example.com',
+         'EMAIL_USER': 'test@example.com'
+         # Missing SMTP_PORT, EMAIL_PASS, EMAIL_TO
+     })
+     def test_email_config_partial(self, manager):
+         """Test email configuration detection when partially configured"""
+         assert manager._is_email_configured() == False
+
+     @patch.dict(os.environ, {
+         'SMTP_SERVER': 'smtp.example.com',
+         'SMTP_PORT': '587',
+         'EMAIL_USER': 'test@example.com',
+         'EMAIL_PASS': 'password',
+         'EMAIL_TO': 'admin@example.com'
+     })
+     @patch('smtplib.SMTP')
+     def test_send_notification_success(self, mock_smtp, manager):
+         """Test successful email notification sending"""
+         mock_server = Mock()
+         mock_smtp.return_value = mock_server
+
+         result = manager.send_notification("Test Subject", "Test message")
+
+         assert result == True
+         mock_smtp.assert_called_once_with('smtp.example.com', 587)
+         mock_server.starttls.assert_called_once()
+         mock_server.login.assert_called_once_with('test@example.com', 'password')
+         mock_server.send_message.assert_called_once()
+         mock_server.quit.assert_called_once()
+
+     @patch.dict(os.environ, {}, clear=True)
+     def test_send_notification_no_config(self, manager):
+         """Test email notification when not configured"""
+         with patch.object(manager.logger, 'info') as mock_info:
+             result = manager.send_notification("Test", "Message")
+
+         assert result == False
+         mock_info.assert_called_with("Email configuration incomplete - skipping notification")
+
+     @patch.dict(os.environ, {
+         'SMTP_SERVER': 'smtp.example.com',
+         'SMTP_PORT': '587',
+         'EMAIL_USER': 'test@example.com',
+         'EMAIL_PASS': 'password',
+         'EMAIL_TO': 'admin@example.com'
+     })
+     @patch('smtplib.SMTP')
+     def test_send_notification_smtp_error(self, mock_smtp, manager):
+         """Test email notification with SMTP error"""
+         mock_smtp.side_effect = Exception("SMTP connection failed")
+
+         with patch.object(manager.logger, 'error') as mock_error:
+             result = manager.send_notification("Test", "Message")
+
+         assert result == False
+         mock_error.assert_called()
+
+     @patch.dict(os.environ, {
+         'SMTP_SERVER': 'smtp.example.com',
+         'SMTP_PORT': '587',
+         'EMAIL_USER': 'test@example.com',
+         'EMAIL_PASS': 'password',
+         'EMAIL_TO': 'admin@example.com'
+     })
+     @patch('smtplib.SMTP')
+     def test_send_notification_login_error(self, mock_smtp, manager):
+         """Test email notification with login error"""
+         mock_server = Mock()
+         mock_server.login.side_effect = Exception("Authentication failed")
+         mock_smtp.return_value = mock_server
+
+         with patch.object(manager.logger, 'error') as mock_error:
+             result = manager.send_notification("Test", "Message")
+
+         assert result == False
+         mock_error.assert_called()
+
+
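Editor's note: the notification tests fix the observable behavior precisely: all five environment variables must be present, the exact log line "Email configuration incomplete - skipping notification" is emitted when they are not, and a successful send calls SMTP(host, port), starttls(), login(), send_message(), and quit() in that order, returning False on any exception. The sketch below mirrors that sequence; the message construction details are assumptions.

import os
import smtplib
from email.message import EmailMessage

REQUIRED_ENV = ("SMTP_SERVER", "SMTP_PORT", "EMAIL_USER", "EMAIL_PASS", "EMAIL_TO")

def send_notification(logger, subject, body):
    # Skip quietly (with the exact log line the test asserts) when not configured.
    if not all(os.environ.get(var) for var in REQUIRED_ENV):
        logger.info("Email configuration incomplete - skipping notification")
        return False
    try:
        msg = EmailMessage()
        msg["Subject"] = subject
        msg["From"] = os.environ["EMAIL_USER"]
        msg["To"] = os.environ["EMAIL_TO"]
        msg.set_content(body)
        server = smtplib.SMTP(os.environ["SMTP_SERVER"], int(os.environ["SMTP_PORT"]))
        server.starttls()
        server.login(os.environ["EMAIL_USER"], os.environ["EMAIL_PASS"])
        server.send_message(msg)
        server.quit()
        return True
    except Exception as exc:
        logger.error(f"Failed to send notification: {exc}")
        return False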
+ class TestFileLocking:
+     """Test suite for file locking mechanisms"""
+
+     @pytest.fixture
+     def manager(self):
+         with tempfile.TemporaryDirectory() as temp_dir:
+             yield AutomationSafetyManager(temp_dir)
+
+     def test_concurrent_global_run_recording(self, manager):
+         """Test that concurrent global run recording is thread-safe"""
+         manager._clear_global_runs()
+
+         results = []
+
+         def record_run_with_result():
+             try:
+                 manager.record_global_run()
+                 results.append("success")
+             except Exception as e:
+                 results.append(f"error: {e}")
+
+         # Start many concurrent threads
+         threads = [threading.Thread(target=record_run_with_result) for _ in range(20)]
+         for thread in threads:
+             thread.start()
+         for thread in threads:
+             thread.join()
+
+         # All operations should succeed
+         assert all(result == "success" for result in results)
+         assert manager.get_global_runs() == 20
+
+     def test_concurrent_pr_attempt_recording(self, manager):
+         """Test that concurrent PR attempt recording is thread-safe"""
+         pr_key = "test-repo-concurrent"
+         results = []
+
+         def record_attempt_with_result(attempt_id):
+             try:
+                 manager.record_pr_attempt(pr_key, f"attempt-{attempt_id}")
+                 results.append("success")
+             except Exception as e:
+                 results.append(f"error: {e}")
+
+         # Start many concurrent threads
+         threads = [threading.Thread(target=record_attempt_with_result, args=(i,)) for i in range(15)]
+         for thread in threads:
+             thread.start()
+         for thread in threads:
+             thread.join()
+
+         # All operations should succeed
+         assert all(result == "success" for result in results)
+         attempts = manager.get_pr_attempt_list(pr_key)
+         assert len(attempts) == 15
+
+     def test_file_corruption_recovery(self, manager):
+         """Test recovery from corrupted data files"""
+         # Corrupt the global runs file
+         global_runs_file = os.path.join(manager.data_dir, "global_runs.json")
+         manager.record_global_run()  # Create the file first
+
+         with open(global_runs_file, 'w') as f:
+             f.write("{ corrupted json")
+
+         # Should recover gracefully
+         manager.record_global_run()
+         assert manager.get_global_runs() >= 1
+
+     def test_permission_error_handling(self, manager):
+         """Test handling of file permission errors"""
+         with patch('jleechanorg_pr_automation.utils.json_manager.write_json', return_value=False):
+             with patch.object(manager.logger, 'error') as mock_error:
+                 # Should not raise exception
+                 manager.record_global_run()
+                 mock_error.assert_called()
+
+
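Editor's note: two failure modes are pinned down above: a corrupted global_runs.json must not break later record_global_run() calls, and a write_json helper that returns False (patched from jleechanorg_pr_automation.utils.json_manager) should be reported through logger.error rather than raised. Helpers like these would give that behavior; write_json's real signature is an assumption here.

import json

def read_json_or_default(path, default):
    # Corrupted or missing state files fall back to a default instead of raising
    # (the behavior test_file_corruption_recovery expects).
    try:
        with open(path) as f:
            return json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        return default

def persist_state(write_json, logger, path, payload):
    # test_permission_error_handling patches write_json to return False and only
    # expects an error log, so a failed write is logged, never raised.
    if not write_json(path, payload):  # signature assumed: (path, payload) -> bool
        logger.error(f"Failed to persist automation state to {path}")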
+ class TestConfigurationManagement:
+     """Test suite for configuration management"""
+
+     def test_load_config_with_all_settings(self):
+         """Test loading configuration with all settings"""
+         with tempfile.TemporaryDirectory() as temp_dir:
+             config_file = os.path.join(temp_dir, "automation_safety_config.json")
+             config_data = {
+                 "global_limit": 25,
+                 "pr_limit": 5,
+                 "daily_limit": 200,
+                 "email_notifications": True,
+                 "max_pr_size": 1000
+             }
+             with open(config_file, 'w') as f:
+                 json.dump(config_data, f)
+
+             manager = AutomationSafetyManager(temp_dir)
+             assert manager.global_limit == 25
+             assert manager.pr_limit == 5
+
+     def test_load_config_with_partial_settings(self):
+         """Test loading configuration with partial settings uses defaults"""
+         with tempfile.TemporaryDirectory() as temp_dir:
+             config_file = os.path.join(temp_dir, "automation_safety_config.json")
+             config_data = {
+                 "global_limit": 15
+                 # Missing other settings
+             }
+             with open(config_file, 'w') as f:
+                 json.dump(config_data, f)
+
+             manager = AutomationSafetyManager(temp_dir)
+             assert manager.global_limit == 15
+             assert manager.pr_limit > 0  # Should use default
+
+     def test_save_config_creates_file(self):
+         """Test that save_config creates configuration file"""
+         with tempfile.TemporaryDirectory() as temp_dir:
+             manager = AutomationSafetyManager(temp_dir)
+
+             config_file = os.path.join(temp_dir, "automation_safety_config.json")
+             assert os.path.exists(config_file)
+
+             with open(config_file, 'r') as f:
+                 config = json.load(f)
+                 assert "global_limit" in config
+                 assert "pr_limit" in config
+
+     def test_config_file_permissions(self):
+         """Test that config file has appropriate permissions"""
+         with tempfile.TemporaryDirectory() as temp_dir:
+             manager = AutomationSafetyManager(temp_dir)
+
+             config_file = os.path.join(temp_dir, "automation_safety_config.json")
+             stat_info = os.stat(config_file)
+
+             # Should be readable/writable by owner
+             assert stat_info.st_mode & 0o600
+
+
+ class TestIntegrationScenarios:
+     """Test suite for integration scenarios"""
+
+     @pytest.fixture
+     def manager(self):
+         with tempfile.TemporaryDirectory() as temp_dir:
+             yield AutomationSafetyManager(temp_dir)
+
+     def test_typical_automation_workflow(self, manager):
+         """Test typical automation workflow"""
+         # Start automation run
+         assert manager.can_start_global_run() == True
+         manager.record_global_run()
+
+         # Process multiple PRs
+         pr_keys = ["repo1-123", "repo2-456", "repo1-789"]
+
+         for pr_key in pr_keys:
+             assert manager.can_process_pr(pr_key) == True
+             manager.record_pr_attempt(pr_key, "success")
+
+         # Verify state
+         assert manager.get_global_runs() == 1
+         for pr_key in pr_keys:
+             attempts = manager.get_pr_attempt_list(pr_key)
+             assert len(attempts) == 1
+             assert attempts[0]["result"] == "success"
+
+     def test_hitting_pr_limits(self, manager):
+         """Test behavior when hitting PR limits"""
+         pr_key = "test-repo-limit"
+
+         # Process up to limit
+         for i in range(manager.pr_limit):
+             assert manager.can_process_pr(pr_key) == True
+             manager.record_pr_attempt(pr_key, "success")
+
+         # Should now be at limit
+         assert manager.can_process_pr(pr_key) == False
+
+     def test_hitting_global_limits(self, manager):
+         """Test behavior when hitting global limits"""
+         # Fill up to global limit
+         for i in range(manager.global_limit):
+             assert manager.can_start_global_run() == True
+             manager.record_global_run()
+
+         # Should now be at limit
+         assert manager.can_start_global_run() == False
+
+     def test_mixed_success_failure_attempts(self, manager):
+         """Test tracking mixed success/failure attempts"""
+         pr_key = "test-repo-mixed"
+
+         # Record mixed results
+         results = ["failure", "success", "error", "success", "timeout"]
+         for result in results:
+             if manager.can_process_pr(pr_key):
+                 manager.record_pr_attempt(pr_key, result)
+
+         attempts = manager.get_pr_attempt_list(pr_key)
+         assert len(attempts) <= manager.pr_limit
+
+         # Verify results are recorded correctly
+         recorded_results = [attempt["result"] for attempt in attempts]
+         assert all(result in results for result in recorded_results)
+
+     def test_multiple_managers_same_data_dir(self, manager):
+         """Test multiple manager instances sharing same data directory"""
+         data_dir = manager.data_dir
+
+         # Create second manager with same data dir
+         manager2 = AutomationSafetyManager(data_dir)
+
+         # Record with first manager
+         manager.record_global_run()
+         manager.record_pr_attempt("test-pr", "success")
+
+         # Verify second manager sees the data
+         assert manager2.get_global_runs() == manager.get_global_runs()
+         attempts1 = manager.get_pr_attempt_list("test-pr")
+         attempts2 = manager2.get_pr_attempt_list("test-pr")
+         assert len(attempts1) == len(attempts2)
+
+
+ if __name__ == '__main__':
+     pytest.main([__file__, '-v', '--tb=short'])
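Editor's note: taken together, the integration tests suggest how a caller would gate an automation run: check the global budget once, record the run, then check and record per PR. The hypothetical driver loop below uses only the calls exercised above; the data path and PR keys are illustrative.

# Hypothetical driver loop mirroring test_typical_automation_workflow.
from jleechanorg_pr_automation.automation_safety_manager import AutomationSafetyManager

manager = AutomationSafetyManager("/path/to/safety-data")  # illustrative path
if manager.can_start_global_run():
    manager.record_global_run()
    for pr_key in ["repo1-123", "repo2-456"]:  # illustrative PR keys
        if manager.can_process_pr(pr_key):
            # ... run the PR automation here ...
            manager.record_pr_attempt(pr_key, "success")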