devforgeai 1.0.4 → 1.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (134)
  1. package/CLAUDE.md +120 -0
  2. package/package.json +9 -1
  3. package/src/CLAUDE.md +699 -0
  4. package/src/claude/scripts/README.md +396 -0
  5. package/src/claude/scripts/audit-command-skill-overlap.sh +67 -0
  6. package/src/claude/scripts/check-hooks-fast.sh +70 -0
  7. package/src/claude/scripts/devforgeai-validate +6 -0
  8. package/src/claude/scripts/devforgeai_cli/README.md +531 -0
  9. package/src/claude/scripts/devforgeai_cli/__init__.py +12 -0
  10. package/src/claude/scripts/devforgeai_cli/cli.py +716 -0
  11. package/src/claude/scripts/devforgeai_cli/commands/__init__.py +1 -0
  12. package/src/claude/scripts/devforgeai_cli/commands/check_hooks.py +384 -0
  13. package/src/claude/scripts/devforgeai_cli/commands/invoke_hooks.py +149 -0
  14. package/src/claude/scripts/devforgeai_cli/commands/phase_commands.py +731 -0
  15. package/src/claude/scripts/devforgeai_cli/commands/validate_installation.py +412 -0
  16. package/src/claude/scripts/devforgeai_cli/context_extraction.py +426 -0
  17. package/src/claude/scripts/devforgeai_cli/feedback/AC_TO_TEST_MAPPING.md +636 -0
  18. package/src/claude/scripts/devforgeai_cli/feedback/DELIVERY_SUMMARY.txt +329 -0
  19. package/src/claude/scripts/devforgeai_cli/feedback/README_TEST_SPECS.md +486 -0
  20. package/src/claude/scripts/devforgeai_cli/feedback/TEST_IMPLEMENTATION_GUIDE.md +529 -0
  21. package/src/claude/scripts/devforgeai_cli/feedback/TEST_SPECIFICATIONS.md +2652 -0
  22. package/src/claude/scripts/devforgeai_cli/feedback/TEST_SPECS_INDEX.md +398 -0
  23. package/src/claude/scripts/devforgeai_cli/feedback/__init__.py +34 -0
  24. package/src/claude/scripts/devforgeai_cli/feedback/adaptive_questioning_engine.py +581 -0
  25. package/src/claude/scripts/devforgeai_cli/feedback/aggregation.py +179 -0
  26. package/src/claude/scripts/devforgeai_cli/feedback/commands.py +535 -0
  27. package/src/claude/scripts/devforgeai_cli/feedback/config_defaults.py +58 -0
  28. package/src/claude/scripts/devforgeai_cli/feedback/config_manager.py +423 -0
  29. package/src/claude/scripts/devforgeai_cli/feedback/config_models.py +192 -0
  30. package/src/claude/scripts/devforgeai_cli/feedback/config_schema.py +140 -0
  31. package/src/claude/scripts/devforgeai_cli/feedback/coverage.json +1 -0
  32. package/src/claude/scripts/devforgeai_cli/feedback/feature_flag.py +152 -0
  33. package/src/claude/scripts/devforgeai_cli/feedback/feedback_indexer.py +394 -0
  34. package/src/claude/scripts/devforgeai_cli/feedback/hot_reload.py +226 -0
  35. package/src/claude/scripts/devforgeai_cli/feedback/longitudinal.py +115 -0
  36. package/src/claude/scripts/devforgeai_cli/feedback/models.py +67 -0
  37. package/src/claude/scripts/devforgeai_cli/feedback/question_router.py +236 -0
  38. package/src/claude/scripts/devforgeai_cli/feedback/retrospective.py +233 -0
  39. package/src/claude/scripts/devforgeai_cli/feedback/skip_tracker.py +177 -0
  40. package/src/claude/scripts/devforgeai_cli/feedback/skip_tracking.py +221 -0
  41. package/src/claude/scripts/devforgeai_cli/feedback/template_engine.py +549 -0
  42. package/src/claude/scripts/devforgeai_cli/feedback/validation.py +163 -0
  43. package/src/claude/scripts/devforgeai_cli/headless/__init__.py +30 -0
  44. package/src/claude/scripts/devforgeai_cli/headless/answer_models.py +206 -0
  45. package/src/claude/scripts/devforgeai_cli/headless/answer_resolver.py +204 -0
  46. package/src/claude/scripts/devforgeai_cli/headless/exceptions.py +36 -0
  47. package/src/claude/scripts/devforgeai_cli/headless/pattern_matcher.py +156 -0
  48. package/src/claude/scripts/devforgeai_cli/hooks.py +313 -0
  49. package/src/claude/scripts/devforgeai_cli/metrics/__init__.py +46 -0
  50. package/src/claude/scripts/devforgeai_cli/metrics/command_metrics.py +142 -0
  51. package/src/claude/scripts/devforgeai_cli/metrics/failure_modes.py +152 -0
  52. package/src/claude/scripts/devforgeai_cli/metrics/story_segmentation.py +181 -0
  53. package/src/claude/scripts/devforgeai_cli/orchestrate_hooks.py +780 -0
  54. package/src/claude/scripts/devforgeai_cli/phase_state.py +1229 -0
  55. package/src/claude/scripts/devforgeai_cli/session/__init__.py +30 -0
  56. package/src/claude/scripts/devforgeai_cli/session/checkpoint.py +268 -0
  57. package/src/claude/scripts/devforgeai_cli/tests/__init__.py +1 -0
  58. package/src/claude/scripts/devforgeai_cli/tests/conftest.py +29 -0
  59. package/src/claude/scripts/devforgeai_cli/tests/feedback/TEST_EXECUTION_GUIDE.md +298 -0
  60. package/src/claude/scripts/devforgeai_cli/tests/feedback/__init__.py +3 -0
  61. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_adaptive_questioning_engine.py +2171 -0
  62. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_aggregation.py +476 -0
  63. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_config_defaults.py +133 -0
  64. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_config_manager.py +592 -0
  65. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_config_models.py +373 -0
  66. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_config_schema.py +130 -0
  67. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_configuration_management.py +1355 -0
  68. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_edge_cases.py +308 -0
  69. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_feature_flag.py +307 -0
  70. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_feedback_indexer.py +384 -0
  71. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_hot_reload.py +580 -0
  72. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_integration.py +402 -0
  73. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_models.py +105 -0
  74. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_question_routing.py +262 -0
  75. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_retrospective.py +333 -0
  76. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_skip_tracker.py +410 -0
  77. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_skip_tracking.py +159 -0
  78. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_skip_tracking_integration.py +1155 -0
  79. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_template_engine.py +1389 -0
  80. package/src/claude/scripts/devforgeai_cli/tests/feedback/test_validation_comprehensive.py +210 -0
  81. package/src/claude/scripts/devforgeai_cli/tests/fixtures/autonomous-deferral-story.md +46 -0
  82. package/src/claude/scripts/devforgeai_cli/tests/fixtures/missing-impl-notes.md +31 -0
  83. package/src/claude/scripts/devforgeai_cli/tests/fixtures/valid-deferral-story.md +46 -0
  84. package/src/claude/scripts/devforgeai_cli/tests/fixtures/valid-story-complete.md +48 -0
  85. package/src/claude/scripts/devforgeai_cli/tests/manual_test_invoke_hooks.sh +200 -0
  86. package/src/claude/scripts/devforgeai_cli/tests/session/DELIVERABLES.md +518 -0
  87. package/src/claude/scripts/devforgeai_cli/tests/session/TEST_SUMMARY.md +468 -0
  88. package/src/claude/scripts/devforgeai_cli/tests/session/__init__.py +6 -0
  89. package/src/claude/scripts/devforgeai_cli/tests/session/fixtures/corrupted-checkpoint.json +1 -0
  90. package/src/claude/scripts/devforgeai_cli/tests/session/fixtures/missing-fields-checkpoint.json +4 -0
  91. package/src/claude/scripts/devforgeai_cli/tests/session/fixtures/valid-checkpoint.json +15 -0
  92. package/src/claude/scripts/devforgeai_cli/tests/session/test_checkpoint.py +851 -0
  93. package/src/claude/scripts/devforgeai_cli/tests/test_check_hooks.py +1886 -0
  94. package/src/claude/scripts/devforgeai_cli/tests/test_depends_on_normalizer.py +171 -0
  95. package/src/claude/scripts/devforgeai_cli/tests/test_dod_validator.py +97 -0
  96. package/src/claude/scripts/devforgeai_cli/tests/test_invoke_hooks.py +1902 -0
  97. package/src/claude/scripts/devforgeai_cli/tests/test_phase_commands.py +320 -0
  98. package/src/claude/scripts/devforgeai_cli/tests/test_phase_commands_error_handling.py +1021 -0
  99. package/src/claude/scripts/devforgeai_cli/tests/test_phase_commands_import.py +697 -0
  100. package/src/claude/scripts/devforgeai_cli/tests/test_phase_state.py +2187 -0
  101. package/src/claude/scripts/devforgeai_cli/tests/test_skip_tracking.py +2141 -0
  102. package/src/claude/scripts/devforgeai_cli/tests/test_skip_tracking_coverage_gap.py +195 -0
  103. package/src/claude/scripts/devforgeai_cli/tests/test_subagent_enforcement.py +539 -0
  104. package/src/claude/scripts/devforgeai_cli/tests/test_validate_installation.py +361 -0
  105. package/src/claude/scripts/devforgeai_cli/utils/__init__.py +11 -0
  106. package/src/claude/scripts/devforgeai_cli/utils/depends_on_normalizer.py +149 -0
  107. package/src/claude/scripts/devforgeai_cli/utils/markdown_parser.py +219 -0
  108. package/src/claude/scripts/devforgeai_cli/utils/story_analyzer.py +249 -0
  109. package/src/claude/scripts/devforgeai_cli/utils/yaml_parser.py +152 -0
  110. package/src/claude/scripts/devforgeai_cli/validators/__init__.py +27 -0
  111. package/src/claude/scripts/devforgeai_cli/validators/ast_grep_validator.py +373 -0
  112. package/src/claude/scripts/devforgeai_cli/validators/context_validator.py +180 -0
  113. package/src/claude/scripts/devforgeai_cli/validators/dod_validator.py +309 -0
  114. package/src/claude/scripts/devforgeai_cli/validators/git_validator.py +107 -0
  115. package/src/claude/scripts/devforgeai_cli/validators/grep_fallback.py +300 -0
  116. package/src/claude/scripts/install_hooks.sh +186 -0
  117. package/src/claude/scripts/invoke_feedback_hooks.sh +59 -0
  118. package/src/claude/scripts/migrate-ac-headers.sh +122 -0
  119. package/src/claude/scripts/plan_file_kb.sh +704 -0
  120. package/src/claude/scripts/requirements.txt +8 -0
  121. package/src/claude/scripts/session_catalog.sh +543 -0
  122. package/src/claude/scripts/setup.py +55 -0
  123. package/src/claude/scripts/start-devforgeai.sh +16 -0
  124. package/src/claude/scripts/statusline.sh +27 -0
  125. package/src/claude/scripts/validate_deferrals.py +344 -0
  126. package/src/claude/skills/devforgeai-qa/SKILL.md +1 -1
  127. package/src/claude/skills/researching-market/SKILL.md +2 -1
  128. package/src/cli/lib/copier.js +13 -1
  129. package/src/claude/skills/designing-systems/scripts/__pycache__/detect_anti_patterns.cpython-312.pyc +0 -0
  130. package/src/claude/skills/designing-systems/scripts/__pycache__/validate_all_context.cpython-312.pyc +0 -0
  131. package/src/claude/skills/designing-systems/scripts/__pycache__/validate_architecture.cpython-312.pyc +0 -0
  132. package/src/claude/skills/designing-systems/scripts/__pycache__/validate_dependencies.cpython-312.pyc +0 -0
  133. package/src/claude/skills/devforgeai-story-creation/scripts/__pycache__/migrate_story_v1_to_v2.cpython-312.pyc +0 -0
  134. package/src/claude/skills/devforgeai-story-creation/scripts/tests/__pycache__/measure_accuracy.cpython-312.pyc +0 -0
@@ -0,0 +1,308 @@
1
+ """
2
+ Unit tests for edge cases
3
+
4
+ Tests cover:
5
+ - Network/connection loss during feedback
6
+ - Extremely long feedback responses
7
+ - Rapid command sequence
8
+ - Failed setup scenarios
9
+ - Sensitive feedback handling
10
+ """
11
+
12
+ import pytest
13
+ import json
14
+ import tempfile
15
+ import shutil
16
+ from pathlib import Path
17
+
18
+ from devforgeai_cli.feedback.retrospective import (
19
+ capture_feedback,
20
+ save_in_progress_state,
21
+ resume_feedback,
22
+ )
23
+ from devforgeai_cli.feedback.validation import (
24
+ validate_response_length,
25
+ detect_spam,
26
+ check_sensitive_content,
27
+ )
28
+
29
+
30
class TestNetworkLoss:
    """Edge Case 1: Network/Connection Loss During Feedback Collection"""

    @pytest.fixture
    def temp_feedback_dir(self):
        # Fresh scratch directory per test; teardown runs even on failure.
        scratch = tempfile.mkdtemp()
        yield Path(scratch)
        shutil.rmtree(scratch)

    def test_save_in_progress_state_preserves_partial_responses(self, temp_feedback_dir):
        """
        GIVEN a feedback session interrupted part-way through
        WHEN the connection drops
        THEN every field answered so far is persisted (partial capture)
        """
        sid = 'STORY-001'
        # Two of five questions answered before the disconnect.
        answered = [
            {'question_id': 'dev_success_01', 'response': 4, 'skip': False},
            {'question_id': 'dev_success_02', 'response': 'TDD was helpful', 'skip': False},
        ]

        state_file = save_in_progress_state(
            story_id=sid,
            responses=answered,
            workflow_type='dev',
            feedback_dir=temp_feedback_dir
        )

        assert state_file.exists()
        assert state_file.name.endswith('-in-progress.json')

        # The on-disk JSON must round-trip the partial answers.
        persisted = json.loads(state_file.read_text())
        assert persisted['story_id'] == sid
        assert len(persisted['responses']) == 2
        assert persisted['status'] == 'in_progress'

    def test_resume_feedback_offers_continuation_option(self, temp_feedback_dir):
        """
        GIVEN an in-progress feedback state on disk
        WHEN a new session starts
        THEN the system can offer to continue the previous feedback
        """
        sid = 'STORY-001'
        answered = [
            {'question_id': 'dev_success_01', 'response': 4, 'skip': False},
        ]
        save_in_progress_state(sid, answered, 'dev', temp_feedback_dir)

        resumed = resume_feedback(sid, temp_feedback_dir)

        assert resumed is not None
        assert 'responses' in resumed
        assert len(resumed['responses']) == 1
        assert 'timestamp' in resumed
94
+
95
+
96
class TestLongResponses:
    """Edge Case 2: Extremely Long Feedback Response"""

    def test_validate_response_length_accepts_long_detailed_feedback(self):
        """
        A multi-paragraph answer past 5,000 characters is stored in full
        (no truncation) but the user is warned about its length.
        """
        verbose = "This is detailed feedback. " * 200  # roughly 5,400 chars

        accepted, warning = validate_response_length(verbose)

        assert accepted is True   # long feedback is still accepted
        assert warning is not None  # ...with a heads-up to the user
        assert 'approaching' in warning.lower() or 'long' in warning.lower()

    def test_validate_response_length_rejects_spam(self):
        """Repeated-character and repeated-pattern noise is flagged as spam."""
        spam_responses = (
            'aaaaaaaaaaaaaaaaaaaaaaaaa' * 100,  # single-character runs
            '12341234123412341234' * 100,       # repeating numeric pattern
            'asdf' * 500,                       # keyboard mashing
        )

        for spam in spam_responses:
            assert detect_spam(spam) is True, f"Failed to detect spam: {spam[:50]}"
133
+
134
+
135
class TestRapidSequence:
    """Edge Case 3: Rapid Command Sequence (No Feedback Between Runs)"""

    @pytest.fixture
    def temp_feedback_dir(self):
        # Fresh scratch directory per test; cleaned up on teardown.
        workdir = tempfile.mkdtemp()
        yield Path(workdir)
        shutil.rmtree(workdir)

    def test_rapid_sequence_detection(self, temp_feedback_dir):
        """
        Two commands issued within the 30-second window (here: 15 seconds
        apart) must be identified as a rapid sequence.
        """
        from devforgeai_cli.feedback.retrospective import detect_rapid_sequence
        from datetime import datetime, timedelta, timezone

        # Previous feedback landed only 15 seconds ago.
        previous = datetime.now(timezone.utc) - timedelta(seconds=15)

        flagged = detect_rapid_sequence(
            last_feedback_time=previous,
            threshold_seconds=30
        )

        assert flagged is True

    def test_rapid_sequence_offers_quick_feedback_option(self, temp_feedback_dir):
        """
        When a rapid sequence is detected the prompt should offer
        "Quick feedback on last command?" / "Skip, I'm in flow state".
        Full behaviour needs integration testing; here we only confirm
        the detection hook is exposed and callable.
        """
        from devforgeai_cli.feedback.retrospective import detect_rapid_sequence

        assert callable(detect_rapid_sequence)
176
+
177
+
178
class TestSensitiveContent:
    """Edge Case 5: Sensitive Feedback (User Reports Privacy Concern)"""

    def test_check_sensitive_content_detects_api_keys(self):
        """Feedback that embeds an API key must be flagged as sensitive."""
        leaky = "I accidentally exposed my API key sk-1234567890abcdef in the logs"

        flagged, categories = check_sensitive_content(leaky)

        assert flagged is True
        assert 'api_key' in categories or 'secret' in categories

    def test_check_sensitive_content_detects_data_loss_concerns(self):
        """Reports of destroyed data must be flagged for careful handling."""
        report = "The command deleted my production database without warning"

        flagged, categories = check_sensitive_content(report)

        assert flagged is True
        assert 'data_loss' in categories or 'critical_issue' in categories

    def test_check_sensitive_content_allows_normal_feedback(self):
        """Ordinary, harmless feedback passes through unflagged."""
        benign = "The TDD workflow was very helpful and easy to follow"

        flagged, categories = check_sensitive_content(benign)

        assert flagged is False
        assert len(categories) == 0
228
+
229
+
230
class TestDataValidation:
    """Test data validation rules from story spec"""

    def test_story_id_pattern_validation(self):
        """Story ids must match the STORY-[0-9]+ pattern exactly."""
        from devforgeai_cli.feedback.validation import validate_story_id

        for well_formed in ('STORY-001', 'STORY-123', 'STORY-999'):
            assert validate_story_id(well_formed) is True

        # Lowercase prefix, non-numeric suffix, wrong prefix, missing hyphen.
        for malformed in ('story-001', 'STORY-abc', 'TASK-001', 'STORY001'):
            assert validate_story_id(malformed) is False

    def test_workflow_type_validation(self):
        """Only the eight known workflow types are accepted."""
        from devforgeai_cli.feedback.validation import validate_workflow_type

        accepted = ['dev', 'qa', 'orchestrate', 'release', 'ideate',
                    'create-story', 'create-epic', 'create-sprint']
        for name in accepted:
            assert validate_workflow_type(name) is True

        for name in ['development', 'quality', 'deploy', 'invalid']:
            assert validate_workflow_type(name) is False

    def test_response_length_limits(self):
        """Open text must be 5-10,000 characters, with a warning past 2,000."""
        from devforgeai_cli.feedback.validation import validate_response_length

        # Below the 5-char floor: rejected.
        accepted, note = validate_response_length('abc')
        assert accepted is False

        # Comfortably in range: accepted, no warning.
        accepted, note = validate_response_length('This is helpful feedback')
        assert accepted is True
        assert note is None

        # Past the 2,000-char soft limit: accepted, but warned.
        accepted, note = validate_response_length('x' * 2500)
        assert accepted is True
        assert note is not None

        # Past the 10,000-char ceiling: rejected.
        accepted, note = validate_response_length('x' * 10500)
        assert accepted is False

    def test_coherent_text_detection(self):
        """Random character/pattern repetition is classed as incoherent."""
        from devforgeai_cli.feedback.validation import is_coherent_text

        assert is_coherent_text('The workflow was confusing') is True

        for gibberish in ('aaaaaaaaaaaaa', '123412341234', 'asdfasdfasdf'):
            assert is_coherent_text(gibberish) is False
@@ -0,0 +1,307 @@
1
+ """Unit tests for feature_flag module.
2
+
3
+ Tests feature flag evaluation, collection mode detection, and graceful degradation.
4
+ Covers all 58 statements in feature_flag.py for 100% coverage.
5
+ """
6
+
7
+ import os
8
+ import pytest
9
+ from pathlib import Path
10
+ from unittest.mock import patch, mock_open, MagicMock
11
+ import tempfile
12
+ import shutil
13
+
14
+ from devforgeai_cli.feedback.feature_flag import (
15
+ should_enable_feedback,
16
+ get_collection_mode,
17
+ should_collect_for_operation,
18
+ trigger_retrospective_if_enabled,
19
+ )
20
+
21
+
22
class TestShouldEnableFeedback:
    """Test should_enable_feedback() function."""

    def test_should_enable_feedback_with_env_var_true(self):
        """DEVFORGEAI_DISABLE_FEEDBACK=true switches feedback off."""
        with patch.dict(os.environ, {'DEVFORGEAI_DISABLE_FEEDBACK': 'true'}):
            assert should_enable_feedback() is False

    def test_should_enable_feedback_with_env_var_false(self):
        """With the env var unset and no config file, feedback stays on."""
        with patch.dict(os.environ, {}, clear=True), \
             patch.object(Path, 'exists', return_value=False):
            # No config anywhere -> defaults to enabled.
            assert should_enable_feedback() is True

    def test_should_enable_feedback_with_config_disabled(self, tmp_path):
        """config.yaml containing enable_feedback: false disables feedback."""
        # Lay a real config out on disk (the reads below are mocked anyway).
        config_dir = tmp_path / "devforgeai" / "feedback"
        config_dir.mkdir(parents=True)
        (config_dir / "config.yaml").write_text("enable_feedback: false\n")

        with patch.dict(os.environ, {}, clear=True), \
             patch.object(Path, 'exists', return_value=True), \
             patch('builtins.open', mock_open(read_data="enable_feedback: false\n")):
            assert should_enable_feedback() is False

    def test_should_enable_feedback_with_config_mode_disabled(self):
        """config.yaml with mode: disabled also disables feedback."""
        with patch.dict(os.environ, {}, clear=True), \
             patch.object(Path, 'exists', return_value=True), \
             patch('builtins.open', mock_open(read_data="mode: disabled\n")):
            assert should_enable_feedback() is False

    def test_should_enable_feedback_with_config_enabled(self):
        """config.yaml with enable_feedback: true keeps feedback on."""
        with patch.dict(os.environ, {}, clear=True), \
             patch.object(Path, 'exists', return_value=True), \
             patch('builtins.open', mock_open(read_data="enable_feedback: true\nmode: all\n")):
            assert should_enable_feedback() is True

    def test_should_enable_feedback_fallback_to_default_no_config(self):
        """An absent config file means feedback is enabled by default."""
        with patch.dict(os.environ, {}, clear=True), \
             patch.object(Path, 'exists', return_value=False):
            assert should_enable_feedback() is True

    def test_should_enable_feedback_fallback_on_config_read_error(self):
        """An unreadable config file means feedback is enabled by default."""
        with patch.dict(os.environ, {}, clear=True), \
             patch.object(Path, 'exists', return_value=True), \
             patch('builtins.open', side_effect=IOError("Cannot read file")):
            assert should_enable_feedback() is True

    def test_should_enable_feedback_with_invalid_yaml(self):
        """Malformed YAML in the config falls back to enabled."""
        with patch.dict(os.environ, {}, clear=True), \
             patch.object(Path, 'exists', return_value=True), \
             patch('builtins.open', mock_open(read_data="invalid: yaml: content:\n - broken")):
            # The yaml.YAMLError is swallowed and the safe default wins.
            assert should_enable_feedback() is True

    def test_should_enable_feedback_env_var_takes_precedence(self):
        """The kill-switch env var beats an enabling config file."""
        with patch.dict(os.environ, {'DEVFORGEAI_DISABLE_FEEDBACK': 'true'}), \
             patch.object(Path, 'exists', return_value=True), \
             patch('builtins.open', mock_open(read_data="enable_feedback: true\n")):
            # Config says true, but the env var must win.
            assert should_enable_feedback() is False

    def test_should_enable_feedback_yaml_not_available(self):
        """With PyYAML missing, config parsing is skipped and feedback is on."""
        with patch.dict(os.environ, {}, clear=True), \
             patch('devforgeai_cli.feedback.feature_flag.YAML_AVAILABLE', False), \
             patch.object(Path, 'exists', return_value=True):
            assert should_enable_feedback() is True

    def test_yaml_import_error_handling(self):
        """The module-level try/except ImportError leaves a boolean flag."""
        from devforgeai_cli.feedback import feature_flag

        # True when yaml imported cleanly, False when the except path ran.
        assert isinstance(feature_flag.YAML_AVAILABLE, bool)
112
+
113
+
114
class TestGetCollectionMode:
    """Test get_collection_mode() function."""

    def test_get_collection_mode_when_disabled(self):
        """A disabled feedback flag forces the mode to 'disabled'."""
        with patch('devforgeai_cli.feedback.feature_flag.should_enable_feedback', return_value=False):
            assert get_collection_mode() == 'disabled'

    def test_get_collection_mode_from_config_all(self):
        """mode: all in config.yaml is honoured."""
        with patch('devforgeai_cli.feedback.feature_flag.should_enable_feedback', return_value=True), \
             patch.object(Path, 'exists', return_value=True), \
             patch('builtins.open', mock_open(read_data="mode: all\n")):
            assert get_collection_mode() == 'all'

    def test_get_collection_mode_from_config_failures_only(self):
        """mode: failures_only in config.yaml is honoured."""
        with patch('devforgeai_cli.feedback.feature_flag.should_enable_feedback', return_value=True), \
             patch.object(Path, 'exists', return_value=True), \
             patch('builtins.open', mock_open(read_data="mode: failures_only\n")):
            assert get_collection_mode() == 'failures_only'

    def test_get_collection_mode_default_to_all(self):
        """No config file at all yields the default mode 'all'."""
        with patch('devforgeai_cli.feedback.feature_flag.should_enable_feedback', return_value=True), \
             patch.object(Path, 'exists', return_value=False):
            assert get_collection_mode() == 'all'

    def test_get_collection_mode_config_read_error(self):
        """An unreadable config file falls back to mode 'all'."""
        with patch('devforgeai_cli.feedback.feature_flag.should_enable_feedback', return_value=True), \
             patch.object(Path, 'exists', return_value=True), \
             patch('builtins.open', side_effect=IOError("Cannot read file")):
            assert get_collection_mode() == 'all'

    def test_get_collection_mode_yaml_not_available(self):
        """With PyYAML missing, the config is skipped and mode is 'all'."""
        with patch('devforgeai_cli.feedback.feature_flag.should_enable_feedback', return_value=True), \
             patch('devforgeai_cli.feedback.feature_flag.YAML_AVAILABLE', False), \
             patch.object(Path, 'exists', return_value=True):
            assert get_collection_mode() == 'all'
155
+
156
+
157
class TestShouldCollectForOperation:
    """Test should_collect_for_operation() function."""

    def test_should_collect_when_feedback_disabled(self):
        """Nothing is collected while feedback is globally disabled."""
        with patch('devforgeai_cli.feedback.feature_flag.should_enable_feedback', return_value=False):
            assert should_collect_for_operation('dev', 'success') is False

    def test_should_collect_mode_disabled(self):
        """Mode 'disabled' suppresses collection even when feedback is on."""
        with patch('devforgeai_cli.feedback.feature_flag.should_enable_feedback', return_value=True), \
             patch('devforgeai_cli.feedback.feature_flag.get_collection_mode', return_value='disabled'):
            assert should_collect_for_operation('dev', 'success') is False

    def test_should_collect_mode_failures_only_with_success(self):
        """'failures_only' skips successful operations."""
        with patch('devforgeai_cli.feedback.feature_flag.should_enable_feedback', return_value=True), \
             patch('devforgeai_cli.feedback.feature_flag.get_collection_mode', return_value='failures_only'):
            assert should_collect_for_operation('dev', 'success') is False

    def test_should_collect_mode_failures_only_with_failed(self):
        """'failures_only' collects for failed operations."""
        with patch('devforgeai_cli.feedback.feature_flag.should_enable_feedback', return_value=True), \
             patch('devforgeai_cli.feedback.feature_flag.get_collection_mode', return_value='failures_only'):
            assert should_collect_for_operation('dev', 'failed') is True

    def test_should_collect_mode_failures_only_with_partial(self):
        """'failures_only' also collects for partial outcomes."""
        with patch('devforgeai_cli.feedback.feature_flag.should_enable_feedback', return_value=True), \
             patch('devforgeai_cli.feedback.feature_flag.get_collection_mode', return_value='failures_only'):
            assert should_collect_for_operation('qa', 'partial') is True

    def test_should_collect_mode_all_with_success(self):
        """Mode 'all' collects for successful operations."""
        with patch('devforgeai_cli.feedback.feature_flag.should_enable_feedback', return_value=True), \
             patch('devforgeai_cli.feedback.feature_flag.get_collection_mode', return_value='all'):
            assert should_collect_for_operation('dev', 'success') is True

    def test_should_collect_mode_all_with_failed(self):
        """Mode 'all' collects for failed operations too."""
        with patch('devforgeai_cli.feedback.feature_flag.should_enable_feedback', return_value=True), \
             patch('devforgeai_cli.feedback.feature_flag.get_collection_mode', return_value='all'):
            assert should_collect_for_operation('orchestrate', 'failed') is True

    def test_should_collect_mode_unknown(self):
        """An unrecognised mode defaults to collecting (True)."""
        with patch('devforgeai_cli.feedback.feature_flag.should_enable_feedback', return_value=True), \
             patch('devforgeai_cli.feedback.feature_flag.get_collection_mode', return_value='unknown_mode'):
            assert should_collect_for_operation('dev', 'success') is True
207
+
208
+
209
class TestTriggerRetrospectiveIfEnabled:
    """Tests for trigger_retrospective_if_enabled()."""

    def test_trigger_retrospective_when_collection_disabled(self):
        """No retrospective runs when the collection gate says no."""
        with patch('devforgeai_cli.feedback.feature_flag.should_collect_for_operation', return_value=False):
            assert trigger_retrospective_if_enabled('dev', 'STORY-001', 'success') is None

    def test_trigger_retrospective_success(self):
        """When collection is enabled, the retrospective's payload is returned unchanged."""
        expected = {
            'feedback_id': 'test-123',
            'story_id': 'STORY-001',
            'workflow_type': 'dev',
            'success_status': 'success',
        }
        with patch('devforgeai_cli.feedback.feature_flag.should_collect_for_operation', return_value=True), \
                patch('devforgeai_cli.feedback.retrospective.trigger_retrospective', return_value=expected):
            assert trigger_retrospective_if_enabled('dev', 'STORY-001', 'success') == expected

    def test_trigger_retrospective_graceful_degradation_on_error(self):
        """An exception inside collection degrades to a None result, not a crash."""
        with patch('devforgeai_cli.feedback.feature_flag.should_collect_for_operation', return_value=True), \
                patch('devforgeai_cli.feedback.retrospective.trigger_retrospective', side_effect=Exception("Test error")):
            # Errors must not propagate to the caller; None signals "no feedback".
            assert trigger_retrospective_if_enabled('dev', 'STORY-001', 'success') is None

    def test_trigger_retrospective_with_failures_only_mode(self):
        """A failed QA operation in failures_only mode yields its feedback payload."""
        expected = {
            'feedback_id': 'test-456',
            'story_id': 'STORY-002',
            'workflow_type': 'qa',
            'success_status': 'failed',
        }
        with patch('devforgeai_cli.feedback.feature_flag.should_collect_for_operation', return_value=True), \
                patch('devforgeai_cli.feedback.retrospective.trigger_retrospective', return_value=expected):
            result = trigger_retrospective_if_enabled('qa', 'STORY-002', 'failed')
            assert result == expected
            assert result['success_status'] == 'failed'
254
+
255
+
256
+ # Integration tests for complete feature flag workflow
257
+ class TestFeatureFlagIntegration:
258
+ """Integration tests for complete feature flag workflow."""
259
+
260
+ def test_complete_workflow_env_var_disables(self):
261
+ """Test complete workflow when environment variable disables feedback."""
262
+ with patch.dict(os.environ, {'DEVFORGEAI_DISABLE_FEEDBACK': 'true'}):
263
+ # should_enable_feedback returns False
264
+ assert should_enable_feedback() is False
265
+
266
+ # get_collection_mode returns 'disabled'
267
+ assert get_collection_mode() == 'disabled'
268
+
269
+ # should_collect_for_operation returns False
270
+ assert should_collect_for_operation('dev', 'success') is False
271
+
272
+ # trigger_retrospective_if_enabled returns None
273
+ result = trigger_retrospective_if_enabled('dev', 'STORY-001', 'success')
274
+ assert result is None
275
+
276
+ def test_complete_workflow_config_enables_all(self):
277
+ """Test complete workflow when config enables all feedback collection."""
278
+ with patch.dict(os.environ, {}, clear=True):
279
+ with patch.object(Path, 'exists', return_value=True):
280
+ with patch('builtins.open', mock_open(read_data="enable_feedback: true\nmode: all\n")):
281
+ # should_enable_feedback returns True
282
+ assert should_enable_feedback() is True
283
+
284
+ # get_collection_mode returns 'all'
285
+ assert get_collection_mode() == 'all'
286
+
287
+ # should_collect_for_operation returns True for all operations
288
+ assert should_collect_for_operation('dev', 'success') is True
289
+ assert should_collect_for_operation('qa', 'failed') is True
290
+
291
+ def test_complete_workflow_config_failures_only(self):
292
+ """Test complete workflow when config sets failures_only mode."""
293
+ with patch.dict(os.environ, {}, clear=True):
294
+ with patch.object(Path, 'exists', return_value=True):
295
+ with patch('builtins.open', mock_open(read_data="mode: failures_only\n")):
296
+ # should_enable_feedback returns True
297
+ assert should_enable_feedback() is True
298
+
299
+ # get_collection_mode returns 'failures_only'
300
+ assert get_collection_mode() == 'failures_only'
301
+
302
+ # should_collect_for_operation returns False for success
303
+ assert should_collect_for_operation('dev', 'success') is False
304
+
305
+ # should_collect_for_operation returns True for failures
306
+ assert should_collect_for_operation('qa', 'failed') is True
307
+ assert should_collect_for_operation('orchestrate', 'partial') is True