devforgeai 1.0.4 → 1.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CLAUDE.md +120 -0
- package/package.json +9 -1
- package/src/CLAUDE.md +699 -0
- package/src/claude/scripts/README.md +396 -0
- package/src/claude/scripts/audit-command-skill-overlap.sh +67 -0
- package/src/claude/scripts/check-hooks-fast.sh +70 -0
- package/src/claude/scripts/devforgeai-validate +6 -0
- package/src/claude/scripts/devforgeai_cli/README.md +531 -0
- package/src/claude/scripts/devforgeai_cli/__init__.py +12 -0
- package/src/claude/scripts/devforgeai_cli/cli.py +716 -0
- package/src/claude/scripts/devforgeai_cli/commands/__init__.py +1 -0
- package/src/claude/scripts/devforgeai_cli/commands/check_hooks.py +384 -0
- package/src/claude/scripts/devforgeai_cli/commands/invoke_hooks.py +149 -0
- package/src/claude/scripts/devforgeai_cli/commands/phase_commands.py +731 -0
- package/src/claude/scripts/devforgeai_cli/commands/validate_installation.py +412 -0
- package/src/claude/scripts/devforgeai_cli/context_extraction.py +426 -0
- package/src/claude/scripts/devforgeai_cli/feedback/AC_TO_TEST_MAPPING.md +636 -0
- package/src/claude/scripts/devforgeai_cli/feedback/DELIVERY_SUMMARY.txt +329 -0
- package/src/claude/scripts/devforgeai_cli/feedback/README_TEST_SPECS.md +486 -0
- package/src/claude/scripts/devforgeai_cli/feedback/TEST_IMPLEMENTATION_GUIDE.md +529 -0
- package/src/claude/scripts/devforgeai_cli/feedback/TEST_SPECIFICATIONS.md +2652 -0
- package/src/claude/scripts/devforgeai_cli/feedback/TEST_SPECS_INDEX.md +398 -0
- package/src/claude/scripts/devforgeai_cli/feedback/__init__.py +34 -0
- package/src/claude/scripts/devforgeai_cli/feedback/adaptive_questioning_engine.py +581 -0
- package/src/claude/scripts/devforgeai_cli/feedback/aggregation.py +179 -0
- package/src/claude/scripts/devforgeai_cli/feedback/commands.py +535 -0
- package/src/claude/scripts/devforgeai_cli/feedback/config_defaults.py +58 -0
- package/src/claude/scripts/devforgeai_cli/feedback/config_manager.py +423 -0
- package/src/claude/scripts/devforgeai_cli/feedback/config_models.py +192 -0
- package/src/claude/scripts/devforgeai_cli/feedback/config_schema.py +140 -0
- package/src/claude/scripts/devforgeai_cli/feedback/coverage.json +1 -0
- package/src/claude/scripts/devforgeai_cli/feedback/feature_flag.py +152 -0
- package/src/claude/scripts/devforgeai_cli/feedback/feedback_indexer.py +394 -0
- package/src/claude/scripts/devforgeai_cli/feedback/hot_reload.py +226 -0
- package/src/claude/scripts/devforgeai_cli/feedback/longitudinal.py +115 -0
- package/src/claude/scripts/devforgeai_cli/feedback/models.py +67 -0
- package/src/claude/scripts/devforgeai_cli/feedback/question_router.py +236 -0
- package/src/claude/scripts/devforgeai_cli/feedback/retrospective.py +233 -0
- package/src/claude/scripts/devforgeai_cli/feedback/skip_tracker.py +177 -0
- package/src/claude/scripts/devforgeai_cli/feedback/skip_tracking.py +221 -0
- package/src/claude/scripts/devforgeai_cli/feedback/template_engine.py +549 -0
- package/src/claude/scripts/devforgeai_cli/feedback/validation.py +163 -0
- package/src/claude/scripts/devforgeai_cli/headless/__init__.py +30 -0
- package/src/claude/scripts/devforgeai_cli/headless/answer_models.py +206 -0
- package/src/claude/scripts/devforgeai_cli/headless/answer_resolver.py +204 -0
- package/src/claude/scripts/devforgeai_cli/headless/exceptions.py +36 -0
- package/src/claude/scripts/devforgeai_cli/headless/pattern_matcher.py +156 -0
- package/src/claude/scripts/devforgeai_cli/hooks.py +313 -0
- package/src/claude/scripts/devforgeai_cli/metrics/__init__.py +46 -0
- package/src/claude/scripts/devforgeai_cli/metrics/command_metrics.py +142 -0
- package/src/claude/scripts/devforgeai_cli/metrics/failure_modes.py +152 -0
- package/src/claude/scripts/devforgeai_cli/metrics/story_segmentation.py +181 -0
- package/src/claude/scripts/devforgeai_cli/orchestrate_hooks.py +780 -0
- package/src/claude/scripts/devforgeai_cli/phase_state.py +1229 -0
- package/src/claude/scripts/devforgeai_cli/session/__init__.py +30 -0
- package/src/claude/scripts/devforgeai_cli/session/checkpoint.py +268 -0
- package/src/claude/scripts/devforgeai_cli/tests/__init__.py +1 -0
- package/src/claude/scripts/devforgeai_cli/tests/conftest.py +29 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/TEST_EXECUTION_GUIDE.md +298 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/__init__.py +3 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_adaptive_questioning_engine.py +2171 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_aggregation.py +476 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_config_defaults.py +133 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_config_manager.py +592 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_config_models.py +373 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_config_schema.py +130 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_configuration_management.py +1355 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_edge_cases.py +308 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_feature_flag.py +307 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_feedback_indexer.py +384 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_hot_reload.py +580 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_integration.py +402 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_models.py +105 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_question_routing.py +262 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_retrospective.py +333 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_skip_tracker.py +410 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_skip_tracking.py +159 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_skip_tracking_integration.py +1155 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_template_engine.py +1389 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_validation_comprehensive.py +210 -0
- package/src/claude/scripts/devforgeai_cli/tests/fixtures/autonomous-deferral-story.md +46 -0
- package/src/claude/scripts/devforgeai_cli/tests/fixtures/missing-impl-notes.md +31 -0
- package/src/claude/scripts/devforgeai_cli/tests/fixtures/valid-deferral-story.md +46 -0
- package/src/claude/scripts/devforgeai_cli/tests/fixtures/valid-story-complete.md +48 -0
- package/src/claude/scripts/devforgeai_cli/tests/manual_test_invoke_hooks.sh +200 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/DELIVERABLES.md +518 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/TEST_SUMMARY.md +468 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/__init__.py +6 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/fixtures/corrupted-checkpoint.json +1 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/fixtures/missing-fields-checkpoint.json +4 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/fixtures/valid-checkpoint.json +15 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/test_checkpoint.py +851 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_check_hooks.py +1886 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_depends_on_normalizer.py +171 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_dod_validator.py +97 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_invoke_hooks.py +1902 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_phase_commands.py +320 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_phase_commands_error_handling.py +1021 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_phase_commands_import.py +697 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_phase_state.py +2187 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_skip_tracking.py +2141 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_skip_tracking_coverage_gap.py +195 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_subagent_enforcement.py +539 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_validate_installation.py +361 -0
- package/src/claude/scripts/devforgeai_cli/utils/__init__.py +11 -0
- package/src/claude/scripts/devforgeai_cli/utils/depends_on_normalizer.py +149 -0
- package/src/claude/scripts/devforgeai_cli/utils/markdown_parser.py +219 -0
- package/src/claude/scripts/devforgeai_cli/utils/story_analyzer.py +249 -0
- package/src/claude/scripts/devforgeai_cli/utils/yaml_parser.py +152 -0
- package/src/claude/scripts/devforgeai_cli/validators/__init__.py +27 -0
- package/src/claude/scripts/devforgeai_cli/validators/ast_grep_validator.py +373 -0
- package/src/claude/scripts/devforgeai_cli/validators/context_validator.py +180 -0
- package/src/claude/scripts/devforgeai_cli/validators/dod_validator.py +309 -0
- package/src/claude/scripts/devforgeai_cli/validators/git_validator.py +107 -0
- package/src/claude/scripts/devforgeai_cli/validators/grep_fallback.py +300 -0
- package/src/claude/scripts/install_hooks.sh +186 -0
- package/src/claude/scripts/invoke_feedback_hooks.sh +59 -0
- package/src/claude/scripts/migrate-ac-headers.sh +122 -0
- package/src/claude/scripts/plan_file_kb.sh +704 -0
- package/src/claude/scripts/requirements.txt +8 -0
- package/src/claude/scripts/session_catalog.sh +543 -0
- package/src/claude/scripts/setup.py +55 -0
- package/src/claude/scripts/start-devforgeai.sh +16 -0
- package/src/claude/scripts/statusline.sh +27 -0
- package/src/claude/scripts/validate_deferrals.py +344 -0
- package/src/claude/skills/devforgeai-qa/SKILL.md +1 -1
- package/src/claude/skills/researching-market/SKILL.md +2 -1
- package/src/cli/lib/copier.js +13 -1
- package/src/claude/skills/designing-systems/scripts/__pycache__/detect_anti_patterns.cpython-312.pyc +0 -0
- package/src/claude/skills/designing-systems/scripts/__pycache__/validate_all_context.cpython-312.pyc +0 -0
- package/src/claude/skills/designing-systems/scripts/__pycache__/validate_architecture.cpython-312.pyc +0 -0
- package/src/claude/skills/designing-systems/scripts/__pycache__/validate_dependencies.cpython-312.pyc +0 -0
- package/src/claude/skills/devforgeai-story-creation/scripts/__pycache__/migrate_story_v1_to_v2.cpython-312.pyc +0 -0
- package/src/claude/skills/devforgeai-story-creation/scripts/tests/__pycache__/measure_accuracy.cpython-312.pyc +0 -0
|
@@ -0,0 +1,1355 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Comprehensive test suite for STORY-011: Configuration Management
|
|
3
|
+
|
|
4
|
+
This test suite validates the YAML-based feedback system configuration including:
|
|
5
|
+
- YAML parsing and validation
|
|
6
|
+
- Master enable/disable controls
|
|
7
|
+
- Trigger mode determination
|
|
8
|
+
- Conversation settings enforcement
|
|
9
|
+
- Skip tracking statistics
|
|
10
|
+
- Template preferences
|
|
11
|
+
- Error handling and defaults
|
|
12
|
+
- Hot-reload functionality
|
|
13
|
+
- Edge case handling
|
|
14
|
+
|
|
15
|
+
Test Framework: pytest
|
|
16
|
+
Pattern: AAA (Arrange, Act, Assert)
|
|
17
|
+
Coverage Target: >95% of business logic
|
|
18
|
+
|
|
19
|
+
Tests are written to FAIL initially (TDD Red phase).
|
|
20
|
+
Implementation should make these tests PASS (TDD Green phase).
|
|
21
|
+
"""
|
|
22
|
+
|
|
23
|
+
import json
|
|
24
|
+
import os
|
|
25
|
+
import pytest
|
|
26
|
+
import tempfile
|
|
27
|
+
import threading
|
|
28
|
+
import time
|
|
29
|
+
from datetime import datetime, timedelta
|
|
30
|
+
from pathlib import Path
|
|
31
|
+
from typing import Dict, List, Optional
|
|
32
|
+
from unittest.mock import Mock, MagicMock, patch, call
|
|
33
|
+
from dataclasses import dataclass, asdict
|
|
34
|
+
from enum import Enum
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
# ============================================================================
|
|
38
|
+
# ENUMS AND DATA MODELS (from Technical Specification)
|
|
39
|
+
# ============================================================================
|
|
40
|
+
|
|
41
|
+
class TriggerMode(Enum):
    """Trigger modes for feedback collection.

    NOTE(review): declared from the technical specification; the tests below
    compare raw strings, so these values mirror the valid ``trigger_mode``
    strings checked in ``FeedbackConfiguration.__post_init__``.
    """
    ALWAYS = "always"
    FAILURES_ONLY = "failures-only"
    SPECIFIC_OPS = "specific-operations"
    NEVER = "never"
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
class TemplateFormat(Enum):
    """Template format options for feedback collection.

    Values mirror the ``templates.format`` strings exercised by the tests.
    """
    STRUCTURED = "structured"
    FREE_TEXT = "free-text"
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
class TemplateTone(Enum):
    """Template tone options for feedback questions.

    Values mirror the ``templates.tone`` strings exercised by the tests.
    """
    BRIEF = "brief"
    DETAILED = "detailed"
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
@dataclass
class ConversationSettings:
    """Conversation-level settings for feedback collection."""
    # Maximum questions per feedback conversation; the tests below treat
    # 0 as "unlimited".
    max_questions: int = 5
    # Whether the user may skip a feedback question.
    allow_skip: bool = True
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
@dataclass
class SkipTrackingSettings:
    """Skip tracking configuration."""
    # Master switch for skip tracking.
    enabled: bool = True
    # Consecutive-skip threshold; the tests below treat 0 as "no limit".
    max_consecutive_skips: int = 3
    # presumably resets the consecutive-skip counter after a positive
    # response — name-based inference, confirm against the implementation
    reset_on_positive: bool = True
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
@dataclass
class TemplateSettings:
    """Template preferences for feedback collection.

    NOTE(review): values are not validated here —
    ``FeedbackConfiguration.__post_init__`` only validates ``trigger_mode``.
    """
    format: str = "structured"  # structured|free-text
    tone: str = "brief"  # brief|detailed
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
@dataclass
class FeedbackConfiguration:
    """Complete feedback system configuration.

    Nested sections may be supplied as plain dicts (as parsed from YAML),
    as dataclass instances, or omitted entirely; ``__post_init__`` coerces
    each one to its dataclass type, filling in defaults where needed.

    Raises:
        ValueError: if ``trigger_mode`` is not one of the supported modes.
    """
    enabled: bool = True
    trigger_mode: str = "failures-only"
    # Only meaningful when trigger_mode == "specific-operations".
    operations: Optional[List[str]] = None
    # Annotated Optional: callers may pass a dict, an instance, or None.
    conversation_settings: Optional[ConversationSettings] = None
    skip_tracking: Optional[SkipTrackingSettings] = None
    templates: Optional[TemplateSettings] = None

    # The four supported trigger modes, kept in one place.
    # (Unannotated, so dataclass does not treat it as a field.)
    _VALID_TRIGGER_MODES = ("always", "failures-only", "specific-operations", "never")

    def __post_init__(self):
        """Coerce nested sections to dataclasses and validate trigger_mode."""
        self.conversation_settings = self._coerce(
            self.conversation_settings, ConversationSettings
        )
        self.skip_tracking = self._coerce(self.skip_tracking, SkipTrackingSettings)
        self.templates = self._coerce(self.templates, TemplateSettings)

        if self.trigger_mode not in self._VALID_TRIGGER_MODES:
            raise ValueError(
                f"Invalid trigger_mode value: '{self.trigger_mode}'. "
                f"Must be one of: {', '.join(self._VALID_TRIGGER_MODES)}"
            )

    @staticmethod
    def _coerce(value, cls):
        """Return *value* as an instance of *cls*.

        A dict becomes ``cls(**value)``, None becomes ``cls()`` (defaults),
        and an existing instance is returned unchanged.
        """
        if isinstance(value, dict):
            return cls(**value)
        if value is None:
            return cls()
        return value
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
# ============================================================================
|
|
121
|
+
# FIXTURES
|
|
122
|
+
# ============================================================================
|
|
123
|
+
|
|
124
|
+
@pytest.fixture
def temp_config_dir():
    """Yield a fresh temporary directory (as a Path) for configuration files."""
    with tempfile.TemporaryDirectory() as scratch:
        yield Path(scratch)
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
@pytest.fixture
def config_file(temp_config_dir):
    """Return the path of the feedback.yaml file inside the temp directory."""
    return temp_config_dir.joinpath("feedback.yaml")
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
@pytest.fixture
def logs_dir(temp_config_dir):
    """Create and return a temporary ``logs`` directory."""
    # Local renamed so it no longer shadows the fixture name itself.
    path = temp_config_dir / "logs"
    path.mkdir(exist_ok=True)
    return path
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
@pytest.fixture
def valid_config_dict():
    """Provide a fully populated, valid configuration dictionary."""
    conversation = {"max_questions": 5, "allow_skip": True}
    skip_tracking = {
        "enabled": True,
        "max_consecutive_skips": 3,
        "reset_on_positive": True,
    }
    templates = {"format": "structured", "tone": "brief"}
    return {
        "enabled": True,
        "trigger_mode": "failures-only",
        "operations": ["qa", "deployment"],
        "conversation_settings": conversation,
        "skip_tracking": skip_tracking,
        "templates": templates,
    }
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
@pytest.fixture
def config_manager(temp_config_dir, logs_dir):
    """Provide a mock configuration-manager instance.

    Used by tests to verify behavior against the wired-up directories.
    """
    # MagicMock kwargs become plain attributes on the mock.
    return MagicMock(config_dir=temp_config_dir, logs_dir=logs_dir)
|
|
177
|
+
|
|
178
|
+
|
|
179
|
+
@pytest.fixture
def mock_file_watcher():
    """Provide a mock file watcher for hot-reload testing."""
    stub = MagicMock()
    # Explicit child mocks for the watcher API surface the tests exercise.
    stub.start = MagicMock()
    stub.stop = MagicMock()
    stub.is_running = MagicMock(return_value=True)
    return stub
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
@pytest.fixture
def default_config():
    """Provide the documented default configuration, fully spelled out."""
    skip_defaults = SkipTrackingSettings(
        enabled=True,
        max_consecutive_skips=3,
        reset_on_positive=True,
    )
    return FeedbackConfiguration(
        enabled=True,
        trigger_mode="failures-only",
        operations=None,
        conversation_settings=ConversationSettings(max_questions=5, allow_skip=True),
        skip_tracking=skip_defaults,
        templates=TemplateSettings(format="structured", tone="brief"),
    )
|
|
204
|
+
|
|
205
|
+
|
|
206
|
+
# ============================================================================
|
|
207
|
+
# UNIT TESTS: YAML PARSING AND VALIDATION
|
|
208
|
+
# ============================================================================
|
|
209
|
+
|
|
210
|
+
class TestYamlParsing:
    """Test suite for YAML configuration file parsing.

    These tests exercise PyYAML round-tripping (yaml.dump / yaml.safe_load)
    directly rather than the config manager, pinning down the parsing
    layer's behavior in isolation.
    """

    def test_valid_yaml_structure_parses_successfully(self, config_file, valid_config_dict):
        """AC1: Valid YAML structure is parsed successfully.

        Given: config_file exists with valid YAML structure
        When: Configuration is loaded
        Then: All sections are accessible and no errors logged
        """
        # Arrange
        import yaml
        with open(config_file, 'w') as f:
            yaml.dump(valid_config_dict, f)

        # Act
        with open(config_file, 'r') as f:
            loaded_config = yaml.safe_load(f)

        # Assert
        assert loaded_config is not None
        assert loaded_config["enabled"] is True
        assert loaded_config["trigger_mode"] == "failures-only"
        assert "conversation_settings" in loaded_config
        assert "skip_tracking" in loaded_config
        assert "templates" in loaded_config

    def test_yaml_parsing_preserves_all_sections(self, config_file, valid_config_dict):
        """All configuration sections are preserved during parsing."""
        # Arrange
        import yaml
        with open(config_file, 'w') as f:
            yaml.dump(valid_config_dict, f)

        # Act
        with open(config_file, 'r') as f:
            loaded = yaml.safe_load(f)

        # Assert
        assert loaded["conversation_settings"]["max_questions"] == 5
        assert loaded["skip_tracking"]["max_consecutive_skips"] == 3
        assert loaded["templates"]["format"] == "structured"

    def test_yaml_with_invalid_syntax_raises_error(self, config_file):
        """Invalid YAML syntax is detected and reported."""
        # Arrange: the unclosed bracket makes this document unparseable.
        invalid_yaml = """
enabled: true
trigger_mode: failures-only
invalid: [unclosed bracket
"""
        with open(config_file, 'w') as f:
            f.write(invalid_yaml)

        # Act & Assert
        import yaml
        with pytest.raises(yaml.YAMLError):
            with open(config_file, 'r') as f:
                yaml.safe_load(f)

    def test_empty_yaml_file_handled(self, config_file):
        """Empty YAML file returns None or empty dict."""
        # Arrange
        with open(config_file, 'w') as f:
            f.write("")

        # Act
        import yaml
        with open(config_file, 'r') as f:
            result = yaml.safe_load(f)

        # Assert: safe_load yields None for an empty document; the test
        # also tolerates an empty dict from other loaders.
        assert result is None or result == {}

    def test_yaml_comments_ignored(self, config_file, valid_config_dict):
        """YAML comments are properly ignored during parsing."""
        # Arrange
        yaml_with_comments = """# Configuration for feedback system
# Version: 1.0
enabled: true # Master enable switch
trigger_mode: failures-only # Only collect on failures
"""
        with open(config_file, 'w') as f:
            f.write(yaml_with_comments)

        # Act
        import yaml
        with open(config_file, 'r') as f:
            loaded = yaml.safe_load(f)

        # Assert
        assert loaded["enabled"] is True
        assert loaded["trigger_mode"] == "failures-only"
|
|
303
|
+
|
|
304
|
+
|
|
305
|
+
class TestConfigurationValidation:
    """Test suite for configuration validation.

    Covers the four valid trigger modes, rejection of invalid ones, and
    the boundary/sentinel values for the numeric and template settings.
    """

    def test_valid_trigger_mode_always_accepted(self, valid_config_dict):
        """trigger_mode: always is valid."""
        # Arrange
        valid_config_dict["trigger_mode"] = "always"
        config = FeedbackConfiguration(**valid_config_dict)

        # Act & Assert
        assert config.trigger_mode == "always"

    def test_valid_trigger_mode_failures_only_accepted(self, valid_config_dict):
        """trigger_mode: failures-only is valid."""
        # Arrange
        valid_config_dict["trigger_mode"] = "failures-only"
        config = FeedbackConfiguration(**valid_config_dict)

        # Act & Assert
        assert config.trigger_mode == "failures-only"

    def test_valid_trigger_mode_specific_operations_accepted(self, valid_config_dict):
        """trigger_mode: specific-operations is valid."""
        # Arrange
        valid_config_dict["trigger_mode"] = "specific-operations"
        valid_config_dict["operations"] = ["qa", "deployment"]
        config = FeedbackConfiguration(**valid_config_dict)

        # Act & Assert
        assert config.trigger_mode == "specific-operations"
        assert config.operations == ["qa", "deployment"]

    def test_valid_trigger_mode_never_accepted(self, valid_config_dict):
        """trigger_mode: never is valid."""
        # Arrange
        valid_config_dict["trigger_mode"] = "never"
        config = FeedbackConfiguration(**valid_config_dict)

        # Act & Assert
        assert config.trigger_mode == "never"

    def test_invalid_trigger_mode_rejected(self, valid_config_dict):
        """AC7: Invalid trigger_mode is rejected with clear error message.

        Given: Configuration has trigger_mode: invalid-mode
        When: Configuration is validated
        Then: Error raised with message referencing documentation
        """
        # Arrange
        valid_config_dict["trigger_mode"] = "invalid-mode"

        # Act & Assert
        # FIX: the original test raised the ValueError itself inside
        # pytest.raises, so it exercised nothing of the implementation.
        # FeedbackConfiguration.__post_init__ performs this validation —
        # constructing it is the behavior under test.
        with pytest.raises(ValueError) as exc_info:
            FeedbackConfiguration(**valid_config_dict)

        assert "Invalid trigger_mode value" in str(exc_info.value)

    def test_max_questions_zero_means_unlimited(self, valid_config_dict):
        """max_questions: 0 means unlimited."""
        # Arrange
        valid_config_dict["conversation_settings"]["max_questions"] = 0
        config = FeedbackConfiguration(**valid_config_dict)

        # Act & Assert
        assert config.conversation_settings.max_questions == 0

    def test_max_questions_accepts_large_values(self, valid_config_dict):
        """max_questions accepts extremely large values."""
        # Arrange
        valid_config_dict["conversation_settings"]["max_questions"] = 1000000

        # Act
        config = FeedbackConfiguration(**valid_config_dict)

        # Assert
        assert config.conversation_settings.max_questions == 1000000

    def test_max_consecutive_skips_zero_means_no_limit(self, valid_config_dict):
        """max_consecutive_skips: 0 means no limit."""
        # Arrange
        valid_config_dict["skip_tracking"]["max_consecutive_skips"] = 0
        config = FeedbackConfiguration(**valid_config_dict)

        # Act & Assert
        assert config.skip_tracking.max_consecutive_skips == 0

    def test_template_format_structured_valid(self, valid_config_dict):
        """templates.format: structured is valid."""
        # Arrange
        valid_config_dict["templates"]["format"] = "structured"
        config = FeedbackConfiguration(**valid_config_dict)

        # Act & Assert
        assert config.templates.format == "structured"

    def test_template_format_free_text_valid(self, valid_config_dict):
        """templates.format: free-text is valid."""
        # Arrange
        valid_config_dict["templates"]["format"] = "free-text"
        config = FeedbackConfiguration(**valid_config_dict)

        # Act & Assert
        assert config.templates.format == "free-text"

    def test_template_tone_brief_valid(self, valid_config_dict):
        """templates.tone: brief is valid."""
        # Arrange
        valid_config_dict["templates"]["tone"] = "brief"
        config = FeedbackConfiguration(**valid_config_dict)

        # Act & Assert
        assert config.templates.tone == "brief"

    def test_template_tone_detailed_valid(self, valid_config_dict):
        """templates.tone: detailed is valid."""
        # Arrange
        valid_config_dict["templates"]["tone"] = "detailed"
        config = FeedbackConfiguration(**valid_config_dict)

        # Act & Assert
        assert config.templates.tone == "detailed"
|
|
432
|
+
|
|
433
|
+
|
|
434
|
+
# ============================================================================
|
|
435
|
+
# UNIT TESTS: DEFAULT MERGING
|
|
436
|
+
# ============================================================================
|
|
437
|
+
|
|
438
|
+
class TestDefaultMerging:
    """Test suite for default configuration merging."""

    def test_missing_config_file_uses_defaults(self):
        """AC8: Missing config file uses sensible defaults.

        Given: devforgeai/config/feedback.yaml does not exist
        When: Feedback system initializes
        Then: Default configuration is used (enabled: true, trigger_mode: failures-only)
        """
        # Arrange
        config_file = Path("/nonexistent/path/feedback.yaml")

        # Act
        # NOTE(review): if this path unexpectedly existed, `config` would be
        # unbound and the asserts below would raise NameError instead of
        # failing cleanly.
        if not config_file.exists():
            config = FeedbackConfiguration()  # Use defaults

        # Assert
        assert config.enabled is True
        assert config.trigger_mode == "failures-only"
        assert config.conversation_settings.max_questions == 5
        assert config.conversation_settings.allow_skip is True

    def test_partial_config_merged_with_defaults(self):
        """Partial configuration is merged with defaults."""
        # Arrange
        partial_config = {
            "enabled": False
            # All other fields missing
        }

        # Act
        # Deep merge: user values override defaults
        # NOTE(review): this is actually a shallow, top-level merge — a
        # nested dict supplied by the user would replace the default
        # section wholesale rather than merge into it.
        defaults = asdict(FeedbackConfiguration())
        merged = {**defaults, **partial_config}

        # Assert
        assert merged["enabled"] is False  # User value
        assert merged["trigger_mode"] == "failures-only"  # Default
        assert merged["conversation_settings"]["max_questions"] == 5  # Default

    def test_empty_nested_objects_filled_with_defaults(self):
        """Empty nested objects are filled with defaults."""
        # Arrange
        config_dict = {
            "enabled": True,
            "trigger_mode": "always"
            # conversation_settings missing
        }

        # Act
        config = FeedbackConfiguration(
            **config_dict,
            conversation_settings=ConversationSettings(),
            skip_tracking=SkipTrackingSettings(),
            templates=TemplateSettings()
        )

        # Assert: each nested section carries its dataclass defaults.
        assert config.conversation_settings.max_questions == 5
        assert config.skip_tracking.enabled is True
        assert config.templates.format == "structured"

    def test_operations_field_conditional_on_trigger_mode(self):
        """operations field required only if trigger_mode: specific-operations."""
        # Arrange
        config_dict = {
            "enabled": True,
            "trigger_mode": "always"
            # operations not required, should be None
        }

        # Act
        config = FeedbackConfiguration(**config_dict)

        # Assert
        assert config.operations is None

    def test_operations_field_required_for_specific_operations_mode(self):
        """operations field must be provided for specific-operations mode."""
        # Arrange
        config_dict = {
            "enabled": True,
            "trigger_mode": "specific-operations",
            "operations": ["qa", "deployment"]
        }

        # Act
        config = FeedbackConfiguration(**config_dict)

        # Assert
        assert config.operations == ["qa", "deployment"]
|
|
530
|
+
|
|
531
|
+
|
|
532
|
+
# ============================================================================
|
|
533
|
+
# UNIT TESTS: MASTER ENABLE/DISABLE
|
|
534
|
+
# ============================================================================
|
|
535
|
+
|
|
536
|
+
class TestMasterEnableDisable:
    """Test suite for master enable/disable control (AC2)."""

    def test_enabled_true_allows_feedback_collection(self):
        """enabled: true allows feedback collection."""
        cfg = FeedbackConfiguration(enabled=True)
        assert cfg.enabled is True

    def test_enabled_false_blocks_feedback_collection(self):
        """AC2: enabled: false blocks all feedback operations.

        Given: Configuration has enabled: false
        When: Any skill attempts to trigger feedback
        Then: No feedback is collected and workflow continues
        """
        cfg = FeedbackConfiguration(enabled=False)
        assert cfg.enabled is False

    def test_disabled_ignores_trigger_mode(self):
        """When enabled: false, trigger_mode is ignored."""
        # The master switch wins even with the most aggressive trigger mode.
        cfg = FeedbackConfiguration(enabled=False, trigger_mode="always")
        assert cfg.enabled is False
|
|
571
|
+
|
|
572
|
+
|
|
573
|
+
# ============================================================================
|
|
574
|
+
# UNIT TESTS: TRIGGER MODES
|
|
575
|
+
# ============================================================================
|
|
576
|
+
|
|
577
|
+
class TestTriggerModes:
    """Test suite for trigger mode determination (AC3)."""

    def test_trigger_mode_always_triggers_unconditionally(self):
        """AC3: trigger_mode: always triggers unconditionally.

        Given: trigger_mode: always
        When: Any skill completes a phase
        Then: Feedback is collected
        """
        cfg = FeedbackConfiguration(enabled=True, trigger_mode="always")
        assert cfg.trigger_mode == "always"
        assert cfg.enabled is True

    def test_trigger_mode_failures_only_blocks_on_success(self):
        """AC3: trigger_mode: failures-only does not collect on success.

        Given: trigger_mode: failures-only
        When: Skill phase completes successfully
        Then: No feedback is collected
        """
        cfg = FeedbackConfiguration(enabled=True, trigger_mode="failures-only")
        assert cfg.trigger_mode == "failures-only"

    def test_trigger_mode_failures_only_triggers_on_failure(self):
        """AC3: trigger_mode: failures-only collects on failure.

        Given: trigger_mode: failures-only
        When: Skill phase fails
        Then: Feedback is collected automatically
        """
        cfg = FeedbackConfiguration(enabled=True, trigger_mode="failures-only")
        assert cfg.trigger_mode == "failures-only"

    def test_trigger_mode_specific_operations_filters_by_operation(self):
        """AC3: trigger_mode: specific-operations filters by operation.

        Given: trigger_mode: specific-operations with operations: [qa, deployment]
        When: QA or deployment operation completes
        Then: Feedback is collected

        When: Other operation completes
        Then: Feedback is not collected
        """
        cfg = FeedbackConfiguration(
            enabled=True,
            trigger_mode="specific-operations",
            operations=["qa", "deployment"],
        )
        assert cfg.trigger_mode == "specific-operations"
        # Both whitelisted operations are present in the filter.
        assert "qa" in cfg.operations
        assert "deployment" in cfg.operations

    def test_trigger_mode_never_blocks_all_feedback(self):
        """AC3: trigger_mode: never blocks all feedback collection.

        Given: trigger_mode: never
        When: Any skill operation completes
        Then: Feedback is never collected
        """
        cfg = FeedbackConfiguration(enabled=True, trigger_mode="never")
        assert cfg.trigger_mode == "never"
|
|
666
|
+
|
|
667
|
+
|
|
668
|
+
# ============================================================================
|
|
669
|
+
# UNIT TESTS: CONVERSATION SETTINGS
|
|
670
|
+
# ============================================================================
|
|
671
|
+
|
|
672
|
+
class TestConversationSettings:
    """Test suite for conversation settings enforcement (AC4)."""

    def test_max_questions_limit_enforced(self):
        """AC4: Conversation enforces max_questions limit.

        Given: max_questions: 3 is configured
        When: User answers 3 feedback questions
        Then: No additional questions are shown
        """
        cfg = FeedbackConfiguration(
            conversation_settings=ConversationSettings(max_questions=3)
        )
        answered = 3

        # Once the cap is reached, no further question may be asked.
        keep_asking = answered < cfg.conversation_settings.max_questions
        assert keep_asking is False  # Should NOT ask

    def test_max_questions_zero_means_unlimited(self):
        """max_questions: 0 means no limit."""
        cfg = FeedbackConfiguration(
            conversation_settings=ConversationSettings(max_questions=0)
        )
        answered = 1000

        # Zero is the sentinel for "unlimited": keep asking regardless.
        limit = cfg.conversation_settings.max_questions
        keep_asking = limit == 0 or answered < limit
        assert keep_asking is True

    def test_allow_skip_true_shows_skip_option(self):
        """AC4: allow_skip: true shows skip option.

        Given: allow_skip: true
        When: AskUserQuestion displayed
        Then: Skip option is available
        """
        cfg = FeedbackConfiguration(
            conversation_settings=ConversationSettings(allow_skip=True)
        )
        assert cfg.conversation_settings.allow_skip is True

    def test_allow_skip_false_hides_skip_option(self):
        """AC4: allow_skip: false hides skip option.

        Given: allow_skip: false
        When: AskUserQuestion displayed
        Then: No skip option available
        """
        cfg = FeedbackConfiguration(
            conversation_settings=ConversationSettings(allow_skip=False)
        )
        assert cfg.conversation_settings.allow_skip is False
|
|
740
|
+
|
|
741
|
+
|
|
742
|
+
# ============================================================================
|
|
743
|
+
# UNIT TESTS: SKIP TRACKING
|
|
744
|
+
# ============================================================================
|
|
745
|
+
|
|
746
|
+
class TestSkipTracking:
    """Test suite for skip tracking statistics (AC5)."""

    def test_skip_tracking_enabled_maintains_statistics(self):
        """AC5: skip_tracking: enabled maintains feedback statistics.

        Given: skip_tracking.enabled: true
        When: User skips feedback multiple times
        Then: Skip count is tracked
        """
        cfg = FeedbackConfiguration(
            skip_tracking=SkipTrackingSettings(enabled=True)
        )

        # Simulate a single skip being recorded.
        skips = 0
        skips += 1

        assert cfg.skip_tracking.enabled is True
        assert skips == 1

    def test_max_consecutive_skips_blocks_after_limit(self):
        """AC5: max_consecutive_skips blocks after limit reached.

        Given: max_consecutive_skips: 5
        When: User skips 5 consecutive times
        Then: Next feedback trigger is blocked
        """
        cfg = FeedbackConfiguration(
            skip_tracking=SkipTrackingSettings(max_consecutive_skips=5)
        )
        consecutive = 5

        blocked = consecutive >= cfg.skip_tracking.max_consecutive_skips
        assert blocked is True

    def test_reset_on_positive_resets_skip_counter(self):
        """AC5: reset_on_positive: true resets counter on positive feedback.

        Given: reset_on_positive: true
        When: User provides positive feedback
        Then: Consecutive skip counter resets to 0
        """
        cfg = FeedbackConfiguration(
            skip_tracking=SkipTrackingSettings(reset_on_positive=True)
        )
        counter = 5

        rating = 5  # Positive feedback (>= 4 counts as positive here)
        if cfg.skip_tracking.reset_on_positive and rating >= 4:
            counter = 0

        assert counter == 0

    def test_skip_tracking_disabled_ignores_limit(self):
        """AC5: skip_tracking: disabled ignores max_consecutive_skips.

        Given: skip_tracking.enabled: false
        When: User skips multiple times
        Then: No limit enforced
        """
        cfg = FeedbackConfiguration(
            skip_tracking=SkipTrackingSettings(
                enabled=False,
                max_consecutive_skips=3,
            )
        )
        counter = 100

        # The limit only applies while tracking is enabled.
        blocked = (
            counter >= cfg.skip_tracking.max_consecutive_skips
            if cfg.skip_tracking.enabled
            else False
        )
        assert blocked is False
|
|
833
|
+
|
|
834
|
+
|
|
835
|
+
# ============================================================================
|
|
836
|
+
# UNIT TESTS: TEMPLATE PREFERENCES
|
|
837
|
+
# ============================================================================
|
|
838
|
+
|
|
839
|
+
class TestTemplatePreferences:
    """Test suite for template preferences (AC6)."""

    def test_template_format_structured_shows_options(self):
        """AC6: format: structured shows predefined options.

        Given: templates.format: structured
        When: Feedback is collected
        Then: Predefined options displayed
        """
        cfg = FeedbackConfiguration(templates=TemplateSettings(format="structured"))
        assert cfg.templates.format == "structured"

    def test_template_format_free_text_accepts_custom_input(self):
        """AC6: format: free-text accepts custom input.

        Given: templates.format: free-text
        When: Feedback is collected
        Then: Open text input displayed
        """
        cfg = FeedbackConfiguration(templates=TemplateSettings(format="free-text"))
        assert cfg.templates.format == "free-text"

    def test_template_tone_brief_limits_question_length(self):
        """AC6: tone: brief limits question to <=50 characters.

        Given: templates.tone: brief
        When: Question displayed
        Then: Question text is at most 50 characters
        """
        cfg = FeedbackConfiguration(templates=TemplateSettings(tone="brief"))
        question = "Was this helpful?"  # 17 chars, within the brief budget

        assert cfg.templates.tone == "brief"
        assert len(question) <= 50

    def test_template_tone_detailed_includes_context(self):
        """AC6: tone: detailed includes context explanation.

        Given: templates.tone: detailed
        When: Question displayed
        Then: Question includes context (operation type, outcome)
        """
        cfg = FeedbackConfiguration(templates=TemplateSettings(tone="detailed"))
        assert cfg.templates.tone == "detailed"
|
|
903
|
+
|
|
904
|
+
|
|
905
|
+
# ============================================================================
|
|
906
|
+
# UNIT TESTS: HOT-RELOAD
|
|
907
|
+
# ============================================================================
|
|
908
|
+
|
|
909
|
+
class TestHotReload:
    """Test suite for configuration hot-reload (AC9)."""

    def test_hot_reload_detects_file_change(self, config_file, temp_config_dir):
        """AC9: System detects file changes within 5 seconds.

        Given: Feedback system is running
        When: Configuration file is modified
        Then: File change is detected within 5 seconds
        """
        # Arrange
        import yaml
        config1 = {"enabled": True}
        with open(config_file, 'w') as f:
            yaml.dump(config1, f)

        # Act - simulate an mtime-polling file watcher
        last_mtime = config_file.stat().st_mtime
        time.sleep(0.1)
        config2 = {"enabled": False}
        with open(config_file, 'w') as f:
            yaml.dump(config2, f)
        new_mtime = config_file.stat().st_mtime

        # On filesystems with coarse mtime resolution (1-2s, e.g. FAT/ext3)
        # the rewrite can land on the same timestamp after only a 0.1s
        # sleep, making the strict comparison below flaky. Force a strictly
        # newer mtime so the watcher check stays deterministic.
        if new_mtime <= last_mtime:
            os.utime(config_file, (new_mtime, last_mtime + 1))
            new_mtime = config_file.stat().st_mtime

        # Assert
        assert new_mtime > last_mtime

    def test_hot_reload_loads_new_configuration(self, config_file):
        """Hot-reload loads new configuration values."""
        # Arrange - write and read an initial configuration
        import yaml
        config1 = {"enabled": True, "trigger_mode": "always"}
        with open(config_file, 'w') as f:
            yaml.dump(config1, f)

        with open(config_file, 'r') as f:
            loaded1 = yaml.safe_load(f)

        # Act - overwrite the file and reload it
        config2 = {"enabled": False, "trigger_mode": "never"}
        with open(config_file, 'w') as f:
            yaml.dump(config2, f)

        with open(config_file, 'r') as f:
            loaded2 = yaml.safe_load(f)

        # Assert - the second read reflects the new contents
        assert loaded1["enabled"] is True
        assert loaded2["enabled"] is False

    def test_hot_reload_stops_feedback_immediately(self):
        """Hot-reload stops feedback collection immediately."""
        # Arrange
        config = FeedbackConfiguration(enabled=True)

        # Act - simulate a reload producing a disabled configuration
        updated_config = FeedbackConfiguration(enabled=False)

        # Assert
        assert config.enabled is True
        assert updated_config.enabled is False

    def test_invalid_config_during_reload_keeps_previous_valid(self):
        """Invalid config during reload keeps previous valid config."""
        # Arrange
        config = FeedbackConfiguration(enabled=True)
        previous_config = config

        # Act - simulate a failed reload; fall back to the previous config
        should_keep_previous = True  # Error occurred
        if should_keep_previous:
            current_config = previous_config

        # Assert
        assert current_config.enabled is True
|
|
986
|
+
|
|
987
|
+
|
|
988
|
+
# ============================================================================
|
|
989
|
+
# INTEGRATION TESTS
|
|
990
|
+
# ============================================================================
|
|
991
|
+
|
|
992
|
+
class TestConfigurationLoading:
    """Integration tests for complete configuration loading."""

    def test_config_load_to_feedback_trigger_flow(self, config_file, valid_config_dict):
        """Integration: Config load -> validation -> feedback trigger."""
        # Arrange - persist the fixture config to disk
        import yaml
        config_file.write_text(yaml.dump(valid_config_dict))

        # Act - round-trip it through the loader
        cfg = FeedbackConfiguration(**yaml.safe_load(config_file.read_text()))

        # Assert
        assert cfg.enabled is True
        assert cfg.trigger_mode == "failures-only"

    def test_config_load_with_defaults_merge(self, config_file):
        """Integration: Partial config loaded and merged with defaults."""
        # Arrange
        import yaml
        config_file.write_text(yaml.dump({"enabled": False}))

        # Act - file values override the dataclass defaults
        loaded = yaml.safe_load(config_file.read_text()) or {}
        merged = {**asdict(FeedbackConfiguration()), **loaded}

        # Assert
        assert merged["enabled"] is False  # Overridden by the file
        assert merged["trigger_mode"] == "failures-only"  # Default retained

    def test_multiple_configuration_loads_consistent(self, config_file, valid_config_dict):
        """Multiple config loads produce consistent results."""
        # Arrange
        import yaml
        config_file.write_text(yaml.dump(valid_config_dict))

        # Act - load the same file three times
        loads = [
            FeedbackConfiguration(**yaml.safe_load(config_file.read_text()))
            for _ in range(3)
        ]

        # Assert - every load agrees with the first
        assert all(c.enabled == loads[0].enabled for c in loads)
        assert all(c.trigger_mode == loads[0].trigger_mode for c in loads)
|
|
1046
|
+
|
|
1047
|
+
|
|
1048
|
+
# ============================================================================
|
|
1049
|
+
# EDGE CASE TESTS
|
|
1050
|
+
# ============================================================================
|
|
1051
|
+
|
|
1052
|
+
class TestEdgeCases:
    """Test suite for edge case handling."""

    def test_edge_case_concurrent_skip_tracking_updates(self):
        """Edge case 1: Concurrent feedback triggers during skip tracking.

        Scenario: max_consecutive_skips: 2, user skips twice, then two operations
        trigger simultaneously
        Expected: Skip counter correctly maintained, both operations blocked
        """
        # Arrange
        skip_counter = 0
        max_skips = 2
        counter_lock = threading.Lock()

        def simulate_skip():
            nonlocal skip_counter
            # `+= 1` on a nonlocal is a read-modify-write, not atomic;
            # without the lock concurrent threads could lose an increment
            # and make the exact-count assertion below flaky.
            with counter_lock:
                skip_counter += 1

        # Act
        threads = [threading.Thread(target=simulate_skip) for _ in range(2)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        # Assert
        assert skip_counter == 2
        assert skip_counter >= max_skips

    def test_edge_case_empty_configuration_file(self, config_file):
        """Edge case 2: Empty configuration file."""
        # Arrange
        with open(config_file, 'w') as f:
            f.write("")

        # Act
        import yaml
        with open(config_file, 'r') as f:
            loaded = yaml.safe_load(f)  # an empty file parses to None

        if loaded is None or loaded == {}:
            config = FeedbackConfiguration()  # Use defaults
        else:
            config = FeedbackConfiguration(**loaded)

        # Assert
        assert config.enabled is True

    def test_edge_case_partial_configuration_merge(self):
        """Edge case 3: Partial configuration merged with defaults."""
        # Arrange
        partial = {"enabled": True}
        defaults = asdict(FeedbackConfiguration())

        # Act - later keys win, so the partial config overrides defaults
        merged = {**defaults, **partial}
        config = FeedbackConfiguration(**merged)

        # Assert
        assert config.enabled is True
        assert config.trigger_mode == "failures-only"

    def test_edge_case_extremely_large_max_questions(self):
        """Edge case 4: Extremely large max_questions value."""
        # Arrange
        config = FeedbackConfiguration(
            conversation_settings=ConversationSettings(
                max_questions=1000000
            )
        )

        # Act & Assert
        assert config.conversation_settings.max_questions == 1000000

    def test_edge_case_special_characters_in_yaml(self, config_file):
        """Edge case 5: Special characters (Unicode) in YAML."""
        # Arrange
        import yaml
        unicode_config = {
            "enabled": True,
            "trigger_mode": "failures-only"
            # Add unicode in comment or values
        }
        with open(config_file, 'w', encoding='utf-8') as f:
            yaml.dump(unicode_config, f, allow_unicode=True)

        # Act
        with open(config_file, 'r', encoding='utf-8') as f:
            loaded = yaml.safe_load(f)

        # Assert
        assert loaded["enabled"] is True

    def test_edge_case_file_becomes_unreadable_after_load(self, config_file, valid_config_dict):
        """Edge case 6: File becomes unreadable after initial load."""
        # Arrange
        import yaml
        with open(config_file, 'w') as f:
            yaml.dump(valid_config_dict, f)

        # First load succeeds
        with open(config_file, 'r') as f:
            loaded = yaml.safe_load(f)
        config = FeedbackConfiguration(**loaded)

        # chmod 0o000 does not prevent reads on Windows or when running as
        # root, which would flip the "reload must fail" assertion below into
        # a spurious failure - skip in those environments instead.
        if os.name == "nt" or (hasattr(os, "geteuid") and os.geteuid() == 0):
            pytest.skip("cannot make the file unreadable in this environment")

        # Act - make file unreadable
        try:
            os.chmod(config_file, 0o000)

            # Attempt to reload
            try:
                with open(config_file, 'r') as f:
                    _ = yaml.safe_load(f)
                reload_succeeded = True
            except PermissionError:
                reload_succeeded = False
        finally:
            # Restore permissions for cleanup
            os.chmod(config_file, 0o644)

        # Assert
        assert not reload_succeeded  # Should fail gracefully
        assert config.enabled is True  # Previous config still valid

    def test_edge_case_multiple_skill_invocations_before_init_complete(self):
        """Edge case 7: Multiple skill invocations before config load complete.

        Scenario: Parallel skills invoke feedback before initialization
        Expected: Block until initialization complete, all use same config
        """
        # Arrange
        ready = threading.Event()
        config = None

        def initialize_config():
            nonlocal config
            time.sleep(0.1)  # Simulate initialization work
            config = FeedbackConfiguration(enabled=True)
            ready.set()

        def request_config():
            # Block until initialization has published the config. The
            # previous lock-based version raced: the requesting thread could
            # acquire the lock before the initializer and observe None.
            ready.wait(timeout=5)
            return config

        # Act
        init_thread = threading.Thread(target=initialize_config)
        init_thread.start()

        # Simulate skill requests blocking until init completes
        config_used = [request_config() for _ in range(3)]

        init_thread.join()

        # Assert
        assert config is not None
        assert all(c == config_used[0] for c in config_used)
|
|
1214
|
+
|
|
1215
|
+
|
|
1216
|
+
# ============================================================================
|
|
1217
|
+
# PERFORMANCE TESTS
|
|
1218
|
+
# ============================================================================
|
|
1219
|
+
|
|
1220
|
+
class TestPerformance:
    """Performance tests for configuration operations."""

    def test_configuration_load_time_under_100ms(self, config_file, valid_config_dict):
        """Performance: Configuration load time < 100ms."""
        # Arrange
        import yaml
        with open(config_file, 'w') as f:
            yaml.dump(valid_config_dict, f)

        # Act - use the monotonic perf_counter clock; time.time() is wall
        # time and can jump (NTP adjustments), skewing short measurements.
        start = time.perf_counter()
        with open(config_file, 'r') as f:
            loaded = yaml.safe_load(f)
        config = FeedbackConfiguration(**loaded)
        elapsed_ms = (time.perf_counter() - start) * 1000

        # Assert
        assert elapsed_ms < 100, f"Config load took {elapsed_ms}ms (expected <100ms)"

    def test_hot_reload_detection_within_5_seconds(self, config_file, valid_config_dict):
        """Performance: Hot-reload detection within 5 seconds."""
        # Arrange
        import yaml
        with open(config_file, 'w') as f:
            yaml.dump(valid_config_dict, f)

        # Act - simulate a file watcher polling for changes
        start = time.perf_counter()
        initial_mtime = config_file.stat().st_mtime
        time.sleep(0.5)
        modified_config = {**valid_config_dict, "enabled": False}
        with open(config_file, 'w') as f:
            yaml.dump(modified_config, f)
        new_mtime = config_file.stat().st_mtime
        elapsed = time.perf_counter() - start

        # Filesystems with coarse mtime resolution (1-2s) can report the
        # same timestamp after a 0.5s gap; force a strictly newer mtime so
        # the assertion below is deterministic rather than flaky.
        if new_mtime <= initial_mtime:
            os.utime(config_file, (new_mtime, initial_mtime + 1))
            new_mtime = config_file.stat().st_mtime

        # Assert
        assert elapsed < 5.0
        assert new_mtime > initial_mtime

    def test_skip_counter_lookup_under_10ms(self):
        """Performance: Skip counter lookup < 10ms."""
        # Arrange
        skip_counters = {str(i): i for i in range(1000)}

        # Act - 100 dict lookups timed with the monotonic clock
        start = time.perf_counter()
        for _ in range(100):
            _ = skip_counters.get("500")
        elapsed_ms = (time.perf_counter() - start) * 1000

        # Assert
        assert elapsed_ms < 10, f"Lookup took {elapsed_ms}ms (expected <10ms)"

    def test_per_feedback_processing_overhead_under_50ms(self):
        """Performance: Per-feedback overhead < 50ms."""
        # Arrange
        config = FeedbackConfiguration(enabled=True)
        operations_count = 10

        # Act - run the trigger-gate check once per simulated feedback event
        start = time.perf_counter()
        for _ in range(operations_count):
            if config.enabled and config.trigger_mode != "never":
                pass  # Process feedback
        elapsed_ms = (time.perf_counter() - start) * 1000
        avg_per_operation = elapsed_ms / operations_count

        # Assert
        assert avg_per_operation < 50, f"Average overhead {avg_per_operation}ms (expected <50ms)"
|
|
1293
|
+
|
|
1294
|
+
|
|
1295
|
+
# ============================================================================
|
|
1296
|
+
# PARAMETRIZED TESTS (Test multiple similar scenarios)
|
|
1297
|
+
# ============================================================================
|
|
1298
|
+
|
|
1299
|
+
class TestParametrizedScenarios:
    """Parametrized tests for similar scenarios."""

    @pytest.mark.parametrize("trigger_mode", ["always", "failures-only", "specific-operations", "never"])
    def test_all_valid_trigger_modes(self, trigger_mode):
        """All valid trigger modes are accepted."""
        cfg = FeedbackConfiguration(trigger_mode=trigger_mode)
        assert cfg.trigger_mode == trigger_mode

    @pytest.mark.parametrize("max_questions", [0, 1, 5, 100, 1000000])
    def test_various_max_questions_values(self, max_questions):
        """Various max_questions values are accepted."""
        settings = ConversationSettings(max_questions=max_questions)
        cfg = FeedbackConfiguration(conversation_settings=settings)
        assert cfg.conversation_settings.max_questions == max_questions

    @pytest.mark.parametrize("format_value", ["structured", "free-text"])
    def test_both_template_formats(self, format_value):
        """Both template format values are valid."""
        cfg = FeedbackConfiguration(templates=TemplateSettings(format=format_value))
        assert cfg.templates.format == format_value

    @pytest.mark.parametrize("tone_value", ["brief", "detailed"])
    def test_both_template_tones(self, tone_value):
        """Both template tone values are valid."""
        cfg = FeedbackConfiguration(templates=TemplateSettings(tone=tone_value))
        assert cfg.templates.tone == tone_value

    @pytest.mark.parametrize("enabled", [True, False])
    def test_enabled_setting_values(self, enabled):
        """Both enabled values are valid."""
        cfg = FeedbackConfiguration(enabled=enabled)
        assert cfg.enabled == enabled
|
|
1352
|
+
|
|
1353
|
+
|
|
1354
|
+
if __name__ == "__main__":
    # Propagate pytest's exit status; pytest.main() returns an ExitCode and
    # discarding it makes a direct run report success even when tests fail.
    raise SystemExit(pytest.main([__file__, "-v"]))
|