devforgeai 1.0.4 → 1.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CLAUDE.md +120 -0
- package/package.json +9 -1
- package/src/CLAUDE.md +699 -0
- package/src/claude/scripts/README.md +396 -0
- package/src/claude/scripts/audit-command-skill-overlap.sh +67 -0
- package/src/claude/scripts/check-hooks-fast.sh +70 -0
- package/src/claude/scripts/devforgeai-validate +6 -0
- package/src/claude/scripts/devforgeai_cli/README.md +531 -0
- package/src/claude/scripts/devforgeai_cli/__init__.py +12 -0
- package/src/claude/scripts/devforgeai_cli/cli.py +716 -0
- package/src/claude/scripts/devforgeai_cli/commands/__init__.py +1 -0
- package/src/claude/scripts/devforgeai_cli/commands/check_hooks.py +384 -0
- package/src/claude/scripts/devforgeai_cli/commands/invoke_hooks.py +149 -0
- package/src/claude/scripts/devforgeai_cli/commands/phase_commands.py +731 -0
- package/src/claude/scripts/devforgeai_cli/commands/validate_installation.py +412 -0
- package/src/claude/scripts/devforgeai_cli/context_extraction.py +426 -0
- package/src/claude/scripts/devforgeai_cli/feedback/AC_TO_TEST_MAPPING.md +636 -0
- package/src/claude/scripts/devforgeai_cli/feedback/DELIVERY_SUMMARY.txt +329 -0
- package/src/claude/scripts/devforgeai_cli/feedback/README_TEST_SPECS.md +486 -0
- package/src/claude/scripts/devforgeai_cli/feedback/TEST_IMPLEMENTATION_GUIDE.md +529 -0
- package/src/claude/scripts/devforgeai_cli/feedback/TEST_SPECIFICATIONS.md +2652 -0
- package/src/claude/scripts/devforgeai_cli/feedback/TEST_SPECS_INDEX.md +398 -0
- package/src/claude/scripts/devforgeai_cli/feedback/__init__.py +34 -0
- package/src/claude/scripts/devforgeai_cli/feedback/adaptive_questioning_engine.py +581 -0
- package/src/claude/scripts/devforgeai_cli/feedback/aggregation.py +179 -0
- package/src/claude/scripts/devforgeai_cli/feedback/commands.py +535 -0
- package/src/claude/scripts/devforgeai_cli/feedback/config_defaults.py +58 -0
- package/src/claude/scripts/devforgeai_cli/feedback/config_manager.py +423 -0
- package/src/claude/scripts/devforgeai_cli/feedback/config_models.py +192 -0
- package/src/claude/scripts/devforgeai_cli/feedback/config_schema.py +140 -0
- package/src/claude/scripts/devforgeai_cli/feedback/coverage.json +1 -0
- package/src/claude/scripts/devforgeai_cli/feedback/feature_flag.py +152 -0
- package/src/claude/scripts/devforgeai_cli/feedback/feedback_indexer.py +394 -0
- package/src/claude/scripts/devforgeai_cli/feedback/hot_reload.py +226 -0
- package/src/claude/scripts/devforgeai_cli/feedback/longitudinal.py +115 -0
- package/src/claude/scripts/devforgeai_cli/feedback/models.py +67 -0
- package/src/claude/scripts/devforgeai_cli/feedback/question_router.py +236 -0
- package/src/claude/scripts/devforgeai_cli/feedback/retrospective.py +233 -0
- package/src/claude/scripts/devforgeai_cli/feedback/skip_tracker.py +177 -0
- package/src/claude/scripts/devforgeai_cli/feedback/skip_tracking.py +221 -0
- package/src/claude/scripts/devforgeai_cli/feedback/template_engine.py +549 -0
- package/src/claude/scripts/devforgeai_cli/feedback/validation.py +163 -0
- package/src/claude/scripts/devforgeai_cli/headless/__init__.py +30 -0
- package/src/claude/scripts/devforgeai_cli/headless/answer_models.py +206 -0
- package/src/claude/scripts/devforgeai_cli/headless/answer_resolver.py +204 -0
- package/src/claude/scripts/devforgeai_cli/headless/exceptions.py +36 -0
- package/src/claude/scripts/devforgeai_cli/headless/pattern_matcher.py +156 -0
- package/src/claude/scripts/devforgeai_cli/hooks.py +313 -0
- package/src/claude/scripts/devforgeai_cli/metrics/__init__.py +46 -0
- package/src/claude/scripts/devforgeai_cli/metrics/command_metrics.py +142 -0
- package/src/claude/scripts/devforgeai_cli/metrics/failure_modes.py +152 -0
- package/src/claude/scripts/devforgeai_cli/metrics/story_segmentation.py +181 -0
- package/src/claude/scripts/devforgeai_cli/orchestrate_hooks.py +780 -0
- package/src/claude/scripts/devforgeai_cli/phase_state.py +1229 -0
- package/src/claude/scripts/devforgeai_cli/session/__init__.py +30 -0
- package/src/claude/scripts/devforgeai_cli/session/checkpoint.py +268 -0
- package/src/claude/scripts/devforgeai_cli/tests/__init__.py +1 -0
- package/src/claude/scripts/devforgeai_cli/tests/conftest.py +29 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/TEST_EXECUTION_GUIDE.md +298 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/__init__.py +3 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_adaptive_questioning_engine.py +2171 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_aggregation.py +476 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_config_defaults.py +133 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_config_manager.py +592 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_config_models.py +373 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_config_schema.py +130 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_configuration_management.py +1355 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_edge_cases.py +308 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_feature_flag.py +307 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_feedback_indexer.py +384 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_hot_reload.py +580 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_integration.py +402 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_models.py +105 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_question_routing.py +262 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_retrospective.py +333 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_skip_tracker.py +410 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_skip_tracking.py +159 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_skip_tracking_integration.py +1155 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_template_engine.py +1389 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_validation_comprehensive.py +210 -0
- package/src/claude/scripts/devforgeai_cli/tests/fixtures/autonomous-deferral-story.md +46 -0
- package/src/claude/scripts/devforgeai_cli/tests/fixtures/missing-impl-notes.md +31 -0
- package/src/claude/scripts/devforgeai_cli/tests/fixtures/valid-deferral-story.md +46 -0
- package/src/claude/scripts/devforgeai_cli/tests/fixtures/valid-story-complete.md +48 -0
- package/src/claude/scripts/devforgeai_cli/tests/manual_test_invoke_hooks.sh +200 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/DELIVERABLES.md +518 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/TEST_SUMMARY.md +468 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/__init__.py +6 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/fixtures/corrupted-checkpoint.json +1 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/fixtures/missing-fields-checkpoint.json +4 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/fixtures/valid-checkpoint.json +15 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/test_checkpoint.py +851 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_check_hooks.py +1886 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_depends_on_normalizer.py +171 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_dod_validator.py +97 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_invoke_hooks.py +1902 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_phase_commands.py +320 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_phase_commands_error_handling.py +1021 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_phase_commands_import.py +697 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_phase_state.py +2187 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_skip_tracking.py +2141 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_skip_tracking_coverage_gap.py +195 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_subagent_enforcement.py +539 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_validate_installation.py +361 -0
- package/src/claude/scripts/devforgeai_cli/utils/__init__.py +11 -0
- package/src/claude/scripts/devforgeai_cli/utils/depends_on_normalizer.py +149 -0
- package/src/claude/scripts/devforgeai_cli/utils/markdown_parser.py +219 -0
- package/src/claude/scripts/devforgeai_cli/utils/story_analyzer.py +249 -0
- package/src/claude/scripts/devforgeai_cli/utils/yaml_parser.py +152 -0
- package/src/claude/scripts/devforgeai_cli/validators/__init__.py +27 -0
- package/src/claude/scripts/devforgeai_cli/validators/ast_grep_validator.py +373 -0
- package/src/claude/scripts/devforgeai_cli/validators/context_validator.py +180 -0
- package/src/claude/scripts/devforgeai_cli/validators/dod_validator.py +309 -0
- package/src/claude/scripts/devforgeai_cli/validators/git_validator.py +107 -0
- package/src/claude/scripts/devforgeai_cli/validators/grep_fallback.py +300 -0
- package/src/claude/scripts/install_hooks.sh +186 -0
- package/src/claude/scripts/invoke_feedback_hooks.sh +59 -0
- package/src/claude/scripts/migrate-ac-headers.sh +122 -0
- package/src/claude/scripts/plan_file_kb.sh +704 -0
- package/src/claude/scripts/requirements.txt +8 -0
- package/src/claude/scripts/session_catalog.sh +543 -0
- package/src/claude/scripts/setup.py +55 -0
- package/src/claude/scripts/start-devforgeai.sh +16 -0
- package/src/claude/scripts/statusline.sh +27 -0
- package/src/claude/scripts/validate_deferrals.py +344 -0
- package/src/claude/skills/devforgeai-qa/SKILL.md +1 -1
- package/src/claude/skills/researching-market/SKILL.md +2 -1
- package/src/cli/lib/copier.js +13 -1
- package/src/claude/skills/designing-systems/scripts/__pycache__/detect_anti_patterns.cpython-312.pyc +0 -0
- package/src/claude/skills/designing-systems/scripts/__pycache__/validate_all_context.cpython-312.pyc +0 -0
- package/src/claude/skills/designing-systems/scripts/__pycache__/validate_architecture.cpython-312.pyc +0 -0
- package/src/claude/skills/designing-systems/scripts/__pycache__/validate_dependencies.cpython-312.pyc +0 -0
- package/src/claude/skills/devforgeai-story-creation/scripts/__pycache__/migrate_story_v1_to_v2.cpython-312.pyc +0 -0
- package/src/claude/skills/devforgeai-story-creation/scripts/tests/__pycache__/measure_accuracy.cpython-312.pyc +0 -0
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Longitudinal feedback tracking (AC6)
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Dict, List, Any
|
|
8
|
+
from datetime import datetime
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def correlate_feedback_across_stories(feedback_dir: Path, user_id: str) -> Dict[str, Any]:
    """
    Correlate feedback across stories into a chronological timeline.

    Scans every story subdirectory of *feedback_dir* for
    ``*-retrospective.json`` files and builds a timeline sorted by
    timestamp.

    Args:
        feedback_dir: Root feedback directory containing one
            subdirectory per story.
        user_id: User identifier.  NOTE(review): currently unused —
            all feedback under ``feedback_dir`` is aggregated
            regardless of user; kept for interface stability.

    Returns:
        Dict with a 'timeline' list; each entry carries 'timestamp',
        'story_id', 'workflow_type' and 'success_status'.

    Raises:
        KeyError: if a retrospective file lacks one of the expected keys.
    """
    # Guard: a missing feedback directory simply means no history yet
    # (previously this raised FileNotFoundError from iterdir()).
    if not feedback_dir.is_dir():
        return {'timeline': []}

    all_feedback = []
    for story_dir in feedback_dir.iterdir():
        if not story_dir.is_dir():
            continue
        for feedback_file in story_dir.glob('*-retrospective.json'):
            with open(feedback_file, 'r') as f:
                all_feedback.append(json.load(f))

    # ISO-8601 timestamps sort correctly as plain strings.
    all_feedback.sort(key=lambda fb: fb['timestamp'])

    timeline = [
        {
            'timestamp': fb['timestamp'],
            'story_id': fb['story_id'],
            'workflow_type': fb['workflow_type'],
            'success_status': fb['success_status'],
        }
        for fb in all_feedback
    ]
    return {'timeline': timeline}
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def identify_improvement_trajectories(feedback_dir: Path, user_id: str) -> Dict[str, Any]:
    """
    Identify improvement trajectories over time.

    Extracts integer rating responses to ``*_01`` questions (e.g.
    ``dev_success_01``, the TDD-confidence rating) from every stored
    retrospective and reports whether the rating trend is improving.

    Args:
        feedback_dir: Root feedback directory (one subdirectory per story).
        user_id: User identifier.  NOTE(review): currently unused —
            kept for interface stability.

    Returns:
        Dict with a 'metrics' list; empty when fewer than two rating
        data points exist.
    """
    # Fix: the original also called correlate_feedback_across_stories()
    # here and discarded the result, re-reading every feedback file for
    # nothing — the dead call has been removed.
    if not feedback_dir.is_dir():
        return {'metrics': []}

    # Extract confidence ratings over time (dev_success_01 etc.).
    confidence_values = []
    for story_dir in feedback_dir.iterdir():
        if not story_dir.is_dir():
            continue
        for feedback_file in sorted(story_dir.glob('*-retrospective.json')):
            with open(feedback_file, 'r') as f:
                feedback_data = json.load(f)
            for question in feedback_data.get('questions', []):
                if question.get('question_id', '').endswith('_01') and isinstance(question.get('response'), int):
                    confidence_values.append(question['response'])

    # Need at least two data points to call anything a trend.
    if len(confidence_values) < 2:
        return {'metrics': []}

    # A flat line counts as 'improving' (>=), matching prior semantics.
    trend = 'improving' if confidence_values[-1] >= confidence_values[0] else 'declining'

    return {
        'metrics': [{
            'name': 'TDD confidence',
            'trend': trend,
            'start_value': confidence_values[0],
            'end_value': confidence_values[-1],
            'data_points': len(confidence_values),
        }]
    }
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def export_personal_journal(feedback_dir: Path, user_id: str) -> Path:
    """
    Export a personal retrospective journal as Markdown.

    Builds a timeline section from the correlated feedback and a
    trajectory section from the improvement metrics, then writes the
    result to ``<feedback_dir>/<user_id>/journal.md``.

    Args:
        feedback_dir: Root feedback directory.
        user_id: User identifier; also names the output subdirectory.

    Returns:
        Path to the written journal.md file.
    """
    correlation = correlate_feedback_across_stories(feedback_dir, user_id)
    trajectories = identify_improvement_trajectories(feedback_dir, user_id)

    target_dir = feedback_dir / user_id
    target_dir.mkdir(parents=True, exist_ok=True)
    journal_path = target_dir / 'journal.md'

    # Assemble the document as parts and join once at the end.
    parts = [
        "# Retrospective Journal\n\n",
        f"**User:** {user_id}\n\n",
        "## Timeline\n\n",
    ]
    for entry in correlation['timeline']:
        parts.append(
            f"- **{entry['story_id']}** ({entry['timestamp'][:10]}) - "
            f"{entry['workflow_type']} - {entry['success_status']}\n"
        )

    parts.append("\n## Improvement Trajectory\n\n")
    for metric in trajectories.get('metrics', []):
        parts.append(
            f"- **{metric['name']}**: {metric['trend']} "
            f"(from {metric['start_value']} to {metric['end_value']})\n"
        )

    with open(journal_path, 'w') as f:
        f.write(''.join(parts))

    return journal_path
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Data models for feedback system
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from typing import List, Optional, Any
|
|
7
|
+
from datetime import datetime
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
@dataclass
class Question:
    """A single feedback question, optionally carrying its response."""
    question_id: str
    question_text: str
    response_type: str  # one of 'rating', 'multiple_choice', 'open_text'
    scale: Optional[str] = None  # rating questions only, e.g. "1-5"
    options: Optional[List[str]] = None  # multiple-choice questions only
    response: Optional[Any] = None  # filled in once the user answers
    skip: bool = False  # True when the user skipped this question

    def __post_init__(self):
        """Reject configurations that are invalid for their response type."""
        if self.response_type == 'rating':
            if self.scale is None:
                raise ValueError(f"Rating question {self.question_id} must have scale")
        elif self.response_type == 'multiple_choice':
            if not self.options or len(self.options) < 2:
                raise ValueError(f"Multiple choice question {self.question_id} must have at least 2 options")
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
@dataclass
class FeedbackSession:
    """A complete feedback session for one story/workflow run."""
    feedback_id: str
    timestamp: str  # ISO 8601
    story_id: str
    epic_id: Optional[str] = None
    workflow_type: str = ''  # 'dev', 'qa', 'orchestrate', 'release', etc.
    success_status: str = ''  # 'success', 'failed', 'partial'
    questions: List[dict] = field(default_factory=list)
    metadata: dict = field(default_factory=dict)

    # Field order used for (de)serialization; matches the JSON layout.
    _FIELDS = ('feedback_id', 'timestamp', 'story_id', 'epic_id',
               'workflow_type', 'success_status', 'questions', 'metadata')

    def to_dict(self) -> dict:
        """Serialize to a plain dict suitable for json.dump."""
        return {name: getattr(self, name) for name in self._FIELDS}

    @classmethod
    def from_dict(cls, data: dict) -> 'FeedbackSession':
        """Build a FeedbackSession from a dict (inverse of to_dict)."""
        return cls(
            feedback_id=data['feedback_id'],
            timestamp=data['timestamp'],
            story_id=data['story_id'],
            epic_id=data.get('epic_id'),
            workflow_type=data.get('workflow_type', ''),
            success_status=data.get('success_status', ''),
            questions=data.get('questions', []),
            metadata=data.get('metadata', {}),
        )
|
|
@@ -0,0 +1,236 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Context-aware question routing
|
|
3
|
+
|
|
4
|
+
Routes questions based on workflow type and success/failure status.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import yaml
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import List, Dict, Any
|
|
10
|
+
|
|
11
|
+
from .models import Question
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def load_question_bank(bank_path: "Path | None" = None) -> Dict[str, Any]:
    """
    Load question bank from YAML file.

    Args:
        bank_path: Path to questions.yaml file.  When None, a default
            location resolved relative to this module is used.

    Returns:
        Dict with workflow questions.  Falls back to the built-in
        default bank when the file does not exist.
    """
    if bank_path is None:
        # Default path
        # NOTE(review): five .parent hops assume a fixed package layout
        # relative to this module — confirm against the installed tree.
        bank_path = Path(__file__).parent.parent.parent.parent.parent / 'devforgeai' / 'feedback' / 'questions.yaml'

    if not bank_path.exists():
        # Return minimal default question bank
        return _get_default_question_bank()

    # NOTE(review): no schema validation — a malformed YAML file yields
    # whatever safe_load returns (possibly None).
    with open(bank_path, 'r') as f:
        question_bank = yaml.safe_load(f)

    return question_bank
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def get_context_aware_questions(workflow_type: str, success_status: str) -> List[Question]:
    """
    Get context-aware questions based on workflow and outcome.

    Args:
        workflow_type: 'dev', 'qa', 'orchestrate', 'release', etc.
        success_status: 'success', 'failed', 'partial'

    Returns:
        List of at most 6 Question objects; 'partial' is routed to the
        success question set, 'failed' to the failure set.
    """
    bank = load_question_bank()
    workflow_config = bank.get('workflows', {}).get(workflow_type, {})

    # 'failed' selects the failure set; anything else (success/partial)
    # selects the success set.
    set_key = 'failure_questions' if success_status == 'failed' else 'success_questions'
    raw_questions = workflow_config.get(set_key, [])

    # Cap at 6 questions and materialize Question objects.
    return [
        Question(
            question_id=entry['id'],
            question_text=entry['text'],
            response_type=entry['type'],
            scale=entry.get('scale'),
            options=entry.get('options'),
        )
        for entry in raw_questions[:6]
    ]
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def _get_default_question_bank() -> Dict[str, Any]:
    """
    Return default question bank (used when questions.yaml doesn't exist).

    The structure mirrors the expected YAML file: a top-level
    'workflows' mapping of workflow name -> {'success_questions',
    'failure_questions'}, where each question has an 'id', 'text' and
    'type' ('rating' questions add 'scale', 'multiple_choice' add
    'options').

    Returns:
        Dict with minimal question bank
    """
    return {
        'workflows': {
            # Development (TDD) workflow questions.
            'dev': {
                'success_questions': [
                    {
                        'id': 'dev_success_01',
                        'text': 'How confident do you feel about the TDD workflow?',
                        'type': 'rating',
                        'scale': '1-5'
                    },
                    {
                        'id': 'dev_success_02',
                        'text': 'Which phase was most challenging?',
                        'type': 'multiple_choice',
                        'options': ['Red', 'Green', 'Refactor', 'Integration']
                    },
                    {
                        'id': 'dev_success_03',
                        'text': 'What could we improve about the development workflow?',
                        'type': 'open_text'
                    },
                    {
                        'id': 'dev_success_04',
                        'text': 'How well did the framework guide you?',
                        'type': 'rating',
                        'scale': '1-5'
                    },
                ],
                'failure_questions': [
                    {
                        'id': 'dev_failure_01',
                        'text': 'What blocked you from completing the story?',
                        'type': 'open_text'
                    },
                    {
                        'id': 'dev_failure_02',
                        'text': 'What would help you complete this in the future?',
                        'type': 'open_text'
                    },
                    {
                        'id': 'dev_failure_03',
                        'text': 'How clear were the error messages?',
                        'type': 'rating',
                        'scale': '1-5'
                    },
                ]
            },
            # Quality-assurance workflow questions.
            'qa': {
                'success_questions': [
                    {
                        'id': 'qa_success_01',
                        'text': 'How clear were the quality metrics?',
                        'type': 'rating',
                        'scale': '1-5'
                    },
                    {
                        'id': 'qa_success_02',
                        'text': 'What could improve the validation process?',
                        'type': 'open_text'
                    },
                    {
                        'id': 'qa_success_03',
                        'text': 'Were coverage thresholds reasonable?',
                        'type': 'multiple_choice',
                        'options': ['Too strict', 'Just right', 'Too lenient']
                    },
                ],
                'failure_questions': [
                    {
                        'id': 'qa_failure_01',
                        'text': 'What made the quality check fail?',
                        'type': 'open_text'
                    },
                    {
                        'id': 'qa_failure_02',
                        'text': 'How can we make quality requirements clearer?',
                        'type': 'open_text'
                    },
                    {
                        'id': 'qa_failure_03',
                        'text': 'How helpful were the failure messages?',
                        'type': 'rating',
                        'scale': '1-5'
                    },
                ]
            },
            # End-to-end orchestration workflow questions.
            'orchestrate': {
                'success_questions': [
                    {
                        'id': 'orchestrate_success_01',
                        'text': 'How smooth was the full workflow integration?',
                        'type': 'rating',
                        'scale': '1-5'
                    },
                    {
                        'id': 'orchestrate_success_02',
                        'text': 'Which phase had the most friction?',
                        'type': 'multiple_choice',
                        'options': ['Dev', 'QA', 'Release', 'None']
                    },
                    {
                        'id': 'orchestrate_success_03',
                        'text': 'What could improve the orchestration workflow?',
                        'type': 'open_text'
                    },
                ],
                'failure_questions': [
                    {
                        'id': 'orchestrate_failure_01',
                        'text': 'At which phase did the workflow fail?',
                        'type': 'multiple_choice',
                        'options': ['Dev', 'QA', 'Release']
                    },
                    {
                        'id': 'orchestrate_failure_02',
                        'text': 'What would have helped prevent this failure?',
                        'type': 'open_text'
                    },
                    {
                        'id': 'orchestrate_failure_03',
                        'text': 'How well did the framework communicate the issue?',
                        'type': 'rating',
                        'scale': '1-5'
                    },
                ]
            },
            # Release/deployment workflow questions.
            'release': {
                'success_questions': [
                    {
                        'id': 'release_success_01',
                        'text': 'How confident are you in the deployment?',
                        'type': 'rating',
                        'scale': '1-5'
                    },
                    {
                        'id': 'release_success_02',
                        'text': 'What could improve the release process?',
                        'type': 'open_text'
                    },
                ],
                'failure_questions': [
                    {
                        'id': 'release_failure_01',
                        'text': 'What caused the release to fail?',
                        'type': 'open_text'
                    },
                    {
                        'id': 'release_failure_02',
                        'text': 'How can we make releases more reliable?',
                        'type': 'open_text'
                    },
                ]
            }
        }
    }
|
|
@@ -0,0 +1,233 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Retrospective conversation core module
|
|
3
|
+
|
|
4
|
+
Handles triggering retrospectives, capturing feedback, and managing feedback sessions.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
import uuid
|
|
9
|
+
from datetime import datetime, timedelta, timezone
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import List, Dict, Any, Optional
|
|
12
|
+
|
|
13
|
+
from .models import Question, FeedbackSession
|
|
14
|
+
from .question_router import get_context_aware_questions
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def trigger_retrospective(workflow_type: str, operation_result: Dict[str, Any]) -> List[Question]:
    """
    Trigger retrospective conversation after operation completion.

    Args:
        workflow_type: Type of workflow ('dev', 'qa', 'orchestrate', etc.)
        operation_result: Dict with keys 'status', 'story_id', optional
            'failure_reason'.  A missing 'status' defaults to 'success'.

    Returns:
        List of Question objects (4-6 questions) routed for this
        workflow and outcome.
    """
    status = operation_result.get('status', 'success')
    # Delegate question selection to the context-aware router.
    return get_context_aware_questions(workflow_type, status)
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def capture_feedback(
    responses: List[Dict[str, Any]],
    story_id: str,
    workflow_type: str,
    success_status: str,
    feedback_dir: Optional[Path] = None,
    epic_id: Optional[str] = None,
    allow_skip: bool = True,
) -> Dict[str, Any]:
    """
    Capture and store feedback responses as a JSON retrospective file.

    Args:
        responses: List of dicts with 'question_id', 'response', 'skip'
        story_id: Story ID (e.g., 'STORY-001')
        workflow_type: Workflow type
        success_status: 'success', 'failed', or 'partial'
        feedback_dir: Directory to store feedback (default: <cwd>/devforgeai/feedback)
        epic_id: Optional epic ID
        allow_skip: Whether to allow full skip (all questions skipped)

    Returns:
        Dict with 'status' ('recorded' or 'skipped'); when recorded it
        also carries 'feedback_id', 'file_path' and 'message'.

    Raises:
        ValueError: when 5+ questions were asked but fewer than 2
            received substantive (>10 char) responses.
    """
    # Check if all skipped
    all_skipped = all(r.get('skip', False) for r in responses)

    if all_skipped and allow_skip:
        # Full skip: nothing is written to disk.
        return {
            'status': 'skipped',
            'message': 'No problem, thanks for using DevForgeAI!'
        }

    # Validate minimum responses (at least 2 of 5 substantive responses).
    # NOTE(review): len(str(response)) > 10 means short answers and all
    # integer ratings (e.g. 5 -> "5") never count as substantive, and a
    # falsy response (0, '') is excluded by the truthiness check —
    # confirm this is intended.
    substantive_responses = [
        r for r in responses
        if not r.get('skip', False) and r.get('response') and len(str(r['response'])) > 10
    ]

    # Validation only kicks in for full-length sessions (5+ questions).
    if len(substantive_responses) < 2 and len(responses) >= 5:
        raise ValueError("At least 2 of 5 questions must have substantive responses (>10 chars)")

    # Generate feedback ID and timestamp (UTC, 'Z' suffix form).
    feedback_id = f"fb-{uuid.uuid4()}"
    timestamp = datetime.now(timezone.utc).isoformat().replace('+00:00', 'Z')

    # Calculate metadata
    total_questions = len(responses)
    answered = sum(1 for r in responses if not r.get('skip', False))
    skipped = total_questions - answered

    metadata = {
        'duration_seconds': 0,  # Would be calculated in real implementation
        'total_questions': total_questions,
        'answered': answered,
        'skipped': skipped,
    }

    # Create feedback session
    feedback_session = FeedbackSession(
        feedback_id=feedback_id,
        timestamp=timestamp,
        story_id=story_id,
        epic_id=epic_id,
        workflow_type=workflow_type,
        success_status=success_status,
        questions=responses,
        metadata=metadata,
    )

    # Determine feedback directory
    if feedback_dir is None:
        feedback_dir = Path.cwd() / 'devforgeai' / 'feedback'

    # Create story-specific directory
    story_feedback_dir = feedback_dir / story_id
    story_feedback_dir.mkdir(parents=True, exist_ok=True)

    # Generate filename with timestamp.
    # NOTE(review): timestamp[:10] is the date only, so a second capture
    # for the same story on the same day silently overwrites the first —
    # confirm whether that is acceptable.
    filename = f"{timestamp[:10]}-retrospective.json"
    file_path = story_feedback_dir / filename

    # Write feedback to JSON file
    with open(file_path, 'w') as f:
        json.dump(feedback_session.to_dict(), f, indent=2)

    return {
        'status': 'recorded',
        'feedback_id': feedback_id,
        'file_path': str(file_path),
        'message': '✅ Feedback recorded'
    }
|
|
128
|
+
|
|
129
|
+
|
|
130
|
+
def is_skip_selected(user_response: str) -> bool:
    """
    Check whether the user opted to skip feedback.

    Performs a case-insensitive substring match against a small set
    of skip phrases.

    Args:
        user_response: User's response text

    Returns:
        True if user wants to skip, False otherwise
    """
    normalized = user_response.lower().strip()
    # Substring match: "I'd rather skip this" also counts as a skip.
    for phrase in ('skip', 'no thanks', 'not now', 'later', 'decline'):
        if phrase in normalized:
            return True
    return False
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
def save_in_progress_state(
    story_id: str,
    responses: List[Dict[str, Any]],
    workflow_type: str,
    feedback_dir: Path,
) -> Path:
    """
    Save in-progress feedback state (for network loss recovery).

    Args:
        story_id: Story ID
        responses: Partial responses completed so far
        workflow_type: Workflow type
        feedback_dir: Feedback directory

    Returns:
        Path to saved state file (``<timestamp>-in-progress.json``)
    """
    story_feedback_dir = feedback_dir / story_id
    story_feedback_dir.mkdir(parents=True, exist_ok=True)

    timestamp = datetime.now(timezone.utc).isoformat().replace('+00:00', 'Z')
    state_data = {
        'story_id': story_id,
        'workflow_type': workflow_type,
        'responses': responses,
        'status': 'in_progress',
        'timestamp': timestamp,
    }

    # Fix: the raw ISO timestamp contains ':' which is illegal in Windows
    # filenames; replace with '-'.  Lexicographic sort order is preserved,
    # so resume_feedback's "most recent" selection still works, and the
    # '*-in-progress.json' glob still matches.
    stamp = timestamp[:19].replace(':', '-')
    state_file = story_feedback_dir / f"{stamp}-in-progress.json"

    with open(state_file, 'w') as f:
        json.dump(state_data, f, indent=2)

    return state_file
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
def resume_feedback(story_id: str, feedback_dir: Path) -> Optional[Dict[str, Any]]:
    """
    Resume an in-progress feedback session.

    Args:
        story_id: Story ID
        feedback_dir: Feedback directory

    Returns:
        State data from the most recent (lexicographically last)
        ``*-in-progress.json`` file, or None when nothing is found.
    """
    story_dir = feedback_dir / story_id
    if not story_dir.exists():
        return None

    candidates = sorted(story_dir.glob('*-in-progress.json'))
    if not candidates:
        return None

    # Filenames begin with a timestamp, so a plain string sort puts the
    # newest file last.
    with open(candidates[-1], 'r') as f:
        return json.load(f)
|
|
219
|
+
|
|
220
|
+
|
|
221
|
+
def detect_rapid_sequence(last_feedback_time: datetime, threshold_seconds: int = 30) -> bool:
    """
    Detect if commands are being run in rapid sequence.

    Args:
        last_feedback_time: Timestamp of last feedback.  Naive datetimes
            are assumed to be UTC (previously they raised TypeError when
            subtracted from the timezone-aware current time).
        threshold_seconds: Threshold in seconds (default: 30)

    Returns:
        True if rapid sequence detected, False otherwise
    """
    # Fix: mixing naive and aware datetimes raises TypeError on
    # subtraction, so normalize naive input to UTC first.
    if last_feedback_time.tzinfo is None:
        last_feedback_time = last_feedback_time.replace(tzinfo=timezone.utc)

    elapsed = datetime.now(timezone.utc) - last_feedback_time
    return elapsed.total_seconds() < threshold_seconds
|