prizmkit 1.0.13 → 1.0.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. package/bin/create-prizmkit.js +4 -1
  2. package/bundled/VERSION.json +3 -3
  3. package/bundled/adapters/claude/command-adapter.js +35 -4
  4. package/bundled/adapters/claude/rules-adapter.js +6 -58
  5. package/bundled/adapters/claude/team-adapter.js +2 -2
  6. package/bundled/adapters/codebuddy/agent-adapter.js +0 -1
  7. package/bundled/adapters/codebuddy/rules-adapter.js +30 -0
  8. package/bundled/adapters/shared/frontmatter.js +3 -1
  9. package/bundled/dev-pipeline/README.md +13 -3
  10. package/bundled/dev-pipeline/launch-bugfix-daemon.sh +10 -0
  11. package/bundled/dev-pipeline/launch-daemon.sh +18 -4
  12. package/bundled/dev-pipeline/lib/common.sh +105 -0
  13. package/bundled/dev-pipeline/run-bugfix.sh +57 -57
  14. package/bundled/dev-pipeline/run.sh +75 -59
  15. package/bundled/dev-pipeline/scripts/check-session-status.py +47 -2
  16. package/bundled/dev-pipeline/scripts/cleanup-logs.py +192 -0
  17. package/bundled/dev-pipeline/scripts/detect-stuck.py +15 -3
  18. package/bundled/dev-pipeline/scripts/generate-bootstrap-prompt.py +32 -27
  19. package/bundled/dev-pipeline/scripts/generate-bugfix-prompt.py +23 -23
  20. package/bundled/dev-pipeline/scripts/update-feature-status.py +50 -2
  21. package/bundled/dev-pipeline/scripts/utils.py +22 -0
  22. package/bundled/dev-pipeline/templates/bootstrap-tier1.md +18 -1
  23. package/bundled/dev-pipeline/templates/bootstrap-tier2.md +19 -1
  24. package/bundled/dev-pipeline/templates/bootstrap-tier3.md +18 -2
  25. package/bundled/dev-pipeline/templates/session-status-schema.json +7 -1
  26. package/bundled/dev-pipeline/tests/__init__.py +0 -0
  27. package/bundled/dev-pipeline/tests/conftest.py +133 -0
  28. package/bundled/dev-pipeline/tests/test_check_session.py +127 -0
  29. package/bundled/dev-pipeline/tests/test_cleanup_logs.py +119 -0
  30. package/bundled/dev-pipeline/tests/test_detect_stuck.py +207 -0
  31. package/bundled/dev-pipeline/tests/test_generate_bugfix_prompt.py +181 -0
  32. package/bundled/dev-pipeline/tests/test_generate_prompt.py +190 -0
  33. package/bundled/dev-pipeline/tests/test_init_bugfix_pipeline.py +153 -0
  34. package/bundled/dev-pipeline/tests/test_init_pipeline.py +241 -0
  35. package/bundled/dev-pipeline/tests/test_update_bug_status.py +142 -0
  36. package/bundled/dev-pipeline/tests/test_update_feature_status.py +277 -0
  37. package/bundled/dev-pipeline/tests/test_utils.py +141 -0
  38. package/bundled/rules/USAGE.md +153 -0
  39. package/bundled/rules/_rules-metadata.json +43 -0
  40. package/bundled/rules/general/prefer-linux-commands.md +9 -0
  41. package/bundled/rules/prizm/prizm-commit-workflow.md +10 -0
  42. package/bundled/rules/prizm/prizm-documentation.md +19 -0
  43. package/bundled/rules/prizm/prizm-progressive-loading.md +11 -0
  44. package/bundled/skills/_metadata.json +130 -67
  45. package/bundled/skills/app-planner/SKILL.md +252 -499
  46. package/bundled/skills/app-planner/assets/evaluation-guide.md +44 -0
  47. package/bundled/skills/app-planner/scripts/validate-and-generate.py +143 -4
  48. package/bundled/skills/bug-planner/SKILL.md +58 -13
  49. package/bundled/skills/bugfix-pipeline-launcher/SKILL.md +5 -7
  50. package/bundled/skills/dev-pipeline-launcher/SKILL.md +16 -7
  51. package/bundled/skills/feature-workflow/SKILL.md +175 -234
  52. package/bundled/skills/prizm-kit/SKILL.md +17 -31
  53. package/bundled/skills/{prizmkit-adr-manager → prizmkit-tool-adr-manager}/SKILL.md +6 -7
  54. package/bundled/skills/{prizmkit-api-doc-generator → prizmkit-tool-api-doc-generator}/SKILL.md +4 -5
  55. package/bundled/skills/{prizmkit-bug-reproducer → prizmkit-tool-bug-reproducer}/SKILL.md +4 -5
  56. package/bundled/skills/{prizmkit-ci-cd-generator → prizmkit-tool-ci-cd-generator}/SKILL.md +4 -5
  57. package/bundled/skills/{prizmkit-db-migration → prizmkit-tool-db-migration}/SKILL.md +4 -5
  58. package/bundled/skills/{prizmkit-dependency-health → prizmkit-tool-dependency-health}/SKILL.md +3 -4
  59. package/bundled/skills/{prizmkit-deployment-strategy → prizmkit-tool-deployment-strategy}/SKILL.md +4 -5
  60. package/bundled/skills/{prizmkit-error-triage → prizmkit-tool-error-triage}/SKILL.md +4 -5
  61. package/bundled/skills/{prizmkit-log-analyzer → prizmkit-tool-log-analyzer}/SKILL.md +4 -5
  62. package/bundled/skills/{prizmkit-monitoring-setup → prizmkit-tool-monitoring-setup}/SKILL.md +4 -5
  63. package/bundled/skills/{prizmkit-onboarding-generator → prizmkit-tool-onboarding-generator}/SKILL.md +4 -5
  64. package/bundled/skills/{prizmkit-perf-profiler → prizmkit-tool-perf-profiler}/SKILL.md +4 -5
  65. package/bundled/skills/{prizmkit-security-audit → prizmkit-tool-security-audit}/SKILL.md +3 -4
  66. package/bundled/skills/{prizmkit-tech-debt-tracker → prizmkit-tool-tech-debt-tracker}/SKILL.md +3 -4
  67. package/bundled/skills/refactor-skill/SKILL.md +371 -0
  68. package/bundled/skills/refactor-workflow/SKILL.md +17 -119
  69. package/package.json +1 -1
  70. package/src/external-skills.js +71 -0
  71. package/src/index.js +62 -4
  72. package/src/metadata.js +36 -0
  73. package/src/scaffold.js +136 -32
  74. package/bundled/skills/prizmkit-bug-fix-workflow/SKILL.md +0 -356
  75. package/bundled/templates/claude-md-template.md +0 -38
  76. package/bundled/templates/codebuddy-md-template.md +0 -35
  77. package/bundled/skills/{prizmkit-adr-manager → prizmkit-tool-adr-manager}/assets/adr-template.md +0 -0
@@ -0,0 +1,181 @@
1
+ """Tests for generate-bugfix-prompt.py."""
2
+
3
+ import os
4
+ import sys
5
+ import pytest
6
+
7
+
8
def _import_generate_bugfix_prompt():
    """Load scripts/generate-bugfix-prompt.py as a module.

    The script's filename contains hyphens, so it cannot be imported with a
    plain ``import`` statement; ``importlib`` is used to load it by path.

    Returns:
        The executed module object for generate-bugfix-prompt.py.

    Raises:
        ImportError: if the script file cannot be located or loaded.
    """
    import importlib.util
    path = os.path.join(
        os.path.dirname(__file__), "..", "scripts", "generate-bugfix-prompt.py"
    )
    spec = importlib.util.spec_from_file_location("generate_bugfix_prompt", path)
    # spec_from_file_location returns None for a missing/unloadable file;
    # fail fast with a clear message instead of an opaque AttributeError below.
    if spec is None or spec.loader is None:
        raise ImportError(f"cannot load module spec from {path}")
    mod = importlib.util.module_from_spec(spec)
    # Register under a stable name before executing so intra-module lookups
    # (dataclasses, pickling, etc.) can resolve the module by name.
    sys.modules["generate_bugfix_prompt"] = mod
    spec.loader.exec_module(mod)
    return mod
18
+
19
+
20
# Import the script under test once at module load and re-export the
# functions exercised below, so test bodies can call them directly.
gen_bugfix = _import_generate_bugfix_prompt()
format_acceptance_criteria = gen_bugfix.format_acceptance_criteria
format_global_context = gen_bugfix.format_global_context
format_error_source_details = gen_bugfix.format_error_source_details
format_environment = gen_bugfix.format_environment
process_conditional_blocks = gen_bugfix.process_conditional_blocks
26
+
27
+
28
class TestFormatAcceptanceCriteria:
    """Tests for bullet-list rendering of bug acceptance criteria."""

    def test_normal(self):
        formatted = format_acceptance_criteria(["Fix A", "Fix B"])
        assert formatted == "- Fix A\n- Fix B"

    def test_empty(self):
        assert "none specified" in format_acceptance_criteria([])

    def test_none(self):
        assert "none specified" in format_acceptance_criteria(None)

    def test_single(self):
        assert format_acceptance_criteria(["Only one"]) == "- Only one"
44
+
45
+
46
class TestFormatGlobalContext:
    """Tests for key/value rendering of the global bug context."""

    def test_normal(self):
        rendered = format_global_context({"lang": "Python"})
        assert "**lang**" in rendered
        assert "Python" in rendered

    def test_empty(self):
        assert "none specified" in format_global_context({})

    def test_none(self):
        assert "none specified" in format_global_context(None)
57
+
58
+
59
class TestFormatErrorSourceDetails:
    """Tests for format_error_source_details across all source types."""

    def test_stack_trace(self):
        rendered = format_error_source_details({
            "type": "stack_trace",
            "stack_trace": "Traceback ... Error",
            "error_message": "Something broke",
        })
        for needle in ("Stack Trace", "Traceback ... Error",
                       "Error Message", "Something broke"):
            assert needle in rendered

    def test_log_pattern(self):
        rendered = format_error_source_details(
            {"type": "log_pattern", "log_snippet": "ERROR 2024-01-01 ..."}
        )
        assert "Log Snippet" in rendered

    def test_failed_test(self):
        rendered = format_error_source_details({
            "type": "failed_test",
            "failed_test_path": "tests/test_auth.py::test_login",
        })
        assert "Failed Test" in rendered
        assert "test_auth.py" in rendered

    def test_user_report(self):
        rendered = format_error_source_details({
            "type": "user_report",
            "reproduction_steps": ["Step 1", "Step 2"],
        })
        assert "Reproduction Steps" in rendered
        assert "Step 1" in rendered
        assert "Step 2" in rendered

    def test_none_source(self):
        assert "no error source details" in format_error_source_details(None)

    def test_empty_source(self):
        assert "no error source details" in format_error_source_details({})

    def test_non_dict_source(self):
        # Non-dict inputs are treated the same as a missing source.
        assert "no error source details" in format_error_source_details("not a dict")

    def test_unknown_type_with_error_message(self):
        rendered = format_error_source_details(
            {"type": "unknown", "error_message": "Something happened"}
        )
        assert "Error Message" in rendered
118
+
119
+
120
class TestFormatEnvironment:
    """Tests for format_environment, including falsy-value filtering."""

    def test_normal(self):
        rendered = format_environment({"os": "macOS", "browser": "Chrome"})
        assert "**browser**" in rendered
        assert "**os**" in rendered

    def test_empty(self):
        assert "not specified" in format_environment({})

    def test_none(self):
        assert "not specified" in format_environment(None)

    def test_non_dict(self):
        assert "not specified" in format_environment("not a dict")

    def test_empty_values_filtered(self):
        # Keys whose values are falsy should be dropped from the output.
        rendered = format_environment({"os": "macOS", "empty_key": ""})
        assert "os" in rendered
        assert "empty_key" not in rendered

    def test_all_empty_values(self):
        # With every value filtered out, fall back to the placeholder text.
        assert "not specified" in format_environment({"a": "", "b": None})
147
+
148
+
149
class TestProcessConditionalBlocks:
    """Tests for verification-type conditional blocks in the bugfix template."""

    def test_manual_verification_keeps_block(self):
        template = (
            "before\n{{IF_VERIFICATION_MANUAL_OR_HYBRID}}\nmanual stuff\n"
            "{{END_IF_VERIFICATION_MANUAL_OR_HYBRID}}\nafter"
        )
        out = process_conditional_blocks(template, {"verification_type": "manual"})
        assert "manual stuff" in out
        assert "{{IF_VERIFICATION" not in out

    def test_hybrid_verification_keeps_block(self):
        template = (
            "{{IF_VERIFICATION_MANUAL_OR_HYBRID}}\nhybrid stuff\n"
            "{{END_IF_VERIFICATION_MANUAL_OR_HYBRID}}"
        )
        out = process_conditional_blocks(template, {"verification_type": "hybrid"})
        assert "hybrid stuff" in out

    def test_automated_verification_removes_block(self):
        template = (
            "before\n{{IF_VERIFICATION_MANUAL_OR_HYBRID}}\nmanual stuff\n"
            "{{END_IF_VERIFICATION_MANUAL_OR_HYBRID}}\nafter"
        )
        out = process_conditional_blocks(template, {"verification_type": "automated"})
        assert "manual stuff" not in out
        assert "after" in out

    def test_no_verification_type_defaults_automated(self):
        # A bug without a verification_type behaves like "automated".
        template = (
            "{{IF_VERIFICATION_MANUAL_OR_HYBRID}}\nstuff\n"
            "{{END_IF_VERIFICATION_MANUAL_OR_HYBRID}}\nrest"
        )
        out = process_conditional_blocks(template, {})
        assert "stuff" not in out
        assert "rest" in out

    def test_no_conditional_blocks(self):
        # Content without markers passes through untouched.
        out = process_conditional_blocks(
            "plain text", {"verification_type": "automated"}
        )
        assert out == "plain text"
@@ -0,0 +1,190 @@
1
+ """Tests for generate-bootstrap-prompt.py."""
2
+
3
+ import os
4
+ import sys
5
+ import pytest
6
+
7
+
8
def _import_generate_prompt():
    """Load scripts/generate-bootstrap-prompt.py as a module.

    The script's filename contains hyphens, so it cannot be imported with a
    plain ``import`` statement; ``importlib`` is used to load it by path.

    Returns:
        The executed module object for generate-bootstrap-prompt.py.

    Raises:
        ImportError: if the script file cannot be located or loaded.
    """
    import importlib.util
    path = os.path.join(
        os.path.dirname(__file__), "..", "scripts", "generate-bootstrap-prompt.py"
    )
    spec = importlib.util.spec_from_file_location("generate_bootstrap_prompt", path)
    # spec_from_file_location returns None for a missing/unloadable file;
    # fail fast with a clear message instead of an opaque AttributeError below.
    if spec is None or spec.loader is None:
        raise ImportError(f"cannot load module spec from {path}")
    mod = importlib.util.module_from_spec(spec)
    # Register under a stable name before executing so intra-module lookups
    # (dataclasses, pickling, etc.) can resolve the module by name.
    sys.modules["generate_bootstrap_prompt"] = mod
    spec.loader.exec_module(mod)
    return mod
18
+
19
+
20
# Import the script under test once at module load and re-export the
# functions exercised below, so test bodies can call them directly.
gen_prompt = _import_generate_prompt()
compute_feature_slug = gen_prompt.compute_feature_slug
format_acceptance_criteria = gen_prompt.format_acceptance_criteria
format_global_context = gen_prompt.format_global_context
process_conditional_blocks = gen_prompt.process_conditional_blocks
process_mode_blocks = gen_prompt.process_mode_blocks
determine_pipeline_mode = gen_prompt.determine_pipeline_mode
27
+
28
+
29
class TestComputeFeatureSlug:
    """Tests for feature-id + title → directory slug conversion."""

    def test_basic(self):
        slug = compute_feature_slug("F-001", "Project Infrastructure Setup")
        assert slug == "001-project-infrastructure-setup"

    def test_special_characters(self):
        # Punctuation is stripped; inner dots collapse into the token.
        slug = compute_feature_slug("F-002", "User Auth (OAuth2.0)")
        assert slug == "002-user-auth-oauth20"

    def test_uppercase_id(self):
        assert compute_feature_slug("F-042", "My Feature") == "042-my-feature"

    def test_lowercase_f(self):
        # Lowercase id prefix and short numbers are normalized/zero-padded.
        assert compute_feature_slug("f-5", "Quick Fix") == "005-quick-fix"

    def test_extra_spaces(self):
        assert compute_feature_slug("F-010", " Extra Spaces ") == "010-extra-spaces"

    def test_empty_title(self):
        slug = compute_feature_slug("F-001", "")
        # Only the zero-padded numeric part; no trailing hyphen expected.
        assert slug.startswith("001")

    def test_numeric_padding(self):
        assert compute_feature_slug("F-1", "Test").startswith("001-")
58
+
59
+
60
class TestFormatAcceptanceCriteria:
    """Tests for bullet-list rendering of feature acceptance criteria."""

    def test_single_item(self):
        assert format_acceptance_criteria(["Must work"]) == "- Must work"

    def test_multiple_items(self):
        assert format_acceptance_criteria(["A", "B", "C"]) == "- A\n- B\n- C"

    def test_empty_list(self):
        assert "none specified" in format_acceptance_criteria([])

    def test_none_input(self):
        assert "none specified" in format_acceptance_criteria(None)
76
+
77
+
78
class TestFormatGlobalContext:
    """Tests for key/value rendering of the global project context."""

    def test_basic(self):
        rendered = format_global_context(
            {"language": "Python", "framework": "FastAPI"}
        )
        for needle in ("**framework**", "**language**", "Python", "FastAPI"):
            assert needle in rendered

    def test_sorted_keys(self):
        # Output lines are ordered by key, regardless of insertion order.
        rendered = format_global_context({"z_key": "z", "a_key": "a"})
        first_line, second_line = rendered.split("\n")[:2]
        assert "a_key" in first_line
        assert "z_key" in second_line

    def test_empty_context(self):
        assert "none specified" in format_global_context({})

    def test_none_context(self):
        assert "none specified" in format_global_context(None)
99
+
100
+
101
class TestProcessConditionalBlocks:
    """Tests for fresh-start vs resume conditional template blocks."""

    def test_fresh_start_keeps_fresh_block(self):
        template = "before\n{{IF_FRESH_START}}\nfresh content\n{{END_IF_FRESH_START}}\nafter"
        out = process_conditional_blocks(template, "null")
        assert "fresh content" in out
        assert "{{IF_FRESH_START}}" not in out
        assert "after" in out

    def test_fresh_start_removes_resume_block(self):
        template = "before\n{{IF_RESUME}}\nresume content\n{{END_IF_RESUME}}\nafter"
        out = process_conditional_blocks(template, "null")
        assert "resume content" not in out
        assert "after" in out

    def test_resume_keeps_resume_block(self):
        template = "before\n{{IF_RESUME}}\nresume content\n{{END_IF_RESUME}}\nafter"
        out = process_conditional_blocks(template, "3")
        assert "resume content" in out
        assert "{{IF_RESUME}}" not in out

    def test_resume_removes_fresh_block(self):
        template = "before\n{{IF_FRESH_START}}\nfresh content\n{{END_IF_FRESH_START}}\nafter"
        out = process_conditional_blocks(template, "3")
        assert "fresh content" not in out
        assert "after" in out

    def test_no_conditional_blocks(self):
        # Content without markers passes through untouched.
        assert process_conditional_blocks("just plain text", "null") == "just plain text"
131
+
132
+
133
class TestProcessModeBlocks:
    """Tests for pipeline-mode and init-state conditional template blocks."""

    def test_keeps_matching_mode(self):
        template = "before\n{{IF_MODE_LITE}}\nlite content\n{{END_IF_MODE_LITE}}\nafter"
        out = process_mode_blocks(template, "lite", False)
        assert "lite content" in out
        assert "{{IF_MODE_LITE}}" not in out

    def test_removes_non_matching_mode(self):
        template = "{{IF_MODE_FULL}}\nfull content\n{{END_IF_MODE_FULL}}\nrest"
        out = process_mode_blocks(template, "lite", False)
        assert "full content" not in out
        assert "rest" in out

    def test_init_done_keeps_init_done_block(self):
        out = process_mode_blocks(
            "{{IF_INIT_DONE}}\ninit done\n{{END_IF_INIT_DONE}}\n", "standard", True
        )
        assert "init done" in out

    def test_init_done_removes_init_needed_block(self):
        out = process_mode_blocks(
            "{{IF_INIT_NEEDED}}\nneed init\n{{END_IF_INIT_NEEDED}}\n", "standard", True
        )
        assert "need init" not in out

    def test_init_not_done_keeps_init_needed_block(self):
        out = process_mode_blocks(
            "{{IF_INIT_NEEDED}}\nneed init\n{{END_IF_INIT_NEEDED}}\n", "standard", False
        )
        assert "need init" in out

    def test_multiple_modes(self):
        # Exactly one of the three mode blocks survives.
        template = (
            "{{IF_MODE_LITE}}\nlite\n{{END_IF_MODE_LITE}}\n"
            "{{IF_MODE_STANDARD}}\nstandard\n{{END_IF_MODE_STANDARD}}\n"
            "{{IF_MODE_FULL}}\nfull\n{{END_IF_MODE_FULL}}\n"
        )
        out = process_mode_blocks(template, "standard", False)
        assert "lite" not in out
        assert "standard" in out
        assert "full" not in out
171
+
172
+
173
class TestDeterminePipelineMode:
    """Tests mapping a feature complexity level to a pipeline mode."""

    def test_low_complexity(self):
        assert determine_pipeline_mode("low") == "lite"

    def test_medium_complexity(self):
        assert determine_pipeline_mode("medium") == "standard"

    def test_high_complexity(self):
        assert determine_pipeline_mode("high") == "full"

    def test_critical_complexity(self):
        assert determine_pipeline_mode("critical") == "full"

    def test_unknown_complexity(self):
        # Unrecognized complexity values fall back to the standard mode.
        assert determine_pipeline_mode("unknown") == "standard"

    def test_none_complexity(self):
        # A missing (None) complexity also defaults to standard.
        assert determine_pipeline_mode(None) == "standard"
@@ -0,0 +1,153 @@
1
+ """Tests for init-bugfix-pipeline.py."""
2
+
3
+ import json
4
+ import os
5
+ import sys
6
+ import pytest
7
+
8
+
9
def _import_init_bugfix_pipeline():
    """Load scripts/init-bugfix-pipeline.py as a module.

    The script's filename contains hyphens, so it cannot be imported with a
    plain ``import`` statement; ``importlib`` is used to load it by path.

    Returns:
        The executed module object for init-bugfix-pipeline.py.

    Raises:
        ImportError: if the script file cannot be located or loaded.
    """
    import importlib.util
    path = os.path.join(
        os.path.dirname(__file__), "..", "scripts", "init-bugfix-pipeline.py"
    )
    spec = importlib.util.spec_from_file_location("init_bugfix_pipeline", path)
    # spec_from_file_location returns None for a missing/unloadable file;
    # fail fast with a clear message instead of an opaque AttributeError below.
    if spec is None or spec.loader is None:
        raise ImportError(f"cannot load module spec from {path}")
    mod = importlib.util.module_from_spec(spec)
    # Register under a stable name before executing so intra-module lookups
    # (dataclasses, pickling, etc.) can resolve the module by name.
    sys.modules["init_bugfix_pipeline"] = mod
    spec.loader.exec_module(mod)
    return mod
19
+
20
+
21
# Load the script under test and re-export its validation helpers so the
# test classes below can call them as plain module-level names.
init_bugfix = _import_init_bugfix_pipeline()
validate_schema = init_bugfix.validate_schema
validate_bugs = init_bugfix.validate_bugs
create_state_directory = init_bugfix.create_state_directory
25
+
26
+
27
class TestValidateSchema:
    """Tests for top-level bug-list document validation."""

    def test_valid_schema(self, sample_bug_list):
        # sample_bug_list is a conftest fixture with a fully valid document.
        assert validate_schema(sample_bug_list) == []

    def test_wrong_schema(self):
        doc = {
            "$schema": "wrong",
            "project_name": "Test",
            "bugs": [{"id": "B-001"}],
        }
        assert any("$schema" in err for err in validate_schema(doc))

    def test_missing_project_name(self):
        doc = {
            "$schema": "dev-pipeline-bug-fix-list-v1",
            "bugs": [{"id": "B-001"}],
        }
        assert any("project_name" in err for err in validate_schema(doc))

    def test_empty_project_name(self):
        # Whitespace-only names count as missing.
        doc = {
            "$schema": "dev-pipeline-bug-fix-list-v1",
            "project_name": " ",
            "bugs": [{"id": "B-001"}],
        }
        assert any("project_name" in err for err in validate_schema(doc))

    def test_missing_bugs(self):
        doc = {
            "$schema": "dev-pipeline-bug-fix-list-v1",
            "project_name": "Test",
        }
        assert any("bugs" in err for err in validate_schema(doc))

    def test_bugs_not_array(self):
        doc = {
            "$schema": "dev-pipeline-bug-fix-list-v1",
            "project_name": "Test",
            "bugs": "not array",
        }
        assert any("bugs must be an array" in err for err in validate_schema(doc))

    def test_empty_bugs_array(self):
        doc = {
            "$schema": "dev-pipeline-bug-fix-list-v1",
            "project_name": "Test",
            "bugs": [],
        }
        assert any("at least one bug" in err for err in validate_schema(doc))
83
+
84
+
85
class TestValidateBugs:
    """Tests for per-bug field validation and id collection."""

    def test_valid_bugs(self, sample_bug_list):
        errors, seen_ids = validate_bugs(sample_bug_list["bugs"])
        assert errors == []
        assert seen_ids == {"B-001", "B-002"}

    def test_duplicate_ids(self):
        errors, _ = validate_bugs(
            [_make_bug("B-001"), _make_bug("B-001", title="Other")]
        )
        assert any("Duplicate" in err for err in errors)

    def test_invalid_id_format(self):
        # Ids must use the B-### prefix.
        errors, _ = validate_bugs([_make_bug("X-001")])
        assert any("invalid id" in err for err in errors)

    def test_invalid_severity(self):
        bad = _make_bug("B-001")
        bad["severity"] = "super_critical"
        errors, _ = validate_bugs([bad])
        assert any("invalid severity" in err for err in errors)

    def test_invalid_verification_type(self):
        bad = _make_bug("B-001")
        bad["verification_type"] = "magic"
        errors, _ = validate_bugs([bad])
        assert any("invalid verification_type" in err for err in errors)

    def test_invalid_status(self):
        bad = _make_bug("B-001")
        bad["status"] = "nonexistent_status"
        errors, _ = validate_bugs([bad])
        assert any("invalid status" in err for err in errors)

    def test_missing_required_fields(self):
        # A bug with only an id is missing several required fields.
        errors, _ = validate_bugs([{"id": "B-001"}])
        assert errors

    def test_error_source_not_object(self):
        bad = _make_bug("B-001")
        bad["error_source"] = "a string"
        errors, _ = validate_bugs([bad])
        assert any("error_source must be an object" in err for err in errors)

    def test_error_source_missing_type(self):
        bad = _make_bug("B-001")
        bad["error_source"] = {"detail": "something"}
        errors, _ = validate_bugs([bad])
        assert any(
            "error_source missing required field: type" in err for err in errors
        )

    def test_non_dict_bug(self):
        errors, _ = validate_bugs(["not a dict"])
        assert any("not an object" in err for err in errors)
141
+
142
+
143
+ def _make_bug(bid, title="Test Bug"):
144
+ return {
145
+ "id": bid,
146
+ "title": title,
147
+ "description": "A bug",
148
+ "severity": "medium",
149
+ "error_source": {"type": "stack_trace"},
150
+ "verification_type": "automated",
151
+ "acceptance_criteria": ["Fixed"],
152
+ "status": "pending",
153
+ }