@cleocode/core 2026.3.43 → 2026.3.44
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/admin/export-tasks.d.ts.map +1 -1
- package/dist/agents/agent-schema.d.ts +358 -0
- package/dist/agents/agent-schema.d.ts.map +1 -0
- package/dist/agents/capacity.d.ts +57 -0
- package/dist/agents/capacity.d.ts.map +1 -0
- package/dist/agents/index.d.ts +17 -0
- package/dist/agents/index.d.ts.map +1 -0
- package/dist/agents/registry.d.ts +115 -0
- package/dist/agents/registry.d.ts.map +1 -0
- package/dist/agents/retry.d.ts +83 -0
- package/dist/agents/retry.d.ts.map +1 -0
- package/dist/hooks/index.d.ts +4 -1
- package/dist/hooks/index.d.ts.map +1 -1
- package/dist/hooks/payload-schemas.d.ts +214 -0
- package/dist/hooks/payload-schemas.d.ts.map +1 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +16443 -2160
- package/dist/index.js.map +4 -4
- package/dist/inject/index.d.ts.map +1 -1
- package/dist/intelligence/impact.d.ts +51 -0
- package/dist/intelligence/impact.d.ts.map +1 -0
- package/dist/intelligence/index.d.ts +15 -0
- package/dist/intelligence/index.d.ts.map +1 -0
- package/dist/intelligence/patterns.d.ts +66 -0
- package/dist/intelligence/patterns.d.ts.map +1 -0
- package/dist/intelligence/prediction.d.ts +51 -0
- package/dist/intelligence/prediction.d.ts.map +1 -0
- package/dist/intelligence/types.d.ts +221 -0
- package/dist/intelligence/types.d.ts.map +1 -0
- package/dist/internal.d.ts +9 -0
- package/dist/internal.d.ts.map +1 -1
- package/dist/issue/template-parser.d.ts +8 -2
- package/dist/issue/template-parser.d.ts.map +1 -1
- package/dist/lifecycle/pipeline.d.ts +2 -2
- package/dist/lifecycle/pipeline.d.ts.map +1 -1
- package/dist/lifecycle/state-machine.d.ts +1 -1
- package/dist/lifecycle/state-machine.d.ts.map +1 -1
- package/dist/memory/brain-lifecycle.d.ts.map +1 -1
- package/dist/memory/brain-retrieval.d.ts.map +1 -1
- package/dist/memory/brain-row-types.d.ts +40 -6
- package/dist/memory/brain-row-types.d.ts.map +1 -1
- package/dist/memory/brain-search.d.ts.map +1 -1
- package/dist/memory/brain-similarity.d.ts.map +1 -1
- package/dist/memory/claude-mem-migration.d.ts.map +1 -1
- package/dist/nexus/discover.d.ts.map +1 -1
- package/dist/orchestration/bootstrap.d.ts.map +1 -1
- package/dist/orchestration/skill-ops.d.ts +4 -4
- package/dist/orchestration/skill-ops.d.ts.map +1 -1
- package/dist/otel/index.d.ts +1 -1
- package/dist/otel/index.d.ts.map +1 -1
- package/dist/sessions/briefing.d.ts.map +1 -1
- package/dist/sessions/handoff.d.ts.map +1 -1
- package/dist/sessions/index.d.ts +1 -1
- package/dist/sessions/index.d.ts.map +1 -1
- package/dist/sessions/types.d.ts +8 -42
- package/dist/sessions/types.d.ts.map +1 -1
- package/dist/signaldock/signaldock-transport.d.ts +1 -1
- package/dist/signaldock/signaldock-transport.d.ts.map +1 -1
- package/dist/skills/injection/subagent.d.ts +3 -3
- package/dist/skills/injection/subagent.d.ts.map +1 -1
- package/dist/skills/manifests/contribution.d.ts +2 -2
- package/dist/skills/manifests/contribution.d.ts.map +1 -1
- package/dist/skills/orchestrator/spawn.d.ts +6 -6
- package/dist/skills/orchestrator/spawn.d.ts.map +1 -1
- package/dist/skills/orchestrator/startup.d.ts +1 -1
- package/dist/skills/orchestrator/startup.d.ts.map +1 -1
- package/dist/skills/orchestrator/validator.d.ts +2 -2
- package/dist/skills/orchestrator/validator.d.ts.map +1 -1
- package/dist/skills/precedence-types.d.ts +24 -1
- package/dist/skills/precedence-types.d.ts.map +1 -1
- package/dist/skills/types.d.ts +70 -4
- package/dist/skills/types.d.ts.map +1 -1
- package/dist/store/export.d.ts +5 -4
- package/dist/store/export.d.ts.map +1 -1
- package/dist/store/tasks-schema.d.ts +12 -2
- package/dist/store/tasks-schema.d.ts.map +1 -1
- package/dist/store/typed-query.d.ts +12 -0
- package/dist/store/typed-query.d.ts.map +1 -0
- package/dist/store/validation-schemas.d.ts +2422 -50
- package/dist/store/validation-schemas.d.ts.map +1 -1
- package/dist/system/inject-generate.d.ts.map +1 -1
- package/dist/validation/doctor/checks.d.ts +5 -0
- package/dist/validation/doctor/checks.d.ts.map +1 -1
- package/dist/validation/engine.d.ts +10 -10
- package/dist/validation/engine.d.ts.map +1 -1
- package/dist/validation/index.d.ts +6 -2
- package/dist/validation/index.d.ts.map +1 -1
- package/dist/validation/protocol-common.d.ts +10 -2
- package/dist/validation/protocol-common.d.ts.map +1 -1
- package/migrations/drizzle-tasks/20260320013731_wave0-schema-hardening/migration.sql +84 -0
- package/migrations/drizzle-tasks/20260320013731_wave0-schema-hardening/snapshot.json +4060 -0
- package/migrations/drizzle-tasks/20260320020000_agent-dimension/migration.sql +35 -0
- package/migrations/drizzle-tasks/20260320020000_agent-dimension/snapshot.json +4312 -0
- package/package.json +2 -2
- package/src/admin/export-tasks.ts +2 -5
- package/src/agents/__tests__/capacity.test.ts +219 -0
- package/src/agents/__tests__/registry.test.ts +457 -0
- package/src/agents/__tests__/retry.test.ts +289 -0
- package/src/agents/agent-schema.ts +107 -0
- package/src/agents/capacity.ts +151 -0
- package/src/agents/index.ts +68 -0
- package/src/agents/registry.ts +449 -0
- package/src/agents/retry.ts +255 -0
- package/src/hooks/index.ts +20 -1
- package/src/hooks/payload-schemas.ts +199 -0
- package/src/index.ts +69 -0
- package/src/inject/index.ts +14 -14
- package/src/intelligence/__tests__/impact.test.ts +453 -0
- package/src/intelligence/__tests__/patterns.test.ts +450 -0
- package/src/intelligence/__tests__/prediction.test.ts +418 -0
- package/src/intelligence/impact.ts +638 -0
- package/src/intelligence/index.ts +47 -0
- package/src/intelligence/patterns.ts +621 -0
- package/src/intelligence/prediction.ts +621 -0
- package/src/intelligence/types.ts +273 -0
- package/src/internal.ts +82 -1
- package/src/issue/template-parser.ts +65 -4
- package/src/lifecycle/pipeline.ts +14 -7
- package/src/lifecycle/state-machine.ts +6 -2
- package/src/memory/brain-lifecycle.ts +5 -11
- package/src/memory/brain-retrieval.ts +44 -38
- package/src/memory/brain-row-types.ts +43 -6
- package/src/memory/brain-search.ts +53 -32
- package/src/memory/brain-similarity.ts +9 -8
- package/src/memory/claude-mem-migration.ts +4 -3
- package/src/nexus/__tests__/nexus-e2e.test.ts +1481 -0
- package/src/nexus/discover.ts +1 -0
- package/src/orchestration/bootstrap.ts +11 -17
- package/src/orchestration/skill-ops.ts +52 -32
- package/src/otel/index.ts +48 -4
- package/src/sessions/__tests__/briefing.test.ts +31 -2
- package/src/sessions/briefing.ts +27 -42
- package/src/sessions/handoff.ts +52 -86
- package/src/sessions/index.ts +5 -1
- package/src/sessions/types.ts +9 -43
- package/src/signaldock/signaldock-transport.ts +5 -2
- package/src/skills/injection/subagent.ts +10 -16
- package/src/skills/manifests/contribution.ts +5 -13
- package/src/skills/orchestrator/__tests__/spawn-tier.test.ts +44 -30
- package/src/skills/orchestrator/spawn.ts +18 -31
- package/src/skills/orchestrator/startup.ts +78 -65
- package/src/skills/orchestrator/validator.ts +26 -31
- package/src/skills/precedence-types.ts +24 -1
- package/src/skills/types.ts +72 -5
- package/src/store/__tests__/test-db-helper.d.ts +4 -4
- package/src/store/__tests__/test-db-helper.js +5 -16
- package/src/store/__tests__/test-db-helper.ts +5 -18
- package/src/store/chain-schema.ts +1 -1
- package/src/store/export.ts +22 -12
- package/src/store/tasks-schema.ts +65 -8
- package/src/store/typed-query.ts +17 -0
- package/src/store/validation-schemas.ts +347 -23
- package/src/system/inject-generate.ts +9 -23
- package/src/validation/doctor/checks.ts +24 -2
- package/src/validation/engine.ts +11 -11
- package/src/validation/index.ts +131 -3
- package/src/validation/protocol-common.ts +54 -3
- package/dist/tasks/reparent.d.ts +0 -38
- package/dist/tasks/reparent.d.ts.map +0 -1
- package/src/tasks/reparent.ts +0 -134
|
@@ -0,0 +1,621 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Quality Prediction module for the CLEO Intelligence dimension.
|
|
3
|
+
*
|
|
4
|
+
* Provides risk scoring for tasks based on complexity, historical patterns,
|
|
5
|
+
* and blocking analysis. Also predicts lifecycle gate validation outcomes
|
|
6
|
+
* using brain_patterns and brain_learnings data.
|
|
7
|
+
*
|
|
8
|
+
* Uses the existing BrainDataAccessor and DataAccessor — no new tables.
|
|
9
|
+
*
|
|
10
|
+
* @task Wave3A
|
|
11
|
+
* @epic T5149
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
import type { Task } from '@cleocode/contracts';
|
|
15
|
+
import type { BrainDataAccessor } from '../store/brain-accessor.js';
|
|
16
|
+
import type { DataAccessor } from '../store/data-accessor.js';
|
|
17
|
+
import type { LearningContext, RiskAssessment, RiskFactor, ValidationPrediction } from './types.js';
|
|
18
|
+
|
|
19
|
+
// ============================================================================
// Constants
// ============================================================================

/**
 * Risk factor weights — tuned to prioritize blocking risk and complexity.
 * The four weights sum to 1.0, so the weighted aggregate stays in [0, 1].
 */
const RISK_WEIGHTS = {
  complexity: 0.25,
  historicalFailure: 0.25,
  blockingRisk: 0.3,
  dependencyDepth: 0.2,
} as const;

/**
 * Thresholds for risk level recommendations (inclusive upper bounds):
 * score ≤ low → low risk, ≤ medium → moderate, ≤ high → high, else critical.
 */
const RISK_THRESHOLDS = {
  low: 0.3,
  medium: 0.6,
  high: 0.8,
} as const;

/**
 * Size-to-complexity mapping (0-1 scale). Unknown or missing sizes fall
 * back to 0.5 (medium) at the lookup site.
 */
const SIZE_COMPLEXITY: Record<string, number> = {
  small: 0.2,
  medium: 0.5,
  large: 0.8,
};
|
|
44
|
+
|
|
45
|
+
// ============================================================================
|
|
46
|
+
// Risk Scoring
|
|
47
|
+
// ============================================================================
|
|
48
|
+
|
|
49
|
+
/**
|
|
50
|
+
* Calculate the risk score for a task based on multiple contributing factors.
|
|
51
|
+
*
|
|
52
|
+
* Factors considered:
|
|
53
|
+
* - Task complexity (size, dependency count, hierarchy depth)
|
|
54
|
+
* - Historical patterns (similar tasks' failure rates from brain_patterns)
|
|
55
|
+
* - Blocking risk (does this task block others?)
|
|
56
|
+
* - Dependency depth (how deep in the dependency chain)
|
|
57
|
+
*
|
|
58
|
+
* @param taskId - The task to assess
|
|
59
|
+
* @param taskAccessor - DataAccessor for tasks.db
|
|
60
|
+
* @param brainAccessor - BrainDataAccessor for brain.db
|
|
61
|
+
* @returns A complete risk assessment with score, confidence, factors, and recommendation
|
|
62
|
+
*/
|
|
63
|
+
export async function calculateTaskRisk(
|
|
64
|
+
taskId: string,
|
|
65
|
+
taskAccessor: DataAccessor,
|
|
66
|
+
brainAccessor: BrainDataAccessor,
|
|
67
|
+
): Promise<RiskAssessment> {
|
|
68
|
+
const task = await taskAccessor.loadSingleTask(taskId);
|
|
69
|
+
if (!task) {
|
|
70
|
+
return {
|
|
71
|
+
taskId,
|
|
72
|
+
riskScore: 0,
|
|
73
|
+
confidence: 0,
|
|
74
|
+
factors: [],
|
|
75
|
+
recommendation: `Task ${taskId} not found — cannot assess risk.`,
|
|
76
|
+
};
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
const factors: RiskFactor[] = [];
|
|
80
|
+
let dataPoints = 0;
|
|
81
|
+
|
|
82
|
+
// Factor 1: Complexity (size + dependency count + child count)
|
|
83
|
+
const complexityFactor = await computeComplexityFactor(task, taskAccessor);
|
|
84
|
+
factors.push(complexityFactor);
|
|
85
|
+
dataPoints += 1;
|
|
86
|
+
|
|
87
|
+
// Factor 2: Historical failure patterns from brain_patterns
|
|
88
|
+
const historicalFactor = await computeHistoricalFailureFactor(task, brainAccessor);
|
|
89
|
+
factors.push(historicalFactor);
|
|
90
|
+
if (historicalFactor.value > 0) dataPoints += 1;
|
|
91
|
+
|
|
92
|
+
// Factor 3: Blocking risk (how many tasks does this block?)
|
|
93
|
+
const blockingFactor = await computeBlockingFactor(task, taskAccessor);
|
|
94
|
+
factors.push(blockingFactor);
|
|
95
|
+
dataPoints += 1;
|
|
96
|
+
|
|
97
|
+
// Factor 4: Dependency depth
|
|
98
|
+
const depthFactor = await computeDependencyDepthFactor(task, taskAccessor);
|
|
99
|
+
factors.push(depthFactor);
|
|
100
|
+
dataPoints += 1;
|
|
101
|
+
|
|
102
|
+
// Compute weighted aggregate score
|
|
103
|
+
const riskScore = computeWeightedScore(factors);
|
|
104
|
+
|
|
105
|
+
// Confidence scales with available data (0.25 base, up to 1.0)
|
|
106
|
+
const confidence = Math.min(1.0, 0.25 + (dataPoints / 4) * 0.75);
|
|
107
|
+
|
|
108
|
+
const recommendation = generateRecommendation(riskScore, factors);
|
|
109
|
+
|
|
110
|
+
return {
|
|
111
|
+
taskId,
|
|
112
|
+
riskScore: Math.round(riskScore * 1000) / 1000,
|
|
113
|
+
confidence: Math.round(confidence * 1000) / 1000,
|
|
114
|
+
factors,
|
|
115
|
+
recommendation,
|
|
116
|
+
};
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
// ============================================================================
|
|
120
|
+
// Validation Outcome Prediction
|
|
121
|
+
// ============================================================================
|
|
122
|
+
|
|
123
|
+
/**
|
|
124
|
+
* Predict the likelihood of a task passing a lifecycle validation gate.
|
|
125
|
+
*
|
|
126
|
+
* Combines:
|
|
127
|
+
* - Historical gate results for similar tasks (from brain_patterns)
|
|
128
|
+
* - Current task completion state
|
|
129
|
+
* - Applicable learnings from brain_learnings
|
|
130
|
+
*
|
|
131
|
+
* @param taskId - The task to evaluate
|
|
132
|
+
* @param stage - The lifecycle stage (e.g., "specification", "implementation")
|
|
133
|
+
* @param taskAccessor - DataAccessor for tasks.db
|
|
134
|
+
* @param brainAccessor - BrainDataAccessor for brain.db
|
|
135
|
+
* @returns Prediction with pass likelihood, blockers, and suggestions
|
|
136
|
+
*/
|
|
137
|
+
export async function predictValidationOutcome(
|
|
138
|
+
taskId: string,
|
|
139
|
+
stage: string,
|
|
140
|
+
taskAccessor: DataAccessor,
|
|
141
|
+
brainAccessor: BrainDataAccessor,
|
|
142
|
+
): Promise<ValidationPrediction> {
|
|
143
|
+
const task = await taskAccessor.loadSingleTask(taskId);
|
|
144
|
+
if (!task) {
|
|
145
|
+
return {
|
|
146
|
+
taskId,
|
|
147
|
+
stage,
|
|
148
|
+
passLikelihood: 0,
|
|
149
|
+
blockers: [`Task ${taskId} not found`],
|
|
150
|
+
suggestions: [],
|
|
151
|
+
};
|
|
152
|
+
}
|
|
153
|
+
|
|
154
|
+
const blockers: string[] = [];
|
|
155
|
+
const suggestions: string[] = [];
|
|
156
|
+
let passSignals = 0;
|
|
157
|
+
let failSignals = 0;
|
|
158
|
+
let totalSignals = 0;
|
|
159
|
+
|
|
160
|
+
// Signal 1: Task status assessment
|
|
161
|
+
const statusResult = assessTaskStatus(task, stage);
|
|
162
|
+
passSignals += statusResult.pass;
|
|
163
|
+
failSignals += statusResult.fail;
|
|
164
|
+
totalSignals += statusResult.total;
|
|
165
|
+
blockers.push(...statusResult.blockers);
|
|
166
|
+
suggestions.push(...statusResult.suggestions);
|
|
167
|
+
|
|
168
|
+
// Signal 2: Acceptance criteria completeness
|
|
169
|
+
const criteriaResult = assessAcceptanceCriteria(task);
|
|
170
|
+
passSignals += criteriaResult.pass;
|
|
171
|
+
failSignals += criteriaResult.fail;
|
|
172
|
+
totalSignals += criteriaResult.total;
|
|
173
|
+
blockers.push(...criteriaResult.blockers);
|
|
174
|
+
suggestions.push(...criteriaResult.suggestions);
|
|
175
|
+
|
|
176
|
+
// Signal 3: Historical success patterns from brain_patterns
|
|
177
|
+
const patternResult = await assessHistoricalPatterns(task, stage, brainAccessor);
|
|
178
|
+
passSignals += patternResult.pass;
|
|
179
|
+
failSignals += patternResult.fail;
|
|
180
|
+
totalSignals += patternResult.total;
|
|
181
|
+
suggestions.push(...patternResult.suggestions);
|
|
182
|
+
|
|
183
|
+
// Signal 4: Applicable learnings from brain_learnings
|
|
184
|
+
const learningContext = await gatherLearningContext(task, brainAccessor);
|
|
185
|
+
if (learningContext.actionableCount > 0) {
|
|
186
|
+
passSignals += learningContext.averageConfidence;
|
|
187
|
+
totalSignals += 1;
|
|
188
|
+
}
|
|
189
|
+
|
|
190
|
+
// Compute pass likelihood
|
|
191
|
+
const passLikelihood =
|
|
192
|
+
totalSignals > 0
|
|
193
|
+
? Math.round((passSignals / (passSignals + failSignals || 1)) * 1000) / 1000
|
|
194
|
+
: 0.5;
|
|
195
|
+
|
|
196
|
+
return {
|
|
197
|
+
taskId,
|
|
198
|
+
stage,
|
|
199
|
+
passLikelihood: Math.max(0, Math.min(1, passLikelihood)),
|
|
200
|
+
blockers,
|
|
201
|
+
suggestions,
|
|
202
|
+
};
|
|
203
|
+
}
|
|
204
|
+
|
|
205
|
+
// ============================================================================
|
|
206
|
+
// Internal: Risk Factor Computation
|
|
207
|
+
// ============================================================================
|
|
208
|
+
|
|
209
|
+
async function computeComplexityFactor(task: Task, accessor: DataAccessor): Promise<RiskFactor> {
|
|
210
|
+
const depCount = task.depends?.length ?? 0;
|
|
211
|
+
let childCount = 0;
|
|
212
|
+
|
|
213
|
+
try {
|
|
214
|
+
childCount = await accessor.countChildren(task.id);
|
|
215
|
+
} catch {
|
|
216
|
+
// countChildren may fail for leaf tasks; default to 0
|
|
217
|
+
}
|
|
218
|
+
|
|
219
|
+
const sizeValue = SIZE_COMPLEXITY[task.size ?? 'medium'] ?? 0.5;
|
|
220
|
+
const depNormalized = Math.min(depCount / 10, 1.0);
|
|
221
|
+
const childNormalized = Math.min(childCount / 20, 1.0);
|
|
222
|
+
|
|
223
|
+
const value = sizeValue * 0.4 + depNormalized * 0.3 + childNormalized * 0.3;
|
|
224
|
+
|
|
225
|
+
const parts: string[] = [];
|
|
226
|
+
if (task.size) parts.push(`size=${task.size}`);
|
|
227
|
+
if (depCount > 0) parts.push(`${depCount} dependencies`);
|
|
228
|
+
if (childCount > 0) parts.push(`${childCount} children`);
|
|
229
|
+
|
|
230
|
+
return {
|
|
231
|
+
name: 'complexity',
|
|
232
|
+
weight: RISK_WEIGHTS.complexity,
|
|
233
|
+
value: Math.round(value * 1000) / 1000,
|
|
234
|
+
description:
|
|
235
|
+
parts.length > 0
|
|
236
|
+
? `Task complexity based on ${parts.join(', ')}`
|
|
237
|
+
: 'Default complexity (no size/deps/children data)',
|
|
238
|
+
};
|
|
239
|
+
}
|
|
240
|
+
|
|
241
|
+
async function computeHistoricalFailureFactor(
|
|
242
|
+
task: Task,
|
|
243
|
+
brainAccessor: BrainDataAccessor,
|
|
244
|
+
): Promise<RiskFactor> {
|
|
245
|
+
// Look for failure and blocker patterns in brain_patterns
|
|
246
|
+
const failurePatterns = await brainAccessor.findPatterns({ type: 'failure', limit: 50 });
|
|
247
|
+
const blockerPatterns = await brainAccessor.findPatterns({ type: 'blocker', limit: 50 });
|
|
248
|
+
|
|
249
|
+
const allNegativePatterns = [...failurePatterns, ...blockerPatterns];
|
|
250
|
+
|
|
251
|
+
if (allNegativePatterns.length === 0) {
|
|
252
|
+
return {
|
|
253
|
+
name: 'historical_failure',
|
|
254
|
+
weight: RISK_WEIGHTS.historicalFailure,
|
|
255
|
+
value: 0,
|
|
256
|
+
description: 'No historical failure patterns found in brain.db',
|
|
257
|
+
};
|
|
258
|
+
}
|
|
259
|
+
|
|
260
|
+
// Score based on average failure rate of matching patterns
|
|
261
|
+
let matchCount = 0;
|
|
262
|
+
let totalFailureRate = 0;
|
|
263
|
+
|
|
264
|
+
const taskLabels = new Set((task.labels ?? []).map((l) => l.toLowerCase()));
|
|
265
|
+
const taskTitle = task.title.toLowerCase();
|
|
266
|
+
const taskDesc = (task.description ?? '').toLowerCase();
|
|
267
|
+
|
|
268
|
+
for (const p of allNegativePatterns) {
|
|
269
|
+
const patternText = p.pattern.toLowerCase();
|
|
270
|
+
const contextText = p.context.toLowerCase();
|
|
271
|
+
|
|
272
|
+
// Check for textual overlap between pattern and task
|
|
273
|
+
const hasLabelMatch =
|
|
274
|
+
taskLabels.size > 0 &&
|
|
275
|
+
[...taskLabels].some((l) => patternText.includes(l) || contextText.includes(l));
|
|
276
|
+
const hasTitleMatch =
|
|
277
|
+
patternText.includes(taskTitle) || taskTitle.includes(patternText.slice(0, 20));
|
|
278
|
+
const hasDescMatch =
|
|
279
|
+
taskDesc.length > 10 &&
|
|
280
|
+
(patternText.includes(taskDesc.slice(0, 30)) || taskDesc.includes(patternText.slice(0, 30)));
|
|
281
|
+
|
|
282
|
+
if (hasLabelMatch || hasTitleMatch || hasDescMatch) {
|
|
283
|
+
matchCount++;
|
|
284
|
+
// successRate close to 0 means high failure rate
|
|
285
|
+
const failureRate = p.successRate !== null ? 1 - p.successRate : 0.5;
|
|
286
|
+
totalFailureRate += failureRate;
|
|
287
|
+
}
|
|
288
|
+
}
|
|
289
|
+
|
|
290
|
+
if (matchCount === 0) {
|
|
291
|
+
return {
|
|
292
|
+
name: 'historical_failure',
|
|
293
|
+
weight: RISK_WEIGHTS.historicalFailure,
|
|
294
|
+
value: 0,
|
|
295
|
+
description: 'No matching historical failure patterns for this task',
|
|
296
|
+
};
|
|
297
|
+
}
|
|
298
|
+
|
|
299
|
+
const avgFailureRate = totalFailureRate / matchCount;
|
|
300
|
+
|
|
301
|
+
return {
|
|
302
|
+
name: 'historical_failure',
|
|
303
|
+
weight: RISK_WEIGHTS.historicalFailure,
|
|
304
|
+
value: Math.round(avgFailureRate * 1000) / 1000,
|
|
305
|
+
description: `${matchCount} matching failure/blocker pattern(s) with avg failure rate ${Math.round(avgFailureRate * 100)}%`,
|
|
306
|
+
};
|
|
307
|
+
}
|
|
308
|
+
|
|
309
|
+
async function computeBlockingFactor(task: Task, accessor: DataAccessor): Promise<RiskFactor> {
|
|
310
|
+
// Count how many other tasks depend on this task (i.e., this task blocks them)
|
|
311
|
+
let blockedCount = 0;
|
|
312
|
+
|
|
313
|
+
try {
|
|
314
|
+
// Query all non-done tasks to find those that depend on this task
|
|
315
|
+
const { tasks: allTasks } = await accessor.queryTasks({
|
|
316
|
+
status: ['pending', 'active', 'blocked'],
|
|
317
|
+
});
|
|
318
|
+
|
|
319
|
+
for (const t of allTasks) {
|
|
320
|
+
if (t.depends?.includes(task.id)) {
|
|
321
|
+
blockedCount++;
|
|
322
|
+
}
|
|
323
|
+
}
|
|
324
|
+
} catch {
|
|
325
|
+
// Best-effort: if query fails, treat as unknown
|
|
326
|
+
}
|
|
327
|
+
|
|
328
|
+
// Normalize: 0 blocked = 0, 5+ blocked = 1.0
|
|
329
|
+
const value = Math.min(blockedCount / 5, 1.0);
|
|
330
|
+
|
|
331
|
+
return {
|
|
332
|
+
name: 'blocking_risk',
|
|
333
|
+
weight: RISK_WEIGHTS.blockingRisk,
|
|
334
|
+
value: Math.round(value * 1000) / 1000,
|
|
335
|
+
description:
|
|
336
|
+
blockedCount > 0
|
|
337
|
+
? `This task blocks ${blockedCount} other task(s)`
|
|
338
|
+
: 'This task does not block any other tasks',
|
|
339
|
+
};
|
|
340
|
+
}
|
|
341
|
+
|
|
342
|
+
async function computeDependencyDepthFactor(
|
|
343
|
+
task: Task,
|
|
344
|
+
accessor: DataAccessor,
|
|
345
|
+
): Promise<RiskFactor> {
|
|
346
|
+
// Walk the dependency chain to measure depth
|
|
347
|
+
let depth = 0;
|
|
348
|
+
const visited = new Set<string>();
|
|
349
|
+
let current = task;
|
|
350
|
+
|
|
351
|
+
while (current.depends && current.depends.length > 0 && depth < 10) {
|
|
352
|
+
const firstDep = current.depends[0];
|
|
353
|
+
if (visited.has(firstDep)) break; // cycle guard
|
|
354
|
+
visited.add(firstDep);
|
|
355
|
+
|
|
356
|
+
const depTask = await accessor.loadSingleTask(firstDep);
|
|
357
|
+
if (!depTask) break;
|
|
358
|
+
|
|
359
|
+
depth++;
|
|
360
|
+
current = depTask;
|
|
361
|
+
}
|
|
362
|
+
|
|
363
|
+
// Also count parent depth (hierarchy)
|
|
364
|
+
let hierarchyDepth = 0;
|
|
365
|
+
let parentId = task.parentId;
|
|
366
|
+
const visitedParents = new Set<string>();
|
|
367
|
+
|
|
368
|
+
while (parentId && hierarchyDepth < 10) {
|
|
369
|
+
if (visitedParents.has(parentId)) break;
|
|
370
|
+
visitedParents.add(parentId);
|
|
371
|
+
|
|
372
|
+
const parent = await accessor.loadSingleTask(parentId);
|
|
373
|
+
if (!parent) break;
|
|
374
|
+
|
|
375
|
+
hierarchyDepth++;
|
|
376
|
+
parentId = parent.parentId;
|
|
377
|
+
}
|
|
378
|
+
|
|
379
|
+
const totalDepth = depth + hierarchyDepth;
|
|
380
|
+
// Normalize: depth 0 = 0, depth 8+ = 1.0
|
|
381
|
+
const value = Math.min(totalDepth / 8, 1.0);
|
|
382
|
+
|
|
383
|
+
return {
|
|
384
|
+
name: 'dependency_depth',
|
|
385
|
+
weight: RISK_WEIGHTS.dependencyDepth,
|
|
386
|
+
value: Math.round(value * 1000) / 1000,
|
|
387
|
+
description: `Dependency chain depth: ${depth}, hierarchy depth: ${hierarchyDepth}`,
|
|
388
|
+
};
|
|
389
|
+
}
|
|
390
|
+
|
|
391
|
+
// ============================================================================
|
|
392
|
+
// Internal: Validation Prediction Helpers
|
|
393
|
+
// ============================================================================
|
|
394
|
+
|
|
395
|
+
/**
 * Pass/fail signal contribution produced by one prediction heuristic.
 * `pass` and `fail` are fractional vote weights; `total` counts how many
 * signals were actually evaluated (0 means the heuristic had no data).
 */
interface SignalResult {
  pass: number; // weighted evidence toward passing the gate
  fail: number; // weighted evidence toward failing the gate
  total: number; // number of contributing signals (guards the denominator)
  blockers: string[]; // hard obstacles surfaced by this heuristic
  suggestions: string[]; // actionable advice surfaced by this heuristic
}
|
|
402
|
+
|
|
403
|
+
function assessTaskStatus(task: Task, stage: string): SignalResult {
|
|
404
|
+
const blockers: string[] = [];
|
|
405
|
+
const suggestions: string[] = [];
|
|
406
|
+
let pass = 0;
|
|
407
|
+
let fail = 0;
|
|
408
|
+
|
|
409
|
+
if (task.status === 'blocked') {
|
|
410
|
+
fail += 1;
|
|
411
|
+
blockers.push(`Task is currently blocked${task.blockedBy ? `: ${task.blockedBy}` : ''}`);
|
|
412
|
+
suggestions.push('Resolve blocking issues before attempting gate validation');
|
|
413
|
+
} else if (task.status === 'cancelled') {
|
|
414
|
+
fail += 1;
|
|
415
|
+
blockers.push('Task is cancelled');
|
|
416
|
+
} else if (task.status === 'done') {
|
|
417
|
+
pass += 1;
|
|
418
|
+
} else if (task.status === 'active') {
|
|
419
|
+
// For early stages, active is fine; for late stages, may need completion
|
|
420
|
+
if (stage === 'verification' || stage === 'release') {
|
|
421
|
+
fail += 0.5;
|
|
422
|
+
suggestions.push(`Task should be completed before ${stage} gate`);
|
|
423
|
+
} else {
|
|
424
|
+
pass += 0.5;
|
|
425
|
+
}
|
|
426
|
+
} else {
|
|
427
|
+
// todo status
|
|
428
|
+
if (stage === 'specification') {
|
|
429
|
+
pass += 0.3;
|
|
430
|
+
} else {
|
|
431
|
+
fail += 0.5;
|
|
432
|
+
suggestions.push(
|
|
433
|
+
`Task is still in "${task.status}" status — work should begin before ${stage} gate`,
|
|
434
|
+
);
|
|
435
|
+
}
|
|
436
|
+
}
|
|
437
|
+
|
|
438
|
+
return { pass, fail, total: 1, blockers, suggestions };
|
|
439
|
+
}
|
|
440
|
+
|
|
441
|
+
function assessAcceptanceCriteria(task: Task): SignalResult {
|
|
442
|
+
const blockers: string[] = [];
|
|
443
|
+
const suggestions: string[] = [];
|
|
444
|
+
|
|
445
|
+
const criteria = task.acceptance ?? [];
|
|
446
|
+
|
|
447
|
+
if (criteria.length === 0) {
|
|
448
|
+
return {
|
|
449
|
+
pass: 0.3,
|
|
450
|
+
fail: 0.2,
|
|
451
|
+
total: 1,
|
|
452
|
+
blockers: [],
|
|
453
|
+
suggestions: ['Add acceptance criteria to improve validation confidence'],
|
|
454
|
+
};
|
|
455
|
+
}
|
|
456
|
+
|
|
457
|
+
// Having acceptance criteria is a positive signal
|
|
458
|
+
return {
|
|
459
|
+
pass: 0.7,
|
|
460
|
+
fail: 0,
|
|
461
|
+
total: 1,
|
|
462
|
+
blockers,
|
|
463
|
+
suggestions,
|
|
464
|
+
};
|
|
465
|
+
}
|
|
466
|
+
|
|
467
|
+
async function assessHistoricalPatterns(
|
|
468
|
+
task: Task,
|
|
469
|
+
stage: string,
|
|
470
|
+
brainAccessor: BrainDataAccessor,
|
|
471
|
+
): Promise<SignalResult> {
|
|
472
|
+
const suggestions: string[] = [];
|
|
473
|
+
let pass = 0;
|
|
474
|
+
let fail = 0;
|
|
475
|
+
let total = 0;
|
|
476
|
+
|
|
477
|
+
const taskLabels = new Set((task.labels ?? []).map((l) => l.toLowerCase()));
|
|
478
|
+
const taskTitle = task.title.toLowerCase();
|
|
479
|
+
|
|
480
|
+
// Look for success patterns related to this stage or matching task attributes
|
|
481
|
+
const successPatterns = await brainAccessor.findPatterns({ type: 'success', limit: 30 });
|
|
482
|
+
|
|
483
|
+
for (const p of successPatterns) {
|
|
484
|
+
const ctx = p.context.toLowerCase();
|
|
485
|
+
const pat = p.pattern.toLowerCase();
|
|
486
|
+
const stageMatch = ctx.includes(stage.toLowerCase());
|
|
487
|
+
const labelMatch = [...taskLabels].some((l) => pat.includes(l) || ctx.includes(l));
|
|
488
|
+
const titleMatch = taskTitle
|
|
489
|
+
.split(/\s+/)
|
|
490
|
+
.filter((w) => w.length > 3)
|
|
491
|
+
.some((w) => pat.includes(w));
|
|
492
|
+
|
|
493
|
+
if (stageMatch || labelMatch || titleMatch) {
|
|
494
|
+
total++;
|
|
495
|
+
if (p.successRate !== null && p.successRate >= 0.7) {
|
|
496
|
+
pass += p.successRate;
|
|
497
|
+
} else {
|
|
498
|
+
pass += 0.5;
|
|
499
|
+
}
|
|
500
|
+
}
|
|
501
|
+
}
|
|
502
|
+
|
|
503
|
+
// Look for failure patterns related to this stage or matching task attributes
|
|
504
|
+
const failurePatterns = await brainAccessor.findPatterns({ type: 'failure', limit: 30 });
|
|
505
|
+
|
|
506
|
+
for (const p of failurePatterns) {
|
|
507
|
+
const ctx = p.context.toLowerCase();
|
|
508
|
+
const pat = p.pattern.toLowerCase();
|
|
509
|
+
const stageMatch = ctx.includes(stage.toLowerCase());
|
|
510
|
+
const labelMatch = [...taskLabels].some((l) => pat.includes(l) || ctx.includes(l));
|
|
511
|
+
const titleMatch = taskTitle
|
|
512
|
+
.split(/\s+/)
|
|
513
|
+
.filter((w) => w.length > 3)
|
|
514
|
+
.some((w) => pat.includes(w));
|
|
515
|
+
|
|
516
|
+
if (stageMatch || labelMatch || titleMatch) {
|
|
517
|
+
total++;
|
|
518
|
+
fail += p.successRate !== null ? 1 - p.successRate : 0.5;
|
|
519
|
+
if (p.mitigation) {
|
|
520
|
+
suggestions.push(`Historical pattern suggests: ${p.mitigation}`);
|
|
521
|
+
}
|
|
522
|
+
}
|
|
523
|
+
}
|
|
524
|
+
|
|
525
|
+
if (total === 0) {
|
|
526
|
+
return { pass: 0, fail: 0, total: 0, blockers: [], suggestions };
|
|
527
|
+
}
|
|
528
|
+
|
|
529
|
+
return { pass, fail, total, blockers: [], suggestions };
|
|
530
|
+
}
|
|
531
|
+
|
|
532
|
+
/**
|
|
533
|
+
* Gather applicable learnings for a task from brain_learnings.
|
|
534
|
+
*/
|
|
535
|
+
export async function gatherLearningContext(
|
|
536
|
+
task: Task,
|
|
537
|
+
brainAccessor: BrainDataAccessor,
|
|
538
|
+
): Promise<LearningContext> {
|
|
539
|
+
const learnings = await brainAccessor.findLearnings({ limit: 100 });
|
|
540
|
+
|
|
541
|
+
const taskLabels = new Set((task.labels ?? []).map((l) => l.toLowerCase()));
|
|
542
|
+
const taskTitle = task.title.toLowerCase();
|
|
543
|
+
|
|
544
|
+
const applicable = learnings.filter((l) => {
|
|
545
|
+
const insight = l.insight.toLowerCase();
|
|
546
|
+
const source = l.source.toLowerCase();
|
|
547
|
+
|
|
548
|
+
// Match by task ID reference
|
|
549
|
+
if (insight.includes(task.id.toLowerCase()) || source.includes(task.id.toLowerCase())) {
|
|
550
|
+
return true;
|
|
551
|
+
}
|
|
552
|
+
|
|
553
|
+
// Match by label overlap
|
|
554
|
+
if (taskLabels.size > 0 && [...taskLabels].some((label) => insight.includes(label))) {
|
|
555
|
+
return true;
|
|
556
|
+
}
|
|
557
|
+
|
|
558
|
+
// Match by applicable types
|
|
559
|
+
const applicableTypes = JSON.parse(l.applicableTypesJson || '[]') as string[];
|
|
560
|
+
if (task.type && applicableTypes.includes(task.type)) {
|
|
561
|
+
return true;
|
|
562
|
+
}
|
|
563
|
+
|
|
564
|
+
// Match by title keyword overlap (at least 2 words matching)
|
|
565
|
+
const titleWords = taskTitle.split(/\s+/).filter((w) => w.length > 3);
|
|
566
|
+
const matchingWords = titleWords.filter((w) => insight.includes(w));
|
|
567
|
+
if (matchingWords.length >= 2) {
|
|
568
|
+
return true;
|
|
569
|
+
}
|
|
570
|
+
|
|
571
|
+
return false;
|
|
572
|
+
});
|
|
573
|
+
|
|
574
|
+
const totalConfidence = applicable.reduce((sum, l) => sum + l.confidence, 0);
|
|
575
|
+
const averageConfidence = applicable.length > 0 ? totalConfidence / applicable.length : 0;
|
|
576
|
+
const actionableCount = applicable.filter((l) => l.actionable).length;
|
|
577
|
+
|
|
578
|
+
return {
|
|
579
|
+
applicable,
|
|
580
|
+
averageConfidence: Math.round(averageConfidence * 1000) / 1000,
|
|
581
|
+
actionableCount,
|
|
582
|
+
};
|
|
583
|
+
}
|
|
584
|
+
|
|
585
|
+
// ============================================================================
|
|
586
|
+
// Internal: Score Computation
|
|
587
|
+
// ============================================================================
|
|
588
|
+
|
|
589
|
+
function computeWeightedScore(factors: RiskFactor[]): number {
|
|
590
|
+
let totalWeight = 0;
|
|
591
|
+
let weightedSum = 0;
|
|
592
|
+
|
|
593
|
+
for (const f of factors) {
|
|
594
|
+
weightedSum += f.weight * f.value;
|
|
595
|
+
totalWeight += f.weight;
|
|
596
|
+
}
|
|
597
|
+
|
|
598
|
+
return totalWeight > 0 ? weightedSum / totalWeight : 0;
|
|
599
|
+
}
|
|
600
|
+
|
|
601
|
+
function generateRecommendation(score: number, factors: RiskFactor[]): string {
|
|
602
|
+
if (score <= RISK_THRESHOLDS.low) {
|
|
603
|
+
return 'Low risk. Proceed with normal workflow.';
|
|
604
|
+
}
|
|
605
|
+
|
|
606
|
+
if (score <= RISK_THRESHOLDS.medium) {
|
|
607
|
+
const topFactors = factors
|
|
608
|
+
.filter((f) => f.value > 0.4)
|
|
609
|
+
.sort((a, b) => b.value * b.weight - a.value * a.weight)
|
|
610
|
+
.slice(0, 2);
|
|
611
|
+
|
|
612
|
+
const factorNames = topFactors.map((f) => f.name).join(', ');
|
|
613
|
+
return `Moderate risk (${factorNames}). Consider extra review or decomposition.`;
|
|
614
|
+
}
|
|
615
|
+
|
|
616
|
+
if (score <= RISK_THRESHOLDS.high) {
|
|
617
|
+
return 'High risk. Recommend decomposition, additional testing, or pair review before proceeding.';
|
|
618
|
+
}
|
|
619
|
+
|
|
620
|
+
return 'Critical risk. Strongly recommend breaking this task into smaller units and addressing blockers first.';
|
|
621
|
+
}
|