@unrdf/decision-fabric 26.4.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +357 -0
- package/capability-map.md +96 -0
- package/package.json +42 -0
- package/src/bb8020-orchestrator.mjs +763 -0
- package/src/bb8020-steps/index.mjs +9 -0
- package/src/bb8020-steps/step0-pre-validation.mjs +35 -0
- package/src/bb8020-steps/step1-parsing.mjs +30 -0
- package/src/bb8020-steps/step10-kgc-logging.mjs +81 -0
- package/src/bb8020-steps/step2-pareto.mjs +39 -0
- package/src/bb8020-steps/step3-embedding.mjs +42 -0
- package/src/bb8020-steps/step4-pattern-matching.mjs +127 -0
- package/src/bb8020-steps/step8-syntax-validation.mjs +91 -0
- package/src/bb8020-steps/step9-static-analysis.mjs +105 -0
- package/src/engine.mjs +386 -0
- package/src/index.mjs +147 -0
- package/src/pareto-analyzer.mjs +248 -0
- package/src/socratic-agent.mjs +358 -0
- package/test/decision-fabric.test.mjs +353 -0
|
@@ -0,0 +1,248 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Pareto Frontier Analyzer
|
|
3
|
+
*
|
|
4
|
+
* Implements the Big Bang 80/20 methodology for feature prioritization.
|
|
5
|
+
* Based on theoretical framework from thesis-bigbang-80-20.tex
|
|
6
|
+
*
|
|
7
|
+
* Key Properties:
|
|
8
|
+
* - Identifies Pareto-optimal features (non-dominated in value-cost space)
|
|
9
|
+
* - Validates 80/20 rule: 20% of features deliver 80% of value
|
|
10
|
+
* - Computes specification entropy H_spec to determine applicability
|
|
11
|
+
* - Provides multi-objective optimization for strategic decisions
|
|
12
|
+
*
|
|
13
|
+
* Mathematical Guarantees:
|
|
14
|
+
* - H_spec(P) ≥ 0.75 × H_spec(F) (Pareto entropy concentration)
|
|
15
|
+
* - |P| ≈ 0.2 × |F| (size of Pareto frontier)
|
|
16
|
+
*/
|
|
17
|
+
|
|
18
|
+
/**
|
|
19
|
+
* Feature representation with value and cost
|
|
20
|
+
*/
|
|
21
|
+
/**
 * Feature representation with value and cost.
 *
 * @property {*} id - Unique identifier (compared with !== in frontier scans).
 * @property {string} name - Human-readable feature name.
 * @property {number} value - Business value (0-100).
 * @property {number} cost - Implementation cost (LoC or hours).
 * @property {string} description - Optional longer description.
 */
export class Feature {
  constructor({ id, name, value, cost, description = '' }) {
    this.id = id;
    this.name = name;
    this.value = value; // Business value (0-100)
    this.cost = cost; // Implementation cost (LoC or hours)
    this.description = description;
  }

  /**
   * Value-to-cost ratio (efficiency metric).
   *
   * Fix: the previous implementation returned 0 whenever cost <= 0, which
   * paradoxically ranked a free-but-valuable feature as the LEAST efficient.
   * A zero-cost feature with positive value is "free value" and should rank
   * above any finite ratio, so it now yields Infinity; a feature with no
   * value and no cost stays at 0.
   *
   * @returns {number} value/cost, Infinity for free valuable features, 0 otherwise.
   */
  get efficiency() {
    if (this.cost > 0) {
      return this.value / this.cost;
    }
    return this.value > 0 ? Infinity : 0;
  }

  /**
   * Check if this feature Pareto-dominates another.
   *
   * Dominates iff: value >= other.value AND cost <= other.cost, with at
   * least one of the two inequalities strict.
   *
   * @param {Feature} other - Feature to compare against.
   * @returns {boolean} true when this feature dominates `other`.
   */
  dominates(other) {
    return (
      this.value >= other.value &&
      this.cost <= other.cost &&
      (this.value > other.value || this.cost < other.cost)
    );
  }
}
|
|
50
|
+
|
|
51
|
+
/**
|
|
52
|
+
* Pareto Frontier Analyzer
|
|
53
|
+
*/
|
|
54
|
+
/**
 * Pareto Frontier Analyzer.
 *
 * Implements the Big Bang 80/20 methodology for feature prioritization:
 * finds the non-dominated (Pareto-optimal) subset of features, computes
 * the value-weighted specification entropy H_spec, validates the 80/20
 * rule, and produces an implementation recommendation.
 */
export class ParetoAnalyzer {
  constructor() {
    // Pool of Feature instances under analysis.
    this.features = [];
  }

  /**
   * Add a single feature to the analysis pool.
   *
   * @param {Feature} feature - Feature to add.
   * @throws {Error} when the argument is not a Feature instance.
   */
  addFeature(feature) {
    if (!(feature instanceof Feature)) {
      throw new Error('Must be instance of Feature');
    }
    this.features.push(feature);
  }

  /**
   * Add multiple features at once.
   *
   * @param {Feature[]} features - Features to add (each is validated).
   */
  addFeatures(features) {
    features.forEach(f => this.addFeature(f));
  }

  /**
   * Compute the Pareto frontier (non-dominated features).
   *
   * A feature is on the frontier when no other feature (with a different
   * id) dominates it in value-cost space.
   *
   * @returns {Feature[]} frontier sorted by efficiency (value/cost) descending.
   */
  computeParetoFrontier() {
    const frontier = [];

    for (const candidate of this.features) {
      // O(n^2) scan; acceptable for the small feature sets this targets.
      const isDominated = this.features.some(
        other => candidate.id !== other.id && other.dominates(candidate)
      );
      if (!isDominated) {
        frontier.push(candidate);
      }
    }

    // Sort by efficiency (value/cost ratio) descending
    return frontier.sort((a, b) => b.efficiency - a.efficiency);
  }

  /**
   * Compute specification entropy H_spec.
   *
   * H_spec = -Σ p_i log₂(p_i)
   * where p_i = value_i / Σ value_j (value-weighted probability).
   *
   * @returns {number} entropy in bits; 0 when total value is 0.
   */
  computeSpecificationEntropy() {
    const totalValue = this.features.reduce((sum, f) => sum + f.value, 0);

    if (totalValue === 0) return 0;

    let entropy = 0;
    for (const feature of this.features) {
      const p = feature.value / totalValue;
      if (p > 0) {
        entropy -= p * Math.log2(p);
      }
    }

    return entropy;
  }

  /**
   * Validate the 80/20 rule.
   *
   * Fix: percentages were computed with unguarded divisions, so an empty
   * feature pool (or an all-zero-value pool) produced NaN. Both now
   * report 0 instead; `valid` is unchanged for non-degenerate inputs.
   *
   * @returns {{valid: boolean, paretoPercentage: number, valuePercentage: number,
   *            paretoCount: number, totalCount: number}}
   */
  validate8020Rule() {
    const frontier = this.computeParetoFrontier();
    const totalValue = this.features.reduce((sum, f) => sum + f.value, 0);
    const frontierValue = frontier.reduce((sum, f) => sum + f.value, 0);

    const paretoPercentage = this.features.length > 0
      ? (frontier.length / this.features.length) * 100
      : 0;
    const valuePercentage = totalValue > 0
      ? (frontierValue / totalValue) * 100
      : 0;

    // Valid if ~20% of features deliver ~80% of value (with 20% tolerance)
    const valid = paretoPercentage <= 40 && valuePercentage >= 60;

    return {
      valid,
      paretoPercentage,
      valuePercentage,
      paretoCount: frontier.length,
      totalCount: this.features.length
    };
  }

  /**
   * Check if the Big Bang 80/20 methodology is applicable.
   *
   * Applicable if H_spec <= 16 bits (bounded specification entropy).
   *
   * @returns {{applicable: boolean, h_spec: number, max_allowed: number, reason: string}}
   */
  isBB8020Applicable() {
    const hSpec = this.computeSpecificationEntropy();
    return {
      applicable: hSpec <= 16,
      h_spec: hSpec,
      max_allowed: 16,
      reason: hSpec <= 16
        ? 'Domain has bounded entropy - BB80/20 applicable'
        : 'Domain entropy too high - iterative approach recommended'
    };
  }

  /**
   * Generate the full implementation recommendation.
   *
   * Fix: `efficiency_gain` divided by totalCost without a guard and
   * rendered 'NaN%' for an empty/zero-cost pool; it now reports '0.0%'.
   *
   * @returns {object} recommendation with frontier, value, and cost analyses.
   */
  generateRecommendation() {
    const frontier = this.computeParetoFrontier();
    const applicability = this.isBB8020Applicable();
    const rule8020 = this.validate8020Rule();

    const totalCost = this.features.reduce((sum, f) => sum + f.cost, 0);
    const frontierCost = frontier.reduce((sum, f) => sum + f.cost, 0);
    const totalValue = this.features.reduce((sum, f) => sum + f.value, 0);
    const frontierValue = frontier.reduce((sum, f) => sum + f.value, 0);

    return {
      methodology: applicability.applicable ? 'Big Bang 80/20' : 'Iterative Development',
      specification_entropy: applicability.h_spec,
      pareto_frontier: {
        features: frontier.map(f => ({
          id: f.id,
          name: f.name,
          value: f.value,
          cost: f.cost,
          efficiency: f.efficiency
        })),
        count: frontier.length,
        percentage_of_total: rule8020.paretoPercentage
      },
      value_analysis: {
        frontier_value: frontierValue,
        total_value: totalValue,
        percentage: rule8020.valuePercentage,
        meets_8020: rule8020.valid
      },
      cost_analysis: {
        frontier_cost: frontierCost,
        total_cost: totalCost,
        savings: totalCost - frontierCost,
        efficiency_gain: totalCost > 0
          ? ((totalCost - frontierCost) / totalCost * 100).toFixed(1) + '%'
          : '0.0%'
      },
      recommendation: applicability.applicable
        ? `Implement ${frontier.length} Pareto-optimal features using BB80/20 single-pass methodology. ` +
          `Expected implementation time: 2-3 hours. Predicted correctness: ≥99.99%.`
        : `Domain entropy (${applicability.h_spec.toFixed(2)} bits) exceeds threshold. ` +
          `Use iterative development with 3-5 sprints.`
    };
  }

  /**
   * Generate Pareto chart data for visualization.
   *
   * @returns {Array<{feature: string, value: number, cost: number, efficiency: number}>}
   */
  generateParetoChart() {
    const frontier = this.computeParetoFrontier();

    return frontier.map(f => ({
      feature: f.name,
      value: f.value,
      cost: f.cost,
      efficiency: f.efficiency
    }));
  }
}
|
|
228
|
+
|
|
229
|
+
/**
|
|
230
|
+
* Example usage demonstrating KGC 4D feature analysis
|
|
231
|
+
*/
|
|
232
|
+
/**
 * Example usage demonstrating KGC 4D feature analysis.
 *
 * Builds a ParetoAnalyzer pre-loaded with the eight features from the
 * KGC 4D case study (thesis-bigbang-80-20.tex).
 *
 * @returns {ParetoAnalyzer} analyzer ready for frontier analysis.
 */
export function createKGC4DExample() {
  // [id, name, value, cost, description] tuples from the case study.
  const caseStudy = [
    [1, 'BigInt Time', 95, 20, 'Nanosecond precision timestamps'],
    [2, 'Event Log', 85, 50, 'Immutable event sourcing'],
    [3, 'Named Graphs', 80, 30, 'Multi-graph support'],
    [4, 'Freeze', 75, 150, 'Snapshot creation'],
    [5, 'Time-Travel', 70, 200, '4D reconstruction'],
    [6, 'Receipt', 60, 80, 'Cryptographic receipts'],
    [7, 'React UI', 40, 300, 'Web interface'],
    [8, 'Advanced Hooks', 30, 500, 'Governance framework']
  ];

  const analyzer = new ParetoAnalyzer();
  analyzer.addFeatures(
    caseStudy.map(([id, name, value, cost, description]) =>
      new Feature({ id, name, value, cost, description }))
  );

  return analyzer;
}
|
|
@@ -0,0 +1,358 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Socratic AI Agent
|
|
3
|
+
*
|
|
4
|
+
* Implements intelligent assumption extraction and evidence-based reasoning.
|
|
5
|
+
* Challenges user input to enforce intellectual rigor and prevent groupthink.
|
|
6
|
+
*
|
|
7
|
+
* Based on 2030 vision capability: "Death of Groupthink"
|
|
8
|
+
*
|
|
9
|
+
* Key Functions:
|
|
10
|
+
* - Extract implicit assumptions from statements
|
|
11
|
+
* - Query knowledge graph for supporting/refuting evidence
|
|
12
|
+
* - Generate Socratic questions to clarify ambiguity
|
|
13
|
+
* - Enforce MECE (Mutually Exclusive, Collectively Exhaustive)
|
|
14
|
+
* - Detect logical fallacies and circular reasoning
|
|
15
|
+
*/
|
|
16
|
+
|
|
17
|
+
/**
|
|
18
|
+
* Assumption extracted from user statement
|
|
19
|
+
*/
|
|
20
|
+
/**
 * Assumption extracted from a user statement, with evidence tracking.
 *
 * @property {string} id - Unique identifier (pattern-prefixed).
 * @property {string} statement - Normalized assumption text.
 * @property {number} confidence - Prior confidence in [0, 1].
 * @property {Array} evidence_for - Evidence records supporting the assumption.
 * @property {Array} evidence_against - Evidence records refuting it.
 */
export class Assumption {
  constructor({ id, statement, confidence, evidence_for = [], evidence_against = [] }) {
    this.id = id;
    this.statement = statement;
    this.confidence = confidence; // 0-1
    this.evidence_for = evidence_for;
    this.evidence_against = evidence_against;
  }

  /**
   * Fraction of total evidence that supports the assumption (0 when no
   * evidence has been gathered at all).
   *
   * @returns {number} value in [0, 1].
   */
  get evidenceStrength() {
    const supporting = this.evidence_for.length;
    const total = supporting + this.evidence_against.length;
    return total === 0 ? 0 : supporting / total;
  }

  /**
   * Classify assumption quality by evidence strength.
   *
   * @returns {'UNVALIDATED'|'STRONG'|'MODERATE'|'WEAK'|'REFUTED'}
   */
  get classification() {
    const hasEvidence = this.evidence_for.length > 0 || this.evidence_against.length > 0;
    if (!hasEvidence) {
      return 'UNVALIDATED';
    }

    const strength = this.evidenceStrength;
    if (strength >= 0.8) return 'STRONG';
    if (strength >= 0.5) return 'MODERATE';
    if (strength >= 0.2) return 'WEAK';
    return 'REFUTED';
  }
}
|
|
53
|
+
|
|
54
|
+
/**
|
|
55
|
+
* Challenge generated by Socratic agent
|
|
56
|
+
*/
|
|
57
|
+
/**
 * Challenge generated by the Socratic agent.
 *
 * @property {'CLARIFICATION'|'EVIDENCE'|'LOGIC'|'MECE'} type - Challenge category.
 * @property {string} question - The Socratic question posed to the user.
 * @property {string} context - The assumption/statement that triggered it.
 * @property {'LOW'|'MEDIUM'|'HIGH'} severity - How blocking the challenge is.
 */
export class SocraticChallenge {
  constructor({ type, question, context, severity }) {
    Object.assign(this, { type, question, context, severity });
  }
}
|
|
65
|
+
|
|
66
|
+
/**
|
|
67
|
+
* Socratic AI Agent
|
|
68
|
+
*/
|
|
69
|
+
/**
 * Socratic AI Agent.
 *
 * Extracts implicit assumptions from a natural-language statement,
 * gathers supporting/refuting evidence from the knowledge store, and
 * generates Socratic challenges plus evidence-based alternatives.
 */
export class SocraticAgent {
  /**
   * @param {{knowledgeStore: object|null}} options - RDF knowledge graph
   *   handle used for evidence queries (evidence lookup is currently a stub).
   */
  constructor({ knowledgeStore }) {
    this.knowledgeStore = knowledgeStore; // RDF knowledge graph for evidence queries
    this.assumptionPatterns = this._initializePatterns();
  }

  /**
   * Linguistic patterns used to detect assumption-bearing phrases.
   *
   * @returns {Object<string, RegExp>} named patterns keyed by assumption kind.
   */
  _initializePatterns() {
    return {
      // Causal assumptions: "X will solve Y"
      causal: /(.+?)\s+will\s+solve\s+(.+)/i,
      // Need assumptions: "We need to X"
      need: /we\s+need\s+to\s+(.+)/i,
      // Optimization assumptions: "Optimize X" (vague)
      optimize: /optimize\s+(.+)/i,
      // Absolute claims: "Always", "Never", "All", "None"
      absolute: /(always|never|all|none|every|no)\s+(.+)/i,
      // Implicit causality: "If X then Y"
      conditional: /if\s+(.+?)\s+then\s+(.+)/i
    };
  }

  /**
   * Analyze a user statement and extract assumptions, then gather
   * evidence for each one.
   *
   * @param {string} statement - Raw user statement.
   * @returns {Promise<Assumption[]>} extracted assumptions.
   */
  async analyzeStatement(statement) {
    // Extraction rules, in fixed order: which pattern to try, the id
    // prefix, the prior confidence, and how to phrase the assumption
    // from the regex capture groups.
    const extractionRules = [
      {
        key: 'causal',
        confidence: 0.3, // Low confidence without evidence
        phrase: (m) => `${m[1]} causes ${m[2]}`
      },
      {
        key: 'need',
        confidence: 0.5, // Moderate confidence (needs justification)
        phrase: (m) => `Action required: ${m[1]}`
      },
      {
        key: 'optimize',
        confidence: 0.2, // Very low - needs clarification
        phrase: (m) => `Optimization target: ${m[1]} (VAGUE - needs clarification)`
      },
      {
        key: 'absolute',
        confidence: 0.1, // Very low - absolutes rarely true
        phrase: (m) => `Absolute claim: ${m[1]} ${m[2]}`
      }
    ];

    const extracted = [];
    for (const rule of extractionRules) {
      const match = statement.match(this.assumptionPatterns[rule.key]);
      if (!match) continue;
      extracted.push(new Assumption({
        id: `${rule.key}-${Date.now()}`,
        statement: rule.phrase(match),
        confidence: rule.confidence,
        evidence_for: [],
        evidence_against: []
      }));
    }

    // Query knowledge graph for evidence on each extracted assumption.
    for (const assumption of extracted) {
      await this._gatherEvidence(assumption);
    }

    return extracted;
  }

  /**
   * Query the knowledge graph for evidence supporting/refuting an
   * assumption, mutating its evidence arrays in place.
   *
   * Simplified: production would run SPARQL queries against the
   * knowledge graph; here evidence comes from similar past decisions.
   *
   * @param {Assumption} assumption - Assumption to enrich.
   */
  async _gatherEvidence(assumption) {
    const priorDecisions = await this._querySimilarDecisions(assumption.statement);

    for (const decision of priorDecisions) {
      const record = {
        source: 'historical_decision',
        decision_id: decision.id,
        relevance: decision.similarity
      };
      // Successful outcomes support the assumption; anything else refutes it.
      const bucket = decision.outcome === 'success'
        ? assumption.evidence_for
        : assumption.evidence_against;
      bucket.push(record);
    }
  }

  /**
   * Query for similar past decisions.
   *
   * Stub: production would use hyperdimensional similarity search.
   *
   * @returns {Promise<Array>} always empty in this stub.
   */
  async _querySimilarDecisions(statement) {
    return [];
  }

  /**
   * Generate Socratic challenges for weak/unvalidated assumptions.
   *
   * @param {Assumption[]} assumptions - Assumptions to scrutinize.
   * @returns {SocraticChallenge[]} challenges in deterministic order.
   */
  generateChallenges(assumptions) {
    const challenges = [];
    const raise = (type, question, context, severity) => {
      challenges.push(new SocraticChallenge({ type, question, context, severity }));
    };

    for (const assumption of assumptions) {
      const { statement, classification } = assumption;

      // Vague/optimization assumptions need clarification.
      if (statement.includes('VAGUE')) {
        raise(
          'CLARIFICATION',
          `Clarification needed: By "optimize," do you mean reduce time-to-value or increase conversion rate? Please specify the metric.`,
          statement,
          'HIGH'
        );
      }

      // Unvalidated assumptions need evidence.
      if (classification === 'UNVALIDATED') {
        raise(
          'EVIDENCE',
          `What evidence supports the assumption: "${statement}"? Current data shows no causal link.`,
          statement,
          'HIGH'
        );
      }

      // Weak assumptions need strengthening.
      if (classification === 'WEAK') {
        raise(
          'EVIDENCE',
          `Weak evidence for: "${statement}". Evidence strength: ${(assumption.evidenceStrength * 100).toFixed(0)}%. Additional validation recommended.`,
          statement,
          'MEDIUM'
        );
      }

      // Refuted assumptions should be reconsidered.
      if (classification === 'REFUTED') {
        raise(
          'LOGIC',
          `Contradictory evidence found for: "${statement}". Evidence against outweighs evidence for. Reconsider this assumption.`,
          statement,
          'HIGH'
        );
      }

      // Absolute claims need counterexamples.
      if (statement.includes('Absolute claim')) {
        raise(
          'LOGIC',
          `Absolute claims are rarely true. Can you provide counterexamples or weaken this claim?`,
          statement,
          'MEDIUM'
        );
      }
    }

    return challenges;
  }

  /**
   * Suggest evidence-based alternatives ranked by Pareto efficiency.
   *
   * @param {string} statement - Original user statement.
   * @param {Assumption[]} assumptions - Extracted assumptions (unused by the stub path).
   * @returns {Promise<Array>} top 3 alternatives by value/cost ratio.
   */
  async suggestAlternatives(statement, assumptions) {
    // Hyperdimensional search for similar past problems (stubbed).
    const similarProblems = await this._findSimilarProblems(statement);

    const ranked = [];
    for (const problem of similarProblems) {
      ranked.push({
        solution: problem.solution,
        value: problem.value,
        cost: problem.cost,
        efficiency: problem.value / problem.cost,
        evidence_strength: problem.evidence_strength
      });
    }

    ranked.sort((a, b) => b.efficiency - a.efficiency);
    return ranked.slice(0, 3); // Top 3 Pareto-optimal alternatives
  }

  /**
   * Find similar problems.
   *
   * Stub: production would query the knowledge graph with
   * hyperdimensional embeddings.
   *
   * @returns {Promise<Array>} always empty in this stub.
   */
  async _findSimilarProblems(statement) {
    return [];
  }

  /**
   * Complete Socratic analysis workflow: extract assumptions, generate
   * challenges, suggest alternatives, and produce a recommendation.
   *
   * @param {string} statement - Raw user statement.
   * @returns {Promise<object>} full analysis result.
   */
  async analyze(statement) {
    const assumptions = await this.analyzeStatement(statement);
    const challenges = this.generateChallenges(assumptions);

    // Alternatives are only worth computing when something was challenged.
    let alternatives = [];
    if (challenges.length > 0) {
      alternatives = await this.suggestAlternatives(statement, assumptions);
    }

    return {
      original_statement: statement,
      assumptions,
      challenges,
      alternatives,
      recommendation: this._generateRecommendation(assumptions, challenges, alternatives)
    };
  }

  /**
   * Generate the final proceed/block recommendation.
   *
   * @param {Assumption[]} assumptions
   * @param {SocraticChallenge[]} challenges
   * @param {Array} alternatives
   * @returns {{proceed: boolean, reason: string, action: string, alternatives?: (string|null)}}
   */
  _generateRecommendation(assumptions, challenges, alternatives) {
    const countByClass = (cls) =>
      assumptions.filter((a) => a.classification === cls).length;

    const highSeverityChallenges = challenges.filter((c) => c.severity === 'HIGH').length;
    const unvalidatedAssumptions = countByClass('UNVALIDATED');

    // Hard blockers: any high-severity challenge or unvalidated assumption.
    if (highSeverityChallenges > 0 || unvalidatedAssumptions > 0) {
      return {
        proceed: false,
        reason: `${highSeverityChallenges} high-severity challenges and ${unvalidatedAssumptions} unvalidated assumptions detected. Address these before proceeding.`,
        action: 'Clarify ambiguous terms and provide evidence for key assumptions.'
      };
    }

    // Soft warning: weak assumptions allow proceeding with caution.
    const weakAssumptions = countByClass('WEAK');
    if (weakAssumptions > 0) {
      return {
        proceed: true,
        reason: `${weakAssumptions} weak assumptions detected but no critical blockers.`,
        action: 'Proceed with caution. Monitor assumptions and validate during implementation.',
        alternatives: alternatives.length > 0 ? `Consider ${alternatives.length} evidence-based alternatives.` : null
      };
    }

    return {
      proceed: true,
      reason: 'All assumptions validated with strong evidence.',
      action: 'Proceed with implementation. High confidence in success.'
    };
  }
}
|
|
341
|
+
|
|
342
|
+
/**
|
|
343
|
+
* Example usage
|
|
344
|
+
*/
|
|
345
|
+
/**
 * Example usage: builds a SocraticAgent (no knowledge store) together with
 * three sample statements, each of which trips a different assumption
 * pattern (vague optimization, unvalidated causal claim, absolute claim).
 *
 * @returns {{agent: SocraticAgent, examples: string[]}}
 */
export function createExampleAnalysis() {
  const agent = new SocraticAgent({ knowledgeStore: null });

  const examples = [
    // Vague optimization
    "We need to optimize the onboarding flow",
    // Unvalidated causal claim
    "Adding feature X will solve problem Y",
    // Absolute claim
    "All users always abandon at the payment step"
  ];

  return { agent, examples };
}
|