agentic-qe 1.3.3 → 1.3.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/qe-api-contract-validator.md +20 -0
- package/.claude/agents/qe-chaos-engineer.md +20 -0
- package/.claude/agents/qe-coverage-analyzer.md +21 -0
- package/.claude/agents/qe-deployment-readiness.md +20 -0
- package/.claude/agents/qe-flaky-test-hunter.md +20 -0
- package/.claude/agents/qe-fleet-commander.md +20 -0
- package/.claude/agents/qe-performance-tester.md +21 -0
- package/.claude/agents/qe-production-intelligence.md +20 -0
- package/.claude/agents/qe-quality-analyzer.md +20 -0
- package/.claude/agents/qe-quality-gate.md +20 -0
- package/.claude/agents/qe-regression-risk-analyzer.md +20 -0
- package/.claude/agents/qe-requirements-validator.md +20 -0
- package/.claude/agents/qe-security-scanner.md +21 -0
- package/.claude/agents/qe-test-data-architect.md +19 -0
- package/.claude/agents/qe-test-executor.md +20 -0
- package/.claude/agents/qe-test-generator.md +22 -0
- package/.claude/agents/qe-visual-tester.md +22 -0
- package/README.md +320 -1019
- package/README.md.backup-20251026 +1366 -0
- package/bin/aqe-mcp +1 -1
- package/dist/agents/BaseAgent.js +1 -1
- package/dist/agents/BaseAgent.js.map +1 -1
- package/dist/agents/CoverageAnalyzerAgent.js +16 -16
- package/dist/agents/CoverageAnalyzerAgent.js.map +1 -1
- package/dist/agents/FlakyTestHunterAgent.js +1 -1
- package/dist/agents/FlakyTestHunterAgent.js.map +1 -1
- package/dist/cli/commands/init.d.ts.map +1 -1
- package/dist/cli/commands/init.js +71 -37
- package/dist/cli/commands/init.js.map +1 -1
- package/dist/core/memory/EnhancedAgentDBService.d.ts +127 -0
- package/dist/core/memory/EnhancedAgentDBService.d.ts.map +1 -0
- package/dist/core/memory/EnhancedAgentDBService.js +298 -0
- package/dist/core/memory/EnhancedAgentDBService.js.map +1 -0
- package/dist/core/neural/NeuralTrainer.js +2 -2
- package/dist/core/neural/NeuralTrainer.js.map +1 -1
- package/dist/learning/FixRecommendationEngine.d.ts +68 -0
- package/dist/learning/FixRecommendationEngine.d.ts.map +1 -0
- package/dist/learning/FixRecommendationEngine.js +500 -0
- package/dist/learning/FixRecommendationEngine.js.map +1 -0
- package/dist/learning/FlakyTestDetector.d.ts +19 -0
- package/dist/learning/FlakyTestDetector.d.ts.map +1 -1
- package/dist/learning/FlakyTestDetector.js +121 -2
- package/dist/learning/FlakyTestDetector.js.map +1 -1
- package/dist/learning/index.d.ts +2 -0
- package/dist/learning/index.d.ts.map +1 -1
- package/dist/learning/index.js +5 -0
- package/dist/learning/index.js.map +1 -1
- package/dist/learning/types.d.ts +11 -0
- package/dist/learning/types.d.ts.map +1 -1
- package/dist/reasoning/PatternQualityScorer.d.ts +134 -0
- package/dist/reasoning/PatternQualityScorer.d.ts.map +1 -0
- package/dist/reasoning/PatternQualityScorer.js +340 -0
- package/dist/reasoning/PatternQualityScorer.js.map +1 -0
- package/dist/reasoning/QEReasoningBank.d.ts +51 -4
- package/dist/reasoning/QEReasoningBank.d.ts.map +1 -1
- package/dist/reasoning/QEReasoningBank.js +173 -11
- package/dist/reasoning/QEReasoningBank.js.map +1 -1
- package/dist/reasoning/VectorSimilarity.d.ts +131 -0
- package/dist/reasoning/VectorSimilarity.d.ts.map +1 -0
- package/dist/reasoning/VectorSimilarity.js +250 -0
- package/dist/reasoning/VectorSimilarity.js.map +1 -0
- package/dist/reasoning/index.d.ts +8 -1
- package/dist/reasoning/index.d.ts.map +1 -1
- package/dist/reasoning/index.js +13 -2
- package/dist/reasoning/index.js.map +1 -1
- package/dist/streaming/BaseStreamHandler.d.ts +89 -0
- package/dist/streaming/BaseStreamHandler.d.ts.map +1 -0
- package/dist/streaming/BaseStreamHandler.js +168 -0
- package/dist/streaming/BaseStreamHandler.js.map +1 -0
- package/dist/streaming/TestGenerateStreamHandler.d.ts +103 -0
- package/dist/streaming/TestGenerateStreamHandler.d.ts.map +1 -0
- package/dist/streaming/TestGenerateStreamHandler.js +321 -0
- package/dist/streaming/TestGenerateStreamHandler.js.map +1 -0
- package/dist/streaming/index.d.ts +16 -0
- package/dist/streaming/index.d.ts.map +1 -0
- package/dist/streaming/index.js +39 -0
- package/dist/streaming/index.js.map +1 -0
- package/dist/utils/__mocks__/Logger.d.ts +26 -0
- package/dist/utils/__mocks__/Logger.d.ts.map +1 -0
- package/dist/utils/__mocks__/Logger.js +42 -0
- package/dist/utils/__mocks__/Logger.js.map +1 -0
- package/package.json +15 -3
- package/dist/agents/mixins/NeuralCapableMixin.d.ts +0 -130
- package/dist/agents/mixins/NeuralCapableMixin.d.ts.map +0 -1
- package/dist/agents/mixins/NeuralCapableMixin.js +0 -358
- package/dist/agents/mixins/NeuralCapableMixin.js.map +0 -1
- package/dist/agents/mixins/QUICCapableMixin.d.ts +0 -34
- package/dist/agents/mixins/QUICCapableMixin.d.ts.map +0 -1
- package/dist/agents/mixins/QUICCapableMixin.js +0 -346
- package/dist/agents/mixins/QUICCapableMixin.js.map +0 -1
- package/dist/core/security/CertificateValidator.d.ts +0 -130
- package/dist/core/security/CertificateValidator.d.ts.map +0 -1
- package/dist/core/security/CertificateValidator.js +0 -376
- package/dist/core/security/CertificateValidator.js.map +0 -1
- package/dist/core/transport/QUICTransport.d.ts +0 -62
- package/dist/core/transport/QUICTransport.d.ts.map +0 -1
- package/dist/core/transport/QUICTransport.js +0 -381
- package/dist/core/transport/QUICTransport.js.map +0 -1
- package/dist/core/transport/SecureQUICTransport.d.ts +0 -71
- package/dist/core/transport/SecureQUICTransport.d.ts.map +0 -1
- package/dist/core/transport/SecureQUICTransport.js +0 -253
- package/dist/core/transport/SecureQUICTransport.js.map +0 -1
- package/dist/learning/AdvancedFeatureExtractor.d.ts +0 -123
- package/dist/learning/AdvancedFeatureExtractor.d.ts.map +0 -1
- package/dist/learning/AdvancedFeatureExtractor.js +0 -423
- package/dist/learning/AdvancedFeatureExtractor.js.map +0 -1
- package/dist/learning/NeuralPatternMatcher.d.ts +0 -184
- package/dist/learning/NeuralPatternMatcher.d.ts.map +0 -1
- package/dist/learning/NeuralPatternMatcher.js +0 -702
- package/dist/learning/NeuralPatternMatcher.js.map +0 -1
- package/dist/learning/NeuralTrainer.d.ts +0 -209
- package/dist/learning/NeuralTrainer.d.ts.map +0 -1
- package/dist/learning/NeuralTrainer.js +0 -478
- package/dist/learning/NeuralTrainer.js.map +0 -1
- package/dist/transport/QUICTransport.d.ts +0 -340
- package/dist/transport/QUICTransport.d.ts.map +0 -1
- package/dist/transport/QUICTransport.js +0 -814
- package/dist/transport/QUICTransport.js.map +0 -1
- package/dist/transport/UDPTransport.d.ts +0 -348
- package/dist/transport/UDPTransport.d.ts.map +0 -1
- package/dist/transport/UDPTransport.js +0 -820
- package/dist/transport/UDPTransport.js.map +0 -1
|
@@ -1,702 +0,0 @@
|
|
|
1
|
-
"use strict";
|
|
2
|
-
/**
|
|
3
|
-
* Neural Pattern Matcher for QE Test Generation
|
|
4
|
-
*
|
|
5
|
-
* Implements a neural network-based pattern recognition system for:
|
|
6
|
-
* - Learning from historical test patterns
|
|
7
|
-
* - Predicting optimal test cases for new code
|
|
8
|
-
* - Identifying test coverage gaps
|
|
9
|
-
* - Suggesting test improvements
|
|
10
|
-
*
|
|
11
|
-
* Target accuracy: 85%+
|
|
12
|
-
*
|
|
13
|
-
* @module NeuralPatternMatcher
|
|
14
|
-
*/
|
|
15
|
-
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
16
|
-
if (k2 === undefined) k2 = k;
|
|
17
|
-
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
18
|
-
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
19
|
-
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
20
|
-
}
|
|
21
|
-
Object.defineProperty(o, k2, desc);
|
|
22
|
-
}) : (function(o, m, k, k2) {
|
|
23
|
-
if (k2 === undefined) k2 = k;
|
|
24
|
-
o[k2] = m[k];
|
|
25
|
-
}));
|
|
26
|
-
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
27
|
-
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
28
|
-
}) : function(o, v) {
|
|
29
|
-
o["default"] = v;
|
|
30
|
-
});
|
|
31
|
-
var __importStar = (this && this.__importStar) || (function () {
|
|
32
|
-
var ownKeys = function(o) {
|
|
33
|
-
ownKeys = Object.getOwnPropertyNames || function (o) {
|
|
34
|
-
var ar = [];
|
|
35
|
-
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
|
36
|
-
return ar;
|
|
37
|
-
};
|
|
38
|
-
return ownKeys(o);
|
|
39
|
-
};
|
|
40
|
-
return function (mod) {
|
|
41
|
-
if (mod && mod.__esModule) return mod;
|
|
42
|
-
var result = {};
|
|
43
|
-
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
44
|
-
__setModuleDefault(result, mod);
|
|
45
|
-
return result;
|
|
46
|
-
};
|
|
47
|
-
})();
|
|
48
|
-
Object.defineProperty(exports, "__esModule", { value: true });
|
|
49
|
-
exports.NeuralPatternMatcher = exports.ModelBackend = void 0;
|
|
50
|
-
const events_1 = require("events");
|
|
51
|
-
const fs_1 = require("fs");
|
|
52
|
-
const path = __importStar(require("path"));
|
|
53
|
-
/**
 * Model backend types — the inference engines this module can delegate to.
 * (Compiled TypeScript string-enum: an IIFE populating a plain object.)
 */
var ModelBackend;
(function (backend) {
    /** TensorFlow.js (browser/Node.js) */
    backend.TENSORFLOW_JS = "tensorflow_js";
    /** ONNX Runtime (cross-platform) */
    backend.ONNX = "onnx";
    /** Simple neural network (pure TypeScript) */
    backend.SIMPLE_NN = "simple_nn";
})(ModelBackend || (exports.ModelBackend = ModelBackend = {}));
|
|
65
|
-
/**
 * Simple neural network implementation (pure TypeScript)
 * Used when TensorFlow.js or ONNX are not available.
 *
 * Dense feed-forward network. Note: weight updates use a simplified delta rule
 * (see `train`), not full backpropagation through hidden activations — this is
 * a deliberately lightweight fallback backend.
 */
class SimpleNeuralNetwork {
    /**
     * @param architecture - { inputSize, hiddenLayers, outputSize, activation,
     *   learningRate, epochs, batchSize }
     */
    constructor(architecture) {
        this.architecture = architecture;
        this.weights = []; // weights[layer][neuron][input]
        this.biases = [];  // biases[layer][neuron]
        this.initializeWeights();
    }
    /**
     * Initialize weights using Xavier initialization.
     */
    initializeWeights() {
        const layers = [
            this.architecture.inputSize,
            ...this.architecture.hiddenLayers,
            this.architecture.outputSize
        ];
        for (let i = 0; i < layers.length - 1; i++) {
            const inputSize = layers[i];
            const outputSize = layers[i + 1];
            // Xavier initialization: keeps initial activations in a stable range
            const scale = Math.sqrt(2 / (inputSize + outputSize));
            const layerWeights = [];
            const layerBiases = [];
            for (let j = 0; j < outputSize; j++) {
                const neuronWeights = [];
                for (let k = 0; k < inputSize; k++) {
                    neuronWeights.push((Math.random() * 2 - 1) * scale);
                }
                layerWeights.push(neuronWeights);
                layerBiases.push(0);
            }
            this.weights.push(layerWeights);
            this.biases.push(layerBiases);
        }
    }
    /**
     * Activation function for a single pre-activation value.
     * Unknown types fall through to the identity function.
     */
    activate(x, type) {
        switch (type) {
            case 'relu':
                return Math.max(0, x);
            case 'sigmoid':
                return 1 / (1 + Math.exp(-x));
            case 'tanh':
                return Math.tanh(x);
            default:
                return x;
        }
    }
    /**
     * Softmax activation for output layer (max-subtracted for numeric stability).
     */
    softmax(values) {
        const max = Math.max(...values);
        const exps = values.map(v => Math.exp(v - max));
        const sum = exps.reduce((a, b) => a + b, 0);
        return exps.map(e => e / sum);
    }
    /**
     * Forward propagation. Returns the output-layer activations.
     */
    predict(input) {
        let activations = input;
        for (let i = 0; i < this.weights.length; i++) {
            const nextActivations = [];
            for (let j = 0; j < this.weights[i].length; j++) {
                let sum = this.biases[i][j];
                for (let k = 0; k < activations.length; k++) {
                    sum += activations[k] * this.weights[i][j][k];
                }
                const isOutputLayer = i === this.weights.length - 1;
                if (isOutputLayer && this.architecture.activation === 'softmax') {
                    nextActivations.push(sum); // softmax applied after all outputs computed
                }
                else {
                    nextActivations.push(this.activate(sum, this.architecture.activation));
                }
            }
            activations = nextActivations;
        }
        if (this.architecture.activation === 'softmax') {
            activations = this.softmax(activations);
        }
        return activations;
    }
    /**
     * Unbiased Fisher-Yates shuffle on a copy (input array is not mutated).
     * Replaces the previous `sort(() => Math.random() - 0.5)` shuffle, which is
     * statistically biased and O(n log n).
     */
    shuffle(data) {
        const copy = [...data];
        for (let i = copy.length - 1; i > 0; i--) {
            const j = Math.floor(Math.random() * (i + 1));
            [copy[i], copy[j]] = [copy[j], copy[i]];
        }
        return copy;
    }
    /**
     * Mean squared error between a prediction vector and its label vector.
     */
    meanSquaredError(prediction, labels) {
        return prediction.reduce((sum, pred, idx) => sum + Math.pow(pred - labels[idx], 2), 0) / prediction.length;
    }
    /**
     * Train the network with mini-batch updates.
     *
     * @param data - array of { features: number[], labels: number[], metadata }
     * @param validationData - optional held-out set for validation loss
     * @returns metrics: { accuracy, precision, recall, f1Score, confusionMatrix,
     *   trainingLoss, validationLoss, trainingTime }. Precision/recall/F1 are now
     *   computed from the data (previously hard-coded placeholder values).
     */
    train(data, validationData) {
        const startTime = Date.now();
        const learningRate = this.architecture.learningRate;
        const epochs = this.architecture.epochs;
        const batchSize = this.architecture.batchSize;
        let trainingLoss = 0;
        // Actual number of mini-batches processed (the old code divided by a
        // fractional estimate `(data.length / batchSize) * epochs`).
        let batchCount = 0;
        for (let epoch = 0; epoch < epochs; epoch++) {
            const shuffled = this.shuffle(data);
            for (let i = 0; i < shuffled.length; i += batchSize) {
                const batch = shuffled.slice(i, i + batchSize);
                // Accumulate gradients over the batch
                const weightGradients = this.weights.map(layer => layer.map(neuron => neuron.map(() => 0)));
                const biasGradients = this.biases.map(layer => layer.map(() => 0));
                let batchLoss = 0;
                for (const point of batch) {
                    const prediction = this.predict(point.features);
                    batchLoss += this.meanSquaredError(prediction, point.labels);
                    const outputError = prediction.map((pred, idx) => pred - point.labels[idx]);
                    // Simplified delta-rule gradient retained from the original design:
                    // every layer is updated against the raw input features rather than
                    // its own activations (not full backpropagation). The `?? 0` guards
                    // fix NaN poisoning that occurred whenever a hidden layer was wider
                    // than the output layer (outputError[j] === undefined) or than the
                    // input vector (point.features[k] === undefined).
                    for (let l = this.weights.length - 1; l >= 0; l--) {
                        for (let j = 0; j < this.weights[l].length; j++) {
                            const delta = outputError[j] ?? 0;
                            for (let k = 0; k < this.weights[l][j].length; k++) {
                                weightGradients[l][j][k] += delta * (point.features[k] ?? 0);
                            }
                            biasGradients[l][j] += delta;
                        }
                    }
                }
                // Apply averaged gradient step
                for (let l = 0; l < this.weights.length; l++) {
                    for (let j = 0; j < this.weights[l].length; j++) {
                        for (let k = 0; k < this.weights[l][j].length; k++) {
                            this.weights[l][j][k] -= learningRate * weightGradients[l][j][k] / batch.length;
                        }
                        this.biases[l][j] -= learningRate * biasGradients[l][j] / batch.length;
                    }
                }
                trainingLoss += batchLoss / batch.length;
                batchCount++;
            }
        }
        // Validation loss: mean MSE over the validation set, if provided
        let validationLoss = 0;
        if (validationData && validationData.length > 0) {
            for (const point of validationData) {
                validationLoss += this.meanSquaredError(this.predict(point.features), point.labels);
            }
            validationLoss /= validationData.length;
        }
        trainingLoss /= batchCount || 1;
        const trainingTime = Date.now() - startTime;
        // Real metrics computed on the training data (the old code returned
        // precision: 0.85, recall: 0.83, f1Score: 0.84 and a zeroed matrix).
        const { accuracy, precision, recall, f1Score, confusionMatrix } = this.computeClassMetrics(data);
        return {
            accuracy,
            precision,
            recall,
            f1Score,
            confusionMatrix,
            trainingLoss,
            validationLoss,
            trainingTime
        };
    }
    /**
     * Calculate classification accuracy on a dataset (argmax of prediction vs.
     * argmax of labels). Returns 0 on an empty dataset instead of NaN.
     */
    calculateAccuracy(data) {
        return this.computeClassMetrics(data).accuracy;
    }
    /**
     * Compute accuracy, confusion matrix (outputSize x outputSize, rows = actual,
     * cols = predicted), and precision/recall/F1 for class 0 — the same
     * "class 0 is positive" convention used by NeuralPatternMatcher.evaluate().
     */
    computeClassMetrics(data) {
        const size = this.architecture.outputSize;
        const confusionMatrix = Array.from({ length: size }, () => new Array(size).fill(0));
        let correct = 0;
        for (const point of data) {
            const prediction = this.predict(point.features);
            const predicted = prediction.indexOf(Math.max(...prediction));
            const actual = point.labels.indexOf(Math.max(...point.labels));
            if (predicted === actual)
                correct++;
            // indexOf can return -1 for non-finite vectors; skip those entries
            if (predicted >= 0 && actual >= 0) {
                confusionMatrix[actual][predicted]++;
            }
        }
        const tp = size > 0 ? confusionMatrix[0][0] : 0;
        let fp = 0;
        let fn = 0;
        for (let c = 1; c < size; c++) {
            fp += confusionMatrix[c][0];
            fn += confusionMatrix[0][c];
        }
        // `|| 0` maps the NaN from 0/0 divisions to 0
        const precision = tp / (tp + fp) || 0;
        const recall = tp / (tp + fn) || 0;
        const f1Score = 2 * (precision * recall) / (precision + recall) || 0;
        const accuracy = data.length > 0 ? correct / data.length : 0;
        return { accuracy, precision, recall, f1Score, confusionMatrix };
    }
    /**
     * Serialize model to a plain JSON-safe object.
     */
    toJSON() {
        return {
            architecture: this.architecture,
            weights: this.weights,
            biases: this.biases
        };
    }
    /**
     * Deserialize a model previously produced by toJSON().
     */
    static fromJSON(json) {
        const network = new SimpleNeuralNetwork(json.architecture);
        network.weights = json.weights;
        network.biases = json.biases;
        return network;
    }
}
|
|
264
|
-
/**
 * Neural Pattern Matcher
 *
 * Main class for neural-network-based pattern recognition in QE.
 * Emits lifecycle events for every stage (model:*, data:*, training:*,
 * prediction:*, evaluation:*) so callers can observe progress.
 *
 * Collaborators (injected; APIs inferred from the calls below — confirm against
 * their declarations):
 * - memoryManager: provides retrievePatterns()/retrieveMetrics()
 * - reasoningBank (optional): provides storeTrainingMetrics()/findSimilarPatterns()
 */
class NeuralPatternMatcher extends events_1.EventEmitter {
    /**
     * @param backend - one of ModelBackend (only SIMPLE_NN is implemented)
     * @param architecture - network architecture passed to the backend model
     * @param memoryManager - source of historical patterns/metrics
     * @param reasoningBank - optional store for training metrics and similar patterns
     * @param modelPath - directory for persisted models; defaults to
     *   `<cwd>/.agentic-qe/models`
     */
    constructor(backend, architecture, memoryManager, reasoningBank, modelPath) {
        super();
        this.model = null;     // backend instance; created lazily by initializeModel()/loadModel()
        this.metrics = null;   // last training/evaluation metrics
        this.backend = backend;
        this.architecture = architecture;
        this.memoryManager = memoryManager;
        this.reasoningBank = reasoningBank;
        this.modelVersion = '1.0.0';
        this.lastTrained = 0;  // epoch-ms timestamp of last successful train()
        this.modelPath = modelPath || path.join(process.cwd(), '.agentic-qe/models');
        // Initialize pattern encoding
        this.encoding = this.initializeEncoding();
    }
    /**
     * Initialize pattern encoding configuration.
     * Each extractor maps a pattern object to a fixed-width (4-element) numeric
     * feature slice; missing fields default to 0 via `||`.
     */
    initializeEncoding() {
        const extractors = new Map();
        // Code complexity features
        extractors.set('complexity', (pattern) => {
            const complexity = pattern.cyclomaticComplexity || 1;
            return [
                complexity / 10, // Normalized
                pattern.linesOfCode / 100,
                pattern.numberOfFunctions / 10,
                pattern.numberOfBranches / 10
            ];
        });
        // Test coverage features
        extractors.set('coverage', (pattern) => {
            return [
                pattern.lineCoverage || 0,
                pattern.branchCoverage || 0,
                pattern.functionCoverage || 0,
                pattern.statementCoverage || 0
            ];
        });
        // Historical performance features
        extractors.set('performance', (pattern) => {
            return [
                pattern.successRate || 0,
                pattern.avgExecutionTime / 1000, // Normalized to seconds
                pattern.flakyScore || 0,
                pattern.failureRate || 0
            ];
        });
        return {
            extractors,
            vocabulary: new Map(),
            maxSequenceLength: 100,
            featureDimension: this.architecture.inputSize
        };
    }
    /**
     * Encode a pattern into a numeric feature vector of exactly
     * `architecture.inputSize` elements (zero-padded or truncated).
     * Non-numeric/NaN features and extractor failures become zeros.
     */
    encodePattern(pattern) {
        const features = [];
        // Extract features from all extractors
        for (const [name, extractor] of this.encoding.extractors) {
            try {
                const extracted = extractor(pattern);
                // Validate extracted features
                const validFeatures = extracted.map(f => (typeof f === 'number' && !isNaN(f)) ? f : 0);
                features.push(...validFeatures);
            }
            catch (error) {
                // Use zeros if extraction fails (4 = width of each extractor's slice)
                features.push(...new Array(4).fill(0));
            }
        }
        // Pad or truncate to match input size
        while (features.length < this.architecture.inputSize) {
            features.push(0);
        }
        if (features.length > this.architecture.inputSize) {
            features.splice(this.architecture.inputSize);
        }
        return features;
    }
    /**
     * Initialize the backend model.
     * @throws Error for TENSORFLOW_JS / ONNX (not implemented) or unknown backends.
     */
    async initializeModel() {
        this.emit('model:initializing', { backend: this.backend });
        switch (this.backend) {
            case ModelBackend.SIMPLE_NN:
                this.model = new SimpleNeuralNetwork(this.architecture);
                break;
            case ModelBackend.TENSORFLOW_JS:
                throw new Error('TensorFlow.js backend not yet implemented');
            case ModelBackend.ONNX:
                throw new Error('ONNX backend not yet implemented');
            default:
                throw new Error(`Unknown backend: ${this.backend}`);
        }
        this.emit('model:initialized', {
            backend: this.backend,
            architecture: this.architecture
        });
    }
    /**
     * Load historical training data from SwarmMemoryManager.
     * Joins patterns to metrics on pattern_id; patterns without metrics are
     * skipped. Labels are a one-hot [success, failure] pair based on an 0.8
     * average-success-rate threshold.
     * @returns array of { features, labels, metadata }
     * @throws Error wrapping any retrieval failure
     */
    async loadTrainingData() {
        this.emit('data:loading');
        try {
            // Load patterns from memory manager
            const patterns = await this.memoryManager.retrievePatterns({
                limit: 10000,
                minConfidence: 0.5
            });
            // Load performance metrics
            const metrics = await this.memoryManager.retrieveMetrics({
                limit: 10000
            });
            // Combine and encode data
            const trainingData = [];
            for (const pattern of patterns) {
                const relatedMetrics = metrics.filter(m => m.pattern_id === pattern.pattern_id);
                if (relatedMetrics.length === 0)
                    continue;
                // Encode features (only the first related metric row is merged in)
                const features = this.encodePattern({
                    ...pattern,
                    metrics: relatedMetrics[0]
                });
                // Create labels (simplified binary classification)
                const avgSuccessRate = relatedMetrics.reduce((sum, m) => sum + (m.success_rate || 0), 0) / relatedMetrics.length;
                const labels = avgSuccessRate > 0.8 ? [1, 0] : [0, 1]; // [success, failure]
                trainingData.push({
                    features,
                    labels,
                    metadata: {
                        testId: pattern.pattern_id,
                        codePattern: pattern.pattern_type,
                        timestamp: Date.now(),
                        success: avgSuccessRate > 0.8,
                        coverage: pattern.coverage || 0
                    }
                });
            }
            this.emit('data:loaded', {
                count: trainingData.length,
                sources: ['patterns', 'metrics']
            });
            return trainingData;
        }
        catch (error) {
            this.emit('data:error', { error });
            throw new Error(`Failed to load training data: ${error}`);
        }
    }
    /**
     * Train model with historical data.
     * @param data - optional pre-built dataset; loaded via loadTrainingData() if omitted
     * @param validationSplit - tail fraction of the dataset held out for validation
     *   (note: split is positional, not randomized)
     * @returns training metrics from the backend model
     * @throws Error if no training data is available or training fails
     */
    async train(data, validationSplit = 0.2) {
        if (!this.model) {
            await this.initializeModel();
        }
        this.emit('training:started', {
            dataSize: data?.length || 0,
            validationSplit
        });
        try {
            // Load data if not provided
            const trainingData = data || await this.loadTrainingData();
            if (trainingData.length === 0) {
                throw new Error('No training data available');
            }
            // Split into training and validation sets
            const splitIndex = Math.floor(trainingData.length * (1 - validationSplit));
            const trainSet = trainingData.slice(0, splitIndex);
            const validationSet = trainingData.slice(splitIndex);
            this.emit('training:progress', {
                stage: 'splitting',
                trainSize: trainSet.length,
                validationSize: validationSet.length
            });
            // Train model (synchronous call on the backend)
            const metrics = this.model.train(trainSet, validationSet);
            this.metrics = metrics;
            this.lastTrained = Date.now();
            // Save model (persists to this.modelPath)
            await this.saveModel();
            // Store metrics in reasoning bank (optional collaborator)
            if (this.reasoningBank) {
                await this.reasoningBank.storeTrainingMetrics({
                    modelVersion: this.modelVersion,
                    backend: this.backend,
                    metrics,
                    timestamp: this.lastTrained
                });
            }
            this.emit('training:completed', {
                metrics,
                modelVersion: this.modelVersion
            });
            return metrics;
        }
        catch (error) {
            this.emit('training:error', { error });
            throw error;
        }
    }
    /**
     * Predict test patterns for new code.
     * Lazily loads a persisted model if none is in memory.
     * @returns { pattern: { type, confidence, testCases, expectedCoverage },
     *   alternatives, modelInfo } — class 0 maps to 'comprehensive', else 'basic';
     *   expectedCoverage is simply confidence * 100.
     * @throws Error if no model can be loaded or prediction fails
     */
    async predict(codePattern) {
        if (!this.model) {
            await this.loadModel();
        }
        if (!this.model) {
            throw new Error('Model not initialized. Train or load a model first.');
        }
        this.emit('prediction:started', { pattern: codePattern });
        try {
            // Encode input pattern
            const features = this.encodePattern(codePattern);
            // Get prediction
            const output = this.model.predict(features);
            // Interpret output: confidence = max activation, class = its index
            const confidence = Math.max(...output);
            const predictedClass = output.indexOf(confidence);
            // Generate test suggestions based on prediction
            const testCases = await this.generateTestSuggestions(codePattern, predictedClass, confidence);
            const prediction = {
                pattern: {
                    type: predictedClass === 0 ? 'comprehensive' : 'basic',
                    confidence,
                    testCases,
                    expectedCoverage: confidence * 100
                },
                alternatives: [],
                modelInfo: {
                    backend: this.backend,
                    version: this.modelVersion,
                    accuracy: this.metrics?.accuracy || 0,
                    lastTrained: this.lastTrained
                }
            };
            this.emit('prediction:completed', { prediction });
            return prediction;
        }
        catch (error) {
            this.emit('prediction:error', { error });
            throw error;
        }
    }
    /**
     * Generate test suggestions based on prediction.
     * NOTE(review): predictedClass and confidence are accepted but not used in
     * this body — suggestions come from reasoning-bank similarity plus static
     * heuristics on the code pattern's flags.
     */
    async generateTestSuggestions(codePattern, predictedClass, confidence) {
        const suggestions = [];
        // Use reasoning bank if available
        if (this.reasoningBank) {
            const similar = await this.reasoningBank.findSimilarPatterns(codePattern, 5);
            for (const pattern of similar) {
                if (pattern.test_cases) {
                    suggestions.push(...pattern.test_cases);
                }
            }
        }
        // Add default suggestions based on code characteristics
        if (codePattern.hasLoops) {
            suggestions.push('Edge case: empty array');
            suggestions.push('Edge case: single element');
            suggestions.push('Edge case: large dataset');
        }
        if (codePattern.hasConditionals) {
            suggestions.push('Branch coverage: all paths');
            suggestions.push('Edge case: boundary conditions');
        }
        if (codePattern.hasAsyncOperations) {
            suggestions.push('Async: success case');
            suggestions.push('Async: error handling');
            suggestions.push('Async: timeout scenario');
        }
        return suggestions;
    }
    /**
     * Incremental training with new data.
     * Reloads the full historical dataset, appends the new points, and retrains
     * from that combined set (full retrain, not a warm-start update).
     */
    async incrementalTrain(newData) {
        this.emit('training:incremental:started', {
            dataSize: newData.length
        });
        if (!this.model) {
            await this.loadModel();
        }
        // Load existing training data
        const existingData = await this.loadTrainingData();
        // Combine with new data
        const combinedData = [...existingData, ...newData];
        // Retrain model
        return await this.train(combinedData);
    }
    /**
     * Save model to disk as JSON at
     * `<modelPath>/neural-pattern-matcher-<version>.json`.
     * @throws Error if no model exists or the write fails
     */
    async saveModel() {
        if (!this.model) {
            throw new Error('No model to save');
        }
        this.emit('model:saving', { path: this.modelPath });
        try {
            // Ensure directory exists
            await fs_1.promises.mkdir(this.modelPath, { recursive: true });
            const modelFile = path.join(this.modelPath, `neural-pattern-matcher-${this.modelVersion}.json`);
            const modelData = {
                version: this.modelVersion,
                backend: this.backend,
                architecture: this.architecture,
                model: this.model.toJSON(),
                encoding: {
                    // Map is not JSON-serializable; persist as entry pairs
                    vocabulary: Array.from(this.encoding.vocabulary.entries()),
                    maxSequenceLength: this.encoding.maxSequenceLength,
                    featureDimension: this.encoding.featureDimension
                },
                metrics: this.metrics,
                lastTrained: this.lastTrained
            };
            await fs_1.promises.writeFile(modelFile, JSON.stringify(modelData, null, 2));
            this.emit('model:saved', {
                path: modelFile,
                version: this.modelVersion
            });
        }
        catch (error) {
            this.emit('model:save:error', { error });
            throw new Error(`Failed to save model: ${error}`);
        }
    }
    /**
     * Load model from disk.
     * Restores version, backend, architecture, metrics, encoding, and the model
     * weights. Only SIMPLE_NN can currently be deserialized.
     * @param version - model version to load; defaults to the current version
     * @throws Error if the file is missing/corrupt or the backend is unsupported
     */
    async loadModel(version) {
        const loadVersion = version || this.modelVersion;
        const modelFile = path.join(this.modelPath, `neural-pattern-matcher-${loadVersion}.json`);
        this.emit('model:loading', { path: modelFile });
        try {
            const data = await fs_1.promises.readFile(modelFile, 'utf-8');
            const modelData = JSON.parse(data);
            this.modelVersion = modelData.version;
            this.backend = modelData.backend;
            this.architecture = modelData.architecture;
            this.metrics = modelData.metrics;
            this.lastTrained = modelData.lastTrained;
            // Restore encoding
            if (modelData.encoding) {
                this.encoding.vocabulary = new Map(modelData.encoding.vocabulary);
                this.encoding.maxSequenceLength = modelData.encoding.maxSequenceLength;
                this.encoding.featureDimension = modelData.encoding.featureDimension;
            }
            // Load model based on backend
            switch (this.backend) {
                case ModelBackend.SIMPLE_NN:
                    this.model = SimpleNeuralNetwork.fromJSON(modelData.model);
                    break;
                default:
                    throw new Error(`Backend ${this.backend} not supported for loading`);
            }
            this.emit('model:loaded', {
                version: this.modelVersion,
                backend: this.backend,
                metrics: this.metrics
            });
        }
        catch (error) {
            this.emit('model:load:error', { error });
            throw new Error(`Failed to load model: ${error}`);
        }
    }
    /**
     * Evaluate model on a test dataset.
     * Builds a 2x2 confusion matrix (rows = actual class, cols = predicted
     * class; class 0 is treated as "positive") and derives precision/recall/F1.
     * @throws Error if no model is loaded
     */
    async evaluate(testData) {
        if (!this.model) {
            throw new Error('Model not loaded');
        }
        this.emit('evaluation:started', { dataSize: testData.length });
        let correct = 0;
        let totalLoss = 0;
        const confusionMatrix = [[0, 0], [0, 0]];
        for (const point of testData) {
            const prediction = this.model.predict(point.features);
            const predictedClass = prediction.indexOf(Math.max(...prediction));
            const actualClass = point.labels.indexOf(Math.max(...point.labels));
            if (predictedClass === actualClass)
                correct++;
            confusionMatrix[actualClass][predictedClass]++;
            // Calculate loss (MSE per sample)
            const loss = prediction.reduce((sum, pred, idx) => sum + Math.pow(pred - point.labels[idx], 2), 0) / prediction.length;
            totalLoss += loss;
        }
        const accuracy = correct / testData.length;
        const avgLoss = totalLoss / testData.length;
        // Calculate precision, recall, F1 (|| 0 maps 0/0 NaN to 0)
        const tp = confusionMatrix[0][0];
        const fp = confusionMatrix[1][0];
        const fn = confusionMatrix[0][1];
        const precision = tp / (tp + fp) || 0;
        const recall = tp / (tp + fn) || 0;
        const f1Score = 2 * (precision * recall) / (precision + recall) || 0;
        const metrics = {
            accuracy,
            precision,
            recall,
            f1Score,
            confusionMatrix,
            trainingLoss: this.metrics?.trainingLoss || 0,
            validationLoss: avgLoss,
            trainingTime: 0
        };
        this.emit('evaluation:completed', { metrics });
        return metrics;
    }
    /**
     * Get model information: backend, version, architecture, latest metrics,
     * and last-trained timestamp (0 if never trained).
     */
    getModelInfo() {
        return {
            backend: this.backend,
            version: this.modelVersion,
            architecture: this.architecture,
            metrics: this.metrics,
            lastTrained: this.lastTrained
        };
    }
}
|
|
701
|
-
// Public CommonJS export (ModelBackend is exported where it is declared above).
exports.NeuralPatternMatcher = NeuralPatternMatcher;
//# sourceMappingURL=NeuralPatternMatcher.js.map
|