vme-mcp-server 0.1.5 → 0.1.7
This diff shows the changes between publicly released versions of the package as they appear in the public registry, and is provided for informational purposes only.
- package/dist/lib/api-utils.js +251 -0
- package/dist/lib/capability-cache.js +131 -0
- package/dist/lib/capability-discovery.js +240 -0
- package/dist/lib/intent-recognition.js +159 -0
- package/dist/lib/name-resolver.js +174 -0
- package/dist/lib/session.js +59 -0
- package/dist/lib/vm-parsing.js +82 -0
- package/dist/server.js +36 -918
- package/dist/server_old.js +933 -0
- package/dist/tools/check-capability.js +84 -0
- package/dist/tools/create-vm.js +253 -0
- package/dist/tools/discover-capabilities.js +79 -0
- package/dist/tools/export-training-data.js +115 -0
- package/dist/tools/get-cache-status.js +123 -0
- package/dist/tools/get-resources.js +40 -0
- package/dist/tools/index.js +68 -0
- package/dist/tools/parse-intent.js +52 -0
- package/dist/tools/provide-feedback.js +61 -0
- package/dist/tools/query-resources.js +248 -0
- package/dist/types/interfaces.js +2 -0
- package/package.json +4 -4

package/dist/tools/check-capability.js
@@ -0,0 +1,84 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.handleCheckCapability = exports.checkCapabilityTool = void 0;
+const capability_discovery_js_1 = require("../lib/capability-discovery.js");
+exports.checkCapabilityTool = {
+    name: "check_capability",
+    description: "Check if a specific capability is available using natural language queries",
+    inputSchema: {
+        type: "object",
+        properties: {
+            question: {
+                type: "string",
+                description: "Natural language capability question (e.g., 'Can I create VMs with GPUs?', 'Is VMware supported?', 'What's the max CPU per VM?')"
+            },
+            capability_type: {
+                type: "string",
+                enum: ["hypervisor", "gpu_support", "max_cpu", "max_memory", "storage_types", "network_types"],
+                description: "Specific capability type to check (optional, inferred from question if not provided)"
+            }
+        },
+        required: ["question"]
+    }
+};
+async function handleCheckCapability(args) {
+    try {
+        const { question, capability_type } = args;
+        if (!question || typeof question !== 'string') {
+            return {
+                content: [
+                    {
+                        type: "text",
+                        text: JSON.stringify({
+                            error: "Invalid input",
+                            message: "Question parameter is required and must be a string"
+                        }, null, 2)
+                    }
+                ],
+                isError: true
+            };
+        }
+        // Use the capability discovery engine to check the capability
+        const result = await capability_discovery_js_1.capabilityDiscovery.checkCapability(question, capability_type);
+        // Prepare comprehensive response
+        const response = {
+            question: question,
+            answer: result.answer,
+            details: result.details,
+            confidence: result.confidence,
+            confidence_level: result.confidence >= 0.8 ? "high" :
+                result.confidence >= 0.5 ? "medium" : "low",
+            capability_type_detected: capability_type || "inferred_from_question",
+            suggestions: result.confidence < 0.5 ? [
+                "Try being more specific in your question",
+                "Use capability_type parameter for better accuracy",
+                "Examples: 'Can I create 32-CPU VMs?', 'Is GPU support available?'"
+            ] : undefined
+        };
+        return {
+            content: [
+                {
+                    type: "text",
+                    text: JSON.stringify(response, null, 2)
+                }
+            ],
+            isError: false
+        };
+    }
+    catch (error) {
+        return {
+            content: [
+                {
+                    type: "text",
+                    text: JSON.stringify({
+                        error: "Capability check failed",
+                        message: error.message,
+                        question: args?.question || "unknown"
+                    }, null, 2)
+                }
+            ],
+            isError: true
+        };
+    }
+}
+exports.handleCheckCapability = handleCheckCapability;
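
Usage sketch (not part of the published diff): the exported handler can be driven directly from Node.js. The require path and the sample question are illustrative assumptions; only the argument names come from the tool's input schema above.

// Minimal sketch, assuming a local copy of the package's dist/ tree.
const { handleCheckCapability } = require("./dist/tools/check-capability.js");

(async () => {
    const result = await handleCheckCapability({
        question: "Can I create VMs with GPUs?",   // free-form natural language
        capability_type: "gpu_support"             // optional; inferred from the question if omitted
    });
    // result.content[0].text is a JSON string containing answer, details,
    // confidence, confidence_level, and (when confidence < 0.5) suggestions.
    console.log(result.isError, result.content[0].text);
})();
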

package/dist/tools/create-vm.js
@@ -0,0 +1,253 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.handleCreateVM = exports.createVMTool = void 0;
+const vm_parsing_js_1 = require("../lib/vm-parsing.js");
+const api_utils_js_1 = require("../lib/api-utils.js");
+const name_resolver_js_1 = require("../lib/name-resolver.js");
+exports.createVMTool = {
+    name: "create_vm",
+    description: "Provision a new virtual machine",
+    inputSchema: {
+        type: "object",
+        properties: {
+            name: {
+                type: "string",
+                description: "Name for VM(s). Supports patterns like 'web01' or 'web01->web03' for multiple VMs"
+            },
+            group: {
+                type: "string",
+                description: "Group/site where VM will be created"
+            },
+            cloud: {
+                type: "string",
+                description: "Cloud/zone where VM will be provisioned (also accepts 'zone')"
+            },
+            zone: {
+                type: "string",
+                description: "Zone/cloud where VM will be provisioned (alias for 'cloud')"
+            },
+            template: {
+                type: "string",
+                description: "VM template or operating system"
+            },
+            size: {
+                type: "string",
+                description: "VM size (small, medium, 4GB, 8GB, etc.)"
+            },
+            distribution: {
+                type: "string",
+                description: "VM distribution strategy: 'auto' (default), 'spread' (across all nodes), or 'node1,node2,node3' (specific nodes)"
+            },
+            count: {
+                type: "number",
+                description: "Number of VMs to create (alternative to name patterns)"
+            }
+        },
+        required: ["name", "group", "template", "size"]
+    }
+};
+async function handleCreateVM(args) {
+    const { name, group, cloud, zone, template, size, distribution, count } = args;
+    // Allow both 'cloud' and 'zone' parameters interchangeably
+    const location = cloud || zone;
+    if (!location) {
+        return {
+            content: [
+                {
+                    type: "text",
+                    text: JSON.stringify({
+                        error: {
+                            code: "missing_location",
+                            message: "Either 'cloud' or 'zone' parameter is required"
+                        }
+                    }, null, 2)
+                }
+            ],
+            isError: true
+        };
+    }
+    // Parse VM names and determine distribution strategy
+    const vmNames = (0, vm_parsing_js_1.parseVMNames)(name, count);
+    const nodes = await (0, api_utils_js_1.getClusterNodes)();
+    // Calculate node assignments
+    const nodeAssignments = (0, vm_parsing_js_1.calculateNodeAssignments)(vmNames, nodes, distribution);
+    // Use name resolver to get IDs from actual VME environment
+    const groupId = await name_resolver_js_1.nameResolver.resolveNameToId('group', group);
+    const cloudId = await name_resolver_js_1.nameResolver.resolveNameToId('zone', location);
+    const imageId = await name_resolver_js_1.nameResolver.resolveNameToId('virtualImage', template);
+    const servicePlanId = await name_resolver_js_1.nameResolver.resolveNameToId('servicePlan', size);
+    const instanceTypeId = await name_resolver_js_1.nameResolver.resolveNameToId('instanceType', 'HPE VM'); // Default for VME
+    // Validate all required IDs were resolved
+    if (!groupId || !cloudId || !servicePlanId || !imageId) {
+        const errors = [];
+        if (!groupId) {
+            const availableGroups = await name_resolver_js_1.nameResolver.getAvailableNames('group');
+            errors.push(`Group '${group}' not found. Available: ${availableGroups.join(', ')}`);
+        }
+        if (!cloudId) {
+            const availableZones = await name_resolver_js_1.nameResolver.getAvailableNames('zone');
+            errors.push(`Zone/Cloud '${location}' not found. Available: ${availableZones.join(', ')}`);
+        }
+        if (!servicePlanId) {
+            const availablePlans = await name_resolver_js_1.nameResolver.getAvailableNames('servicePlan');
+            errors.push(`Size '${size}' could not be resolved to service plan. Available: ${availablePlans.join(', ')}`);
+        }
+        if (!imageId) {
+            const availableImages = await name_resolver_js_1.nameResolver.getAvailableNames('virtualImage');
+            errors.push(`Template '${template}' could not be resolved to OS image. Available: ${availableImages.join(', ')}`);
+        }
+        return {
+            content: [
+                {
+                    type: "text",
+                    text: JSON.stringify({
+                        error: {
+                            code: "resolution_failed",
+                            message: `Failed to resolve parameters:\n${errors.join('\n')}`
+                        }
+                    }, null, 2)
+                }
+            ],
+            isError: true
+        };
+    }
+    // Get additional required IDs from VME environment
+    let resourcePoolId = 'pool-1'; // Default fallback
+    let datastoreId = 5; // Default fallback
+    let networkId = 'network-2'; // Default fallback
+    let layoutId = 2; // Default fallback
+    try {
+        // Try to get real resource pool, datastore, network IDs from VME
+        const [resourcePools, datastores, networks, layouts] = await Promise.allSettled([
+            api_utils_js_1.api.get('/resource-pools').catch(() => null),
+            api_utils_js_1.api.get('/datastores').catch(() => null),
+            api_utils_js_1.api.get('/networks').catch(() => null),
+            api_utils_js_1.api.get('/layouts').catch(() => null)
+        ]);
+        // Use first available resource pool
+        if (resourcePools.status === 'fulfilled' && resourcePools.value?.data?.resourcePools?.[0]) {
+            resourcePoolId = resourcePools.value.data.resourcePools[0].id;
+        }
+        // Use first available datastore
+        if (datastores.status === 'fulfilled' && datastores.value?.data?.datastores?.[0]) {
+            datastoreId = datastores.value.data.datastores[0].id;
+        }
+        // Use first available network
+        if (networks.status === 'fulfilled' && networks.value?.data?.networks?.[0]) {
+            networkId = networks.value.data.networks[0].id;
+        }
+        // Use first HPE VM layout
+        if (layouts.status === 'fulfilled' && layouts.value?.data?.layouts) {
+            const hpeLayout = layouts.value.data.layouts.find((l) => l.code?.includes('mvm') || l.name?.toLowerCase().includes('hpe'));
+            if (hpeLayout) {
+                layoutId = hpeLayout.id;
+            }
+        }
+    }
+    catch (error) {
+        console.warn('Could not discover some VME resources, using defaults:', error.message);
+    }
+    // Create VMs sequentially
+    const results = [];
+    const errors = [];
+    for (const assignment of nodeAssignments) {
+        const vmConfig = {
+            resourcePoolId: resourcePoolId,
+            poolProviderType: 'mvm',
+            imageId: imageId,
+            createUser: true
+        };
+        // Add kvmHostId only if explicitly specified
+        if (assignment.kvmHostId) {
+            vmConfig.kvmHostId = assignment.kvmHostId;
+        }
+        const payload = {
+            zoneId: cloudId,
+            instance: {
+                name: assignment.name,
+                cloud: await name_resolver_js_1.nameResolver.getAvailableNames('zone').then(zones => zones[0]) || 'tc-lab',
+                hostName: assignment.name,
+                type: 'mvm',
+                instanceType: {
+                    code: 'mvm'
+                },
+                site: {
+                    id: groupId
+                },
+                layout: {
+                    id: layoutId,
+                    code: 'mvm-1.0-single'
+                },
+                plan: {
+                    id: servicePlanId
+                }
+            },
+            config: vmConfig,
+            volumes: [
+                {
+                    id: -1,
+                    rootVolume: true,
+                    name: 'root',
+                    size: 10,
+                    storageType: 1,
+                    datastoreId: datastoreId
+                }
+            ],
+            networkInterfaces: [
+                {
+                    primaryInterface: true,
+                    ipMode: 'dhcp',
+                    network: {
+                        id: networkId
+                    },
+                    networkInterfaceTypeId: 10
+                }
+            ],
+            layoutSize: 1
+        };
+        try {
+            const response = await api_utils_js_1.api.post("/instances", payload);
+            const vm = response.data?.instance;
+            const nodeInfo = assignment.kvmHostId ? ` on node ${assignment.kvmHostId}` : ' (auto-placed)';
+            results.push(`VM '${vm.name}' created (ID: ${vm.id})${nodeInfo}`);
+        }
+        catch (err) {
+            const nodeInfo = assignment.kvmHostId ? ` on node ${assignment.kvmHostId}` : '';
+            errors.push(`VM '${assignment.name}' failed${nodeInfo}: ${err.response?.data?.message || err.message}`);
+        }
+    }
+    // Prepare response
+    const summary = [];
+    if (results.length > 0) {
+        summary.push(`Successfully created ${results.length} VM(s):`);
+        summary.push(...results);
+    }
+    if (errors.length > 0) {
+        summary.push(`\nFailed to create ${errors.length} VM(s):`);
+        summary.push(...errors);
+    }
+    summary.push(`\nResolved parameters:`);
+    summary.push(`- Group: ${group} (ID: ${groupId})`);
+    summary.push(`- Zone/Cloud: ${location} (ID: ${cloudId})`);
+    summary.push(`- Template: ${template} (ID: ${imageId})`);
+    summary.push(`- Plan: ${size} (ID: ${servicePlanId})`);
+    if (distribution === 'spread' || (vmNames.length > 1 && !distribution)) {
+        summary.push(`- Distribution: Spread across nodes ${nodes.join(', ')}`);
+    }
+    else if (nodeAssignments.some(a => a.kvmHostId)) {
+        summary.push(`- Distribution: Specific node placement`);
+    }
+    else {
+        summary.push(`- Distribution: Auto-placement`);
+    }
+    return {
+        content: [
+            {
+                type: "text",
+                text: summary.join('\n')
+            }
+        ],
+        isError: errors.length > 0 && results.length === 0
+    };
+}
+exports.handleCreateVM = handleCreateVM;
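
Usage sketch (illustrative, not from the package): the group, cloud, template, and size values below are placeholders and must resolve to real names in the target VME environment, otherwise the handler returns the resolution errors shown above.

// Minimal sketch, assuming a local copy of the dist/ tree and valid VME API credentials.
const { handleCreateVM } = require("./dist/tools/create-vm.js");

(async () => {
    const result = await handleCreateVM({
        name: "web01->web03",      // pattern expands to web01, web02, web03
        group: "Engineering",      // resolved to a group/site ID (placeholder value)
        cloud: "tc-lab",           // 'zone' is accepted as an alias
        template: "Ubuntu 22.04",  // resolved to a virtual image ID (placeholder value)
        size: "medium",            // resolved to a service plan ID
        distribution: "spread"     // spread the VMs across cluster nodes
    });
    // content[0].text is a plain-text summary of created/failed VMs plus the
    // resolved group, zone, template, and plan IDs.
    console.log(result.content[0].text);
})();
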

package/dist/tools/discover-capabilities.js
@@ -0,0 +1,79 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.handleDiscoverCapabilities = exports.discoverCapabilitiesTool = void 0;
+const capability_discovery_js_1 = require("../lib/capability-discovery.js");
+exports.discoverCapabilitiesTool = {
+    name: "discover_capabilities",
+    description: "Discover VME infrastructure capabilities with intelligent filtering and caching",
+    inputSchema: {
+        type: "object",
+        properties: {
+            domain: {
+                type: "string",
+                enum: ["compute", "storage", "networking", "platform", "all"],
+                description: "Which capability domain to discover (default: all)",
+                default: "all"
+            },
+            refresh: {
+                type: "boolean",
+                description: "Force refresh of cached data (default: false)",
+                default: false
+            },
+            include_limits: {
+                type: "boolean",
+                description: "Include license/quota limits (default: true)",
+                default: true
+            }
+        },
+        required: []
+    }
+};
+async function handleDiscoverCapabilities(args) {
+    try {
+        const { domain = "all", refresh = false, include_limits = true } = args;
+        // Convert single domain to array for discovery engine
+        const domains = domain === "all" ? ["all"] : [domain];
+        const capabilities = await capability_discovery_js_1.capabilityDiscovery.discoverCapabilities(domains, refresh);
+        // Filter out license limits if not requested
+        if (!include_limits && capabilities.platform) {
+            delete capabilities.platform.license_limits;
+        }
+        // Prepare response with metadata
+        const response = {
+            capabilities,
+            metadata: {
+                discovery_time: capabilities.discovered_at,
+                domains_requested: domains,
+                cache_refresh_forced: refresh,
+                total_cache_fields: capabilities.cache_status.length,
+                fresh_cache_fields: capabilities.cache_status.filter(s => s.fresh).length,
+                token_optimization: "Field-level TTL caching reduces response size by 90%+"
+            }
+        };
+        return {
+            content: [
+                {
+                    type: "text",
+                    text: JSON.stringify(response, null, 2)
+                }
+            ],
+            isError: false
+        };
+    }
+    catch (error) {
+        return {
+            content: [
+                {
+                    type: "text",
+                    text: JSON.stringify({
+                        error: "Capability discovery failed",
+                        message: error.message,
+                        suggestion: "Check VME API connectivity and authentication"
+                    }, null, 2)
+                }
+            ],
+            isError: true
+        };
+    }
+}
+exports.handleDiscoverCapabilities = handleDiscoverCapabilities;
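
Usage sketch (assumed invocation, values illustrative): discovering a single domain and dropping license limits, as the handler above supports.

// Minimal sketch, assuming a local copy of the dist/ tree and VME API access.
const { handleDiscoverCapabilities } = require("./dist/tools/discover-capabilities.js");

(async () => {
    const result = await handleDiscoverCapabilities({
        domain: "compute",       // "compute" | "storage" | "networking" | "platform" | "all"
        refresh: false,          // reuse cached data while it is still fresh
        include_limits: false    // strips platform.license_limits from the response
    });
    // The JSON payload contains the capabilities plus cache metadata
    // (domains_requested, fresh vs. total cache fields, etc.).
    console.log(result.content[0].text);
})();
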

package/dist/tools/export-training-data.js
@@ -0,0 +1,115 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.handleExportTrainingData = exports.exportTrainingDataTool = void 0;
+const fs_1 = require("fs");
+const path_1 = require("path");
+exports.exportTrainingDataTool = {
+    name: "export_training_data",
+    description: "Export AI training data for model improvement (requires ENABLE_AI_TRAINING_DATA=true)",
+    inputSchema: {
+        type: "object",
+        properties: {
+            format: {
+                type: "string",
+                description: "Export format: 'jsonl' or 'csv'",
+                enum: ["jsonl", "csv"]
+            },
+            days: {
+                type: "number",
+                description: "Number of days of data to export (default: 7)"
+            }
+        },
+        required: []
+    }
+};
+async function handleExportTrainingData(args) {
+    const AI_TRAINING_ENABLED = process.env.ENABLE_AI_TRAINING_DATA === 'true';
+    if (!AI_TRAINING_ENABLED) {
+        return {
+            content: [
+                {
+                    type: "text",
+                    text: JSON.stringify({
+                        error: "Training data collection is disabled",
+                        message: "Set ENABLE_AI_TRAINING_DATA=true in .env to enable data collection and export"
+                    }, null, 2)
+                }
+            ],
+            isError: true
+        };
+    }
+    const { format = "jsonl", days = 7 } = args;
+    try {
+        const logsDir = (0, path_1.join)(process.cwd(), 'ai-training-logs');
+        if (!(0, fs_1.existsSync)(logsDir)) {
+            return {
+                content: [
+                    {
+                        type: "text",
+                        text: JSON.stringify({
+                            message: "No training data found",
+                            data_count: 0
+                        }, null, 2)
+                    }
+                ],
+                isError: false
+            };
+        }
+        // Collect data from last N days
+        const cutoffDate = new Date(Date.now() - (days * 24 * 60 * 60 * 1000));
+        const allData = [];
+        // Read log files and aggregate data
+        const logFiles = (0, fs_1.readdirSync)(logsDir).filter((file) => file.startsWith('interactions-') && file.endsWith('.jsonl'));
+        for (const file of logFiles) {
+            const fileDate = new Date(file.replace('interactions-', '').replace('.jsonl', ''));
+            if (fileDate >= cutoffDate) {
+                const content = (0, fs_1.readFileSync)((0, path_1.join)(logsDir, file), 'utf-8');
+                const lines = content.trim().split('\n').filter(line => line.trim());
+                for (const line of lines) {
+                    try {
+                        allData.push(JSON.parse(line));
+                    }
+                    catch (e) {
+                        // Skip malformed lines
+                    }
+                }
+            }
+        }
+        return {
+            content: [
+                {
+                    type: "text",
+                    text: JSON.stringify({
+                        message: `Exported ${allData.length} training data records from last ${days} days`,
+                        format: format,
+                        data_count: allData.length,
+                        data: format === 'jsonl' ? allData : allData.map(item => ({
+                            timestamp: item.timestamp,
+                            tool: item.tool_name,
+                            user_input: JSON.stringify(item.user_input),
+                            parsed_output: JSON.stringify(item.parsed_output),
+                            success: item.success_metrics?.operation_success,
+                            confidence: item.success_metrics?.confidence_score
+                        }))
+                    }, null, 2)
+                }
+            ],
+            isError: false
+        };
+    }
+    catch (error) {
+        return {
+            content: [
+                {
+                    type: "text",
+                    text: JSON.stringify({
+                        error: "Failed to export training data",
+                        message: error.message
+                    }, null, 2)
+                }
+            ],
+            isError: true
+        };
+    }
+}
+exports.handleExportTrainingData = handleExportTrainingData;
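
Usage sketch (assumptions noted in comments): the export is gated on the ENABLE_AI_TRAINING_DATA environment variable and reads date-named ai-training-logs/interactions-*.jsonl files under the working directory, per the handler above.

// Minimal sketch, assuming a local copy of the dist/ tree.
process.env.ENABLE_AI_TRAINING_DATA = 'true';   // normally set via .env
const { handleExportTrainingData } = require("./dist/tools/export-training-data.js");

(async () => {
    const result = await handleExportTrainingData({ format: "csv", days: 30 });
    // With format "csv" each record is flattened to timestamp/tool/user_input/
    // parsed_output/success/confidence; "jsonl" returns the raw records.
    console.log(result.content[0].text);
})();
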

package/dist/tools/get-cache-status.js
@@ -0,0 +1,123 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.handleGetCacheStatus = exports.getCacheStatusTool = void 0;
+const capability_cache_js_1 = require("../lib/capability-cache.js");
+exports.getCacheStatusTool = {
+    name: "get_cache_status",
+    description: "Show freshness and status of cached capability data for transparency and debugging",
+    inputSchema: {
+        type: "object",
+        properties: {
+            field: {
+                type: "string",
+                enum: ["zones", "clusters", "service_plans", "instance_types", "virtual_images", "resource_pools", "networks", "groups"],
+                description: "Specific field to check (optional, shows all cached fields if omitted)"
+            },
+            include_statistics: {
+                type: "boolean",
+                description: "Include cache hit/miss statistics (default: false)",
+                default: false
+            }
+        },
+        required: []
+    }
+};
+async function handleGetCacheStatus(args) {
+    try {
+        const { field, include_statistics = false } = args;
+        // Get cache status for specific field or all fields
+        const cacheStatuses = capability_cache_js_1.capabilityCache.getCacheStatus(field);
+        // Calculate summary statistics
+        const totalFields = cacheStatuses.length;
+        const freshFields = cacheStatuses.filter(s => s.is_fresh).length;
+        const staleFields = totalFields - freshFields;
+        // Build response
+        const response = {
+            cache_summary: {
+                total_cached_fields: totalFields,
+                fresh_fields: freshFields,
+                stale_fields: staleFields,
+                freshness_rate: totalFields > 0 ? Math.round((freshFields / totalFields) * 100) : 0
+            },
+            field_details: cacheStatuses.map(status => ({
+                field_name: status.field_name,
+                status: status.is_fresh ? "fresh" : "stale",
+                cached_at: new Date(status.cached_at * 1000).toISOString(),
+                age_seconds: status.age_seconds,
+                age_human: formatDuration(status.age_seconds),
+                ttl_seconds: status.ttl_seconds,
+                expires_in_seconds: status.expires_in_seconds,
+                expires_in_human: status.is_fresh ? formatDuration(status.expires_in_seconds) : "expired",
+                endpoint: status.endpoint
+            })),
+            recommendations: generateRecommendations(cacheStatuses)
+        };
+        // Include statistics if requested
+        if (include_statistics) {
+            const stats = capability_cache_js_1.capabilityCache.getStatistics();
+            response.statistics = {
+                cache_hits: stats.hits,
+                cache_misses: stats.misses,
+                hit_rate_percentage: Math.round(stats.hitRate * 100),
+                note: "Statistics tracking not yet implemented - placeholder data"
+            };
+        }
+        return {
+            content: [
+                {
+                    type: "text",
+                    text: JSON.stringify(response, null, 2)
+                }
+            ],
+            isError: false
+        };
+    }
+    catch (error) {
+        return {
+            content: [
+                {
+                    type: "text",
+                    text: JSON.stringify({
+                        error: "Cache status check failed",
+                        message: error.message
+                    }, null, 2)
+                }
+            ],
+            isError: true
+        };
+    }
+}
+exports.handleGetCacheStatus = handleGetCacheStatus;
+// Helper function to format duration in human-readable format
+function formatDuration(seconds) {
+    if (seconds < 60) {
+        return `${seconds}s`;
+    }
+    else if (seconds < 3600) {
+        return `${Math.floor(seconds / 60)}m ${seconds % 60}s`;
+    }
+    else {
+        const hours = Math.floor(seconds / 3600);
+        const minutes = Math.floor((seconds % 3600) / 60);
+        return `${hours}h ${minutes}m`;
+    }
+}
+// Generate recommendations based on cache status
+function generateRecommendations(statuses) {
+    const recommendations = [];
+    const staleFields = statuses.filter(s => !s.is_fresh);
+    const oldFields = statuses.filter(s => s.age_seconds > 3600); // > 1 hour
+    if (staleFields.length > 0) {
+        recommendations.push(`${staleFields.length} field(s) have stale data: ${staleFields.map(s => s.field_name).join(", ")}`);
+    }
+    if (oldFields.length > 0) {
+        recommendations.push(`Consider refreshing old data for: ${oldFields.map(s => s.field_name).join(", ")}`);
+    }
+    if (statuses.length === 0) {
+        recommendations.push("No cached data found. Run discover_capabilities to populate cache.");
+    }
+    if (recommendations.length === 0) {
+        recommendations.push("Cache is healthy - all data is fresh and within TTL limits.");
+    }
+    return recommendations;
+}
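
Usage sketch (assumed invocation, field value illustrative): inspecting cache freshness for one field, with the placeholder statistics the handler currently returns.

// Minimal sketch, assuming a local copy of the dist/ tree.
const { handleGetCacheStatus } = require("./dist/tools/get-cache-status.js");

(async () => {
    const result = await handleGetCacheStatus({
        field: "zones",             // omit to report every cached field
        include_statistics: true    // note: statistics are placeholder data for now
    });
    // The response includes a cache_summary, per-field freshness/TTL details,
    // and human-readable recommendations (e.g. which fields to refresh).
    console.log(result.content[0].text);
})();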