ultravisor-beacon-capability 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +106 -0
- package/docs/.nojekyll +0 -0
- package/docs/README.md +103 -0
- package/docs/_brand.json +18 -0
- package/docs/_cover.md +13 -0
- package/docs/_sidebar.md +31 -0
- package/docs/_topbar.md +5 -0
- package/docs/_version.json +7 -0
- package/docs/api/README.md +44 -0
- package/docs/api/action-convention.md +148 -0
- package/docs/api/add-action.md +68 -0
- package/docs/api/beacon-capability.md +89 -0
- package/docs/api/build-action-map.md +88 -0
- package/docs/api/connect.md +81 -0
- package/docs/api/disconnect.md +50 -0
- package/docs/api/is-connected.md +33 -0
- package/docs/api/lifecycle-hooks.md +115 -0
- package/docs/architecture.md +237 -0
- package/docs/css/docuserve.css +327 -0
- package/docs/examples/README.md +58 -0
- package/docs/examples/certificate-expiry-monitor.md +212 -0
- package/docs/examples/docker-container-management.md +265 -0
- package/docs/examples/log-archive-and-upload.md +214 -0
- package/docs/examples/log-file-cleanup.md +199 -0
- package/docs/examples/mysql-maintenance.md +253 -0
- package/docs/examples/postgresql-aggregation.md +247 -0
- package/docs/examples/rest-api-health-check.md +213 -0
- package/docs/examples/rest-endpoint-sync.md +240 -0
- package/docs/examples/server-metrics-collection.md +199 -0
- package/docs/examples/shell-commands.md +176 -0
- package/docs/index.html +39 -0
- package/docs/quickstart.md +199 -0
- package/docs/retold-catalog.json +85 -0
- package/docs/retold-keyword-index.json +10642 -0
- package/package.json +33 -0
- package/source/Ultravisor-Beacon-Capability-ActionMap.cjs +132 -0
- package/source/Ultravisor-Beacon-Capability.cjs +276 -0
- package/test/Ultravisor-Beacon-Capability_tests.js +744 -0

@@ -0,0 +1,265 @@
# Example: Docker Container Management

A capability for listing, restarting, and pruning Docker containers and images. Wraps Docker CLI commands as beacon actions for remote orchestration through Ultravisor.

## Full Source

```javascript
const libFable = require('fable');
const libBeaconCapability = require('ultravisor-beacon-capability');
const libChildProcess = require('child_process');

class DockerManagement extends libBeaconCapability
{
	constructor(pFable, pOptions, pServiceHash)
	{
		super(pFable, pOptions, pServiceHash);
		this.serviceType = 'DockerManagement';
		this.capabilityName = 'DockerManagement';
	}

	/**
	 * Internal: run a docker command and return the output.
	 */
	_exec(pCmd, pTimeout, fCallback)
	{
		libChildProcess.exec(pCmd, { timeout: pTimeout || 60000, maxBuffer: 5 * 1024 * 1024 }, (pError, pStdOut, pStdErr) =>
		{
			if (pError)
			{
				return fCallback(new Error(`${pCmd}: ${pStdErr || pError.message}`));
			}
			return fCallback(null, pStdOut.trim());
		});
	}

	// --- Action: ListContainers ---

	get actionListContainers_Description()
	{
		return 'List Docker containers with status, ports, and resource usage';
	}

	get actionListContainers_Schema()
	{
		return [
			{ Name: 'All', DataType: 'Boolean', Required: false, Default: false, Description: 'Include stopped containers' },
			{ Name: 'Filter', DataType: 'String', Required: false, Description: 'Docker filter expression (e.g. "status=running")' }
		];
	}

	actionListContainers(pSettings, pWorkItem, fCallback)
	{
		let tmpFlags = pSettings.All ? '-a' : '';
		let tmpFilter = pSettings.Filter ? `--filter "${pSettings.Filter}"` : '';
		let tmpFormat = '{{.ID}}\\t{{.Names}}\\t{{.Image}}\\t{{.Status}}\\t{{.Ports}}\\t{{.Size}}';

		let tmpCmd = `docker ps ${tmpFlags} ${tmpFilter} --format '${tmpFormat}' --size`;

		this._exec(tmpCmd, 30000, (pError, pOutput) =>
		{
			if (pError) return fCallback(pError);

			let tmpContainers = pOutput.split('\n').filter(Boolean).map((pLine) =>
			{
				let tmpParts = pLine.split('\t');
				return {
					ID: tmpParts[0],
					Name: tmpParts[1],
					Image: tmpParts[2],
					Status: tmpParts[3],
					Ports: tmpParts[4],
					Size: tmpParts[5]
				};
			});

			return fCallback(null, {
				Outputs: { Containers: tmpContainers, Count: tmpContainers.length },
				Log: [`Found ${tmpContainers.length} containers`]
			});
		});
	}

	// --- Action: RestartContainer ---

	get actionRestartContainer_Description()
	{
		return 'Restart a Docker container by name or ID';
	}

	get actionRestartContainer_Schema()
	{
		return [
			{ Name: 'Container', DataType: 'String', Required: true, Description: 'Container name or ID' },
			{ Name: 'TimeoutSeconds', DataType: 'Integer', Required: false, Default: 10 }
		];
	}

	actionRestartContainer(pSettings, pWorkItem, fCallback)
	{
		let tmpTimeout = parseInt(pSettings.TimeoutSeconds, 10) || 10;
		let tmpCmd = `docker restart --time ${tmpTimeout} ${pSettings.Container}`;

		this.log.info(`Restarting container: ${pSettings.Container}`);

		this._exec(tmpCmd, 120000, (pError, pOutput) =>
		{
			if (pError) return fCallback(pError);

			// Verify it's running
			this._exec(`docker inspect --format '{{.State.Status}}' ${pSettings.Container}`, 10000, (pInspectError, pStatus) =>
			{
				return fCallback(null, {
					Outputs: {
						Container: pSettings.Container,
						Status: pStatus || 'unknown',
						Restarted: true
					},
					Log: [`Restarted ${pSettings.Container}, status: ${pStatus || 'unknown'}`]
				});
			});
		});
	}

	// --- Action: PruneSystem ---

	get actionPruneSystem_Description()
	{
		return 'Remove unused containers, images, networks, and volumes';
	}

	get actionPruneSystem_Schema()
	{
		return [
			{ Name: 'Volumes', DataType: 'Boolean', Required: false, Default: false, Description: 'Also prune unused volumes' },
			{ Name: 'OlderThanHours', DataType: 'Integer', Required: false, Description: 'Only prune resources older than N hours' }
		];
	}

	actionPruneSystem(pSettings, pWorkItem, fCallback, fReportProgress)
	{
		let tmpFilter = pSettings.OlderThanHours ? `--filter "until=${pSettings.OlderThanHours}h"` : '';
		let tmpVolumes = pSettings.Volumes ? '--volumes' : '';
		let tmpResults = {};

		fReportProgress({ Percent: 10, Message: 'Pruning containers...' });

		// Prune stopped containers
		this._exec(`docker container prune -f ${tmpFilter}`, 60000, (pContainerError, pContainerOut) =>
		{
			tmpResults.ContainerPrune = pContainerOut || (pContainerError ? pContainerError.message : 'done');

			fReportProgress({ Percent: 30, Message: 'Pruning images...' });

			// Prune dangling images
			this._exec(`docker image prune -f ${tmpFilter}`, 120000, (pImageError, pImageOut) =>
			{
				tmpResults.ImagePrune = pImageOut || (pImageError ? pImageError.message : 'done');

				fReportProgress({ Percent: 60, Message: 'Pruning networks...' });

				// Prune networks
				this._exec(`docker network prune -f ${tmpFilter}`, 30000, (pNetError, pNetOut) =>
				{
					tmpResults.NetworkPrune = pNetOut || (pNetError ? pNetError.message : 'done');

					if (pSettings.Volumes)
					{
						fReportProgress({ Percent: 80, Message: 'Pruning volumes...' });

						this._exec('docker volume prune -f', 60000, (pVolError, pVolOut) =>
						{
							tmpResults.VolumePrune = pVolOut || (pVolError ? pVolError.message : 'done');

							return fCallback(null, {
								Outputs: tmpResults,
								Log: ['Docker system prune complete (including volumes)']
							});
						});
					}
					else
					{
						return fCallback(null, {
							Outputs: tmpResults,
							Log: ['Docker system prune complete (volumes skipped)']
						});
					}
				});
			});
		});
	}

	// --- Action: ContainerLogs ---

	get actionContainerLogs_Description()
	{
		return 'Retrieve recent logs from a Docker container';
	}

	get actionContainerLogs_Schema()
	{
		return [
			{ Name: 'Container', DataType: 'String', Required: true },
			{ Name: 'TailLines', DataType: 'Integer', Required: false, Default: 100 },
			{ Name: 'Since', DataType: 'String', Required: false, Description: 'Show logs since timestamp or relative (e.g. "1h", "2024-01-01")' }
		];
	}

	actionContainerLogs(pSettings, pWorkItem, fCallback)
	{
		let tmpTail = parseInt(pSettings.TailLines, 10) || 100;
		let tmpSince = pSettings.Since ? `--since ${pSettings.Since}` : '';

		let tmpCmd = `docker logs --tail ${tmpTail} ${tmpSince} ${pSettings.Container} 2>&1`;

		this._exec(tmpCmd, 30000, (pError, pOutput) =>
		{
			if (pError) return fCallback(pError);
			return fCallback(null, {
				Outputs: {
					Container: pSettings.Container,
					Logs: pOutput,
					LineCount: pOutput.split('\n').length
				},
				Log: [`Retrieved ${tmpTail} tail lines from ${pSettings.Container}`]
			});
		});
	}
}

// --- Startup ---

let tmpFable = new libFable({ Product: 'DockerManagement', ProductVersion: '1.0.0' });
tmpFable.addServiceType('DockerManagement', DockerManagement);
let tmpCap = tmpFable.instantiateServiceProvider('DockerManagement');

tmpCap.connect(
	{
		ServerURL: process.env.ULTRAVISOR_URL || 'http://localhost:54321',
		Name: `docker-mgmt-${require('os').hostname()}`,
		MaxConcurrent: 2
	},
	(pError) =>
	{
		if (pError) throw pError;
		console.log('Docker management beacon online');
	});

process.on('SIGTERM', () => { tmpCap.disconnect(() => process.exit(0)); });
```
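
The `_exec` helper passes its command string to a shell, and several actions interpolate caller-supplied settings such as `Container` and `Filter` into that string. A minimal guard against shell metacharacters, as a sketch only -- the `isSafeShellArgument` helper below is hypothetical and not part of the published example:

```javascript
// Hypothetical guard (not part of the example above): returns true when a value only
// contains characters that are typically safe to interpolate into a shell command.
// Container names, image references, and simple docker filter expressions fit this set.
const isSafeShellArgument = (pValue) => /^[A-Za-z0-9_.\/:=,-]+$/.test(String(pValue));

// Example use at the top of an action, before building tmpCmd:
//   if (!isSafeShellArgument(pSettings.Container))
//   {
//       return fCallback(new Error(`Invalid container identifier: ${pSettings.Container}`));
//   }

console.log(isSafeShellArgument('nginx-prod'));        // true
console.log(isSafeShellArgument('nginx; rm -rf /'));   // false
```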

## Registered Task Types

- `beacon-dockermanagement-listcontainers`
- `beacon-dockermanagement-restartcontainer`
- `beacon-dockermanagement-prunesystem`
- `beacon-dockermanagement-containerlogs`

## Key Points

- **Wraps Docker CLI** -- requires `docker` to be available on the host
- **RestartContainer** verifies the container is running after restart
- **PruneSystem** is sequential (containers -> images -> networks -> volumes) with progress reporting
- **OlderThanHours** filter prevents pruning of recently used resources
- **ContainerLogs** captures both stdout and stderr via `2>&1`
- Useful for managing Docker hosts where you want centralized visibility and control through Ultravisor; a local smoke-test invocation is sketched below
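
Because every action is a plain method with the `(pSettings, pWorkItem, fCallback)` signature, it can also be exercised without an Ultravisor server. A minimal smoke-test sketch, assuming the `tmpCap` instance from the startup code above and no beacon connection:

```javascript
// Local smoke test: call the action directly, bypassing the beacon transport.
// pWorkItem is an empty object here because ListContainers does not read it.
tmpCap.actionListContainers({ All: true }, {},
	(pError, pResult) =>
	{
		if (pError)
		{
			return console.error(`ListContainers failed: ${pError.message}`);
		}
		console.log(`Containers on this host: ${pResult.Outputs.Count}`);
	});
```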

@@ -0,0 +1,214 @@
# Example: Log Archive and Upload

A capability that compresses log directories into tar.gz archives and optionally uploads them to S3. Useful for archiving logs before cleanup or for centralized log storage.

## Full Source

```javascript
const libFable = require('fable');
const libBeaconCapability = require('ultravisor-beacon-capability');
const libChildProcess = require('child_process');
const libPath = require('path');
const libFS = require('fs');

class LogArchive extends libBeaconCapability
{
	constructor(pFable, pOptions, pServiceHash)
	{
		super(pFable, pOptions, pServiceHash);
		this.serviceType = 'LogArchive';
		this.capabilityName = 'LogArchive';
	}

	// --- Action: CompressLogs ---

	get actionCompressLogs_Description()
	{
		return 'Compress a log directory into a timestamped tar.gz archive';
	}

	get actionCompressLogs_Schema()
	{
		return [
			{ Name: 'SourceDirectory', DataType: 'String', Required: true },
			{ Name: 'OutputDirectory', DataType: 'String', Required: true },
			{ Name: 'ArchivePrefix', DataType: 'String', Required: false, Default: 'logs' },
			{ Name: 'Pattern', DataType: 'String', Required: false, Default: '*.log', Description: 'File pattern to include' },
			{ Name: 'OlderThanDays', DataType: 'Integer', Required: false, Description: 'Only archive files older than N days' },
			{ Name: 'DeleteOriginals', DataType: 'Boolean', Required: false, Default: false }
		];
	}

	actionCompressLogs(pSettings, pWorkItem, fCallback, fReportProgress)
	{
		let tmpTimestamp = new Date().toISOString().replace(/[:.]/g, '-').substring(0, 19);
		let tmpArchiveName = `${pSettings.ArchivePrefix || 'logs'}-${tmpTimestamp}.tar.gz`;
		let tmpOutputPath = libPath.join(pSettings.OutputDirectory, tmpArchiveName);

		// Build the find command to collect files
		let tmpFindCmd = `find ${pSettings.SourceDirectory} -name '${pSettings.Pattern || '*.log'}' -type f`;
		if (pSettings.OlderThanDays)
		{
			tmpFindCmd += ` -mtime +${pSettings.OlderThanDays}`;
		}

		fReportProgress({ Percent: 10, Message: 'Scanning for files to archive...' });

		// Count files first
		libChildProcess.exec(`${tmpFindCmd} | wc -l`, (pCountError, pCountOut) =>
		{
			if (pCountError) return fCallback(pCountError);

			let tmpFileCount = parseInt(pCountOut.trim(), 10);
			if (tmpFileCount === 0)
			{
				return fCallback(null, {
					Outputs: { FileCount: 0, ArchivePath: null },
					Log: ['No files matched the criteria']
				});
			}

			fReportProgress({ Percent: 20, Message: `Found ${tmpFileCount} files, compressing...` });

			// Create the archive
			let tmpTarCmd = `${tmpFindCmd} | tar -czf ${tmpOutputPath} -T -`;

			libChildProcess.exec(tmpTarCmd, { timeout: 600000 }, (pTarError) =>
			{
				if (pTarError) return fCallback(pTarError);

				fReportProgress({ Percent: 70, Message: 'Archive created, checking size...' });

				// Get archive size
				libFS.stat(tmpOutputPath, (pStatError, pStats) =>
				{
					if (pStatError) return fCallback(pStatError);

					let tmpSizeMB = (pStats.size / (1024 * 1024)).toFixed(2);

					if (pSettings.DeleteOriginals)
					{
						fReportProgress({ Percent: 80, Message: 'Deleting original files...' });
						libChildProcess.exec(`${tmpFindCmd} -delete`, (pDelError) =>
						{
							if (pDelError) this.log.warn(`Delete error: ${pDelError.message}`);

							return fCallback(null, {
								Outputs: {
									ArchivePath: tmpOutputPath,
									ArchiveName: tmpArchiveName,
									ArchiveSizeMB: parseFloat(tmpSizeMB),
									FileCount: tmpFileCount,
									OriginalsDeleted: true
								},
								Log: [`Archived ${tmpFileCount} files to ${tmpArchiveName} (${tmpSizeMB}MB), originals deleted`]
							});
						});
					}
					else
					{
						return fCallback(null, {
							Outputs: {
								ArchivePath: tmpOutputPath,
								ArchiveName: tmpArchiveName,
								ArchiveSizeMB: parseFloat(tmpSizeMB),
								FileCount: tmpFileCount,
								OriginalsDeleted: false
							},
							Log: [`Archived ${tmpFileCount} files to ${tmpArchiveName} (${tmpSizeMB}MB)`]
						});
					}
				});
			});
		});
	}

	// --- Action: UploadToS3 ---

	get actionUploadToS3_Description()
	{
		return 'Upload a file to an S3 bucket using the AWS CLI';
	}

	get actionUploadToS3_Schema()
	{
		return [
			{ Name: 'FilePath', DataType: 'String', Required: true },
			{ Name: 'Bucket', DataType: 'String', Required: true },
			{ Name: 'Prefix', DataType: 'String', Required: false, Default: 'logs/' },
			{ Name: 'StorageClass', DataType: 'String', Required: false, Default: 'STANDARD_IA' },
			{ Name: 'DeleteAfterUpload', DataType: 'Boolean', Required: false, Default: false }
		];
	}

	actionUploadToS3(pSettings, pWorkItem, fCallback, fReportProgress)
	{
		let tmpFileName = libPath.basename(pSettings.FilePath);
		let tmpS3Key = `${pSettings.Prefix || 'logs/'}${tmpFileName}`;
		let tmpS3URI = `s3://${pSettings.Bucket}/${tmpS3Key}`;

		fReportProgress({ Percent: 10, Message: `Uploading to ${tmpS3URI}...` });

		let tmpCmd = `aws s3 cp ${pSettings.FilePath} ${tmpS3URI} --storage-class ${pSettings.StorageClass || 'STANDARD_IA'}`;

		libChildProcess.exec(tmpCmd, { timeout: 600000 }, (pUploadError, pStdOut) =>
		{
			if (pUploadError) return fCallback(pUploadError);

			fReportProgress({ Percent: 90, Message: 'Upload complete' });

			if (pSettings.DeleteAfterUpload)
			{
				libFS.unlink(pSettings.FilePath, (pUnlinkError) =>
				{
					if (pUnlinkError) this.log.warn(`Could not delete ${pSettings.FilePath}: ${pUnlinkError.message}`);

					return fCallback(null, {
						Outputs: { S3URI: tmpS3URI, LocalDeleted: !pUnlinkError },
						Log: [`Uploaded ${tmpFileName} to ${tmpS3URI}, local file deleted`]
					});
				});
			}
			else
			{
				return fCallback(null, {
					Outputs: { S3URI: tmpS3URI, LocalDeleted: false },
					Log: [`Uploaded ${tmpFileName} to ${tmpS3URI}`]
				});
			}
		});
	}
}

// --- Startup ---

let tmpFable = new libFable({ Product: 'LogArchive', ProductVersion: '1.0.0' });
tmpFable.addServiceType('LogArchive', LogArchive);
let tmpCap = tmpFable.instantiateServiceProvider('LogArchive');

tmpCap.connect(
	{
		ServerURL: process.env.ULTRAVISOR_URL || 'http://localhost:54321',
		Name: `log-archive-${require('os').hostname()}`
	},
	(pError) =>
	{
		if (pError) throw pError;
		console.log('Log archive beacon online');
	});

process.on('SIGTERM', () => { tmpCap.disconnect(() => process.exit(0)); });
```

## Registered Task Types

- `beacon-logarchive-compresslogs`
- `beacon-logarchive-uploadtos3`

## Key Points

- **CompressLogs** and **UploadToS3** can be composed into an operation graph: compress first, then upload (a direct-call sketch of this chain follows below)
- **Timestamped archive names** prevent collisions
- **OlderThanDays** filter lets you archive only stale logs while leaving recent ones in place
- **StorageClass** defaults to `STANDARD_IA` for cost-effective long-term storage
- The S3 upload uses the AWS CLI, so credentials must be configured on the host (environment variables or IAM role)
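
A minimal sketch of that compress-then-upload chain, calling the two actions directly on the `tmpCap` instance from the startup code above; the directory and bucket names are placeholders, and when driven through Ultravisor the same sequence would be expressed as two tasks in an operation graph rather than nested callbacks:

```javascript
// No-op progress reporter for direct invocation outside the beacon transport.
const fNoProgress = () => {};

tmpCap.actionCompressLogs(
	{ SourceDirectory: '/var/log/myapp', OutputDirectory: '/tmp', OlderThanDays: 7 },
	{},
	(pCompressError, pCompressResult) =>
	{
		if (pCompressError) return console.error(pCompressError.message);
		if (!pCompressResult.Outputs.ArchivePath) return console.log('Nothing to archive');

		// Feed the archive produced by CompressLogs straight into UploadToS3.
		tmpCap.actionUploadToS3(
			{ FilePath: pCompressResult.Outputs.ArchivePath, Bucket: 'example-log-archive-bucket', DeleteAfterUpload: true },
			{},
			(pUploadError, pUploadResult) =>
			{
				if (pUploadError) return console.error(pUploadError.message);
				console.log(`Archive stored at ${pUploadResult.Outputs.S3URI}`);
			},
			fNoProgress);
	},
	fNoProgress);
```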

@@ -0,0 +1,199 @@
# Example: Log File Cleanup

A capability for finding and deleting old log files by age and size. Replaces the cron-job-plus-bash-script pattern with observable, auditable cleanup operations.

## Full Source

```javascript
const libFable = require('fable');
const libBeaconCapability = require('ultravisor-beacon-capability');
const libChildProcess = require('child_process');
const libFS = require('fs');
const libPath = require('path');

class LogFileCleanup extends libBeaconCapability
{
	constructor(pFable, pOptions, pServiceHash)
	{
		super(pFable, pOptions, pServiceHash);
		this.serviceType = 'LogFileCleanup';
		this.capabilityName = 'LogFileCleanup';
	}

	// --- Action: PurgeByAge ---

	get actionPurgeByAge_Description()
	{
		return 'Delete log files older than a specified number of days';
	}

	get actionPurgeByAge_Schema()
	{
		return [
			{ Name: 'Directory', DataType: 'String', Required: true },
			{ Name: 'Pattern', DataType: 'String', Required: false, Default: '*.log' },
			{ Name: 'MaxAgeDays', DataType: 'Integer', Required: true },
			{ Name: 'Recursive', DataType: 'Boolean', Required: false, Default: false },
			{ Name: 'DryRun', DataType: 'Boolean', Required: false, Default: true }
		];
	}

	actionPurgeByAge(pSettings, pWorkItem, fCallback, fReportProgress)
	{
		let tmpDir = pSettings.Directory;
		let tmpPattern = pSettings.Pattern || '*.log';
		let tmpDays = parseInt(pSettings.MaxAgeDays, 10);
		let tmpRecursive = pSettings.Recursive ? '' : '-maxdepth 1';
		let tmpAction = pSettings.DryRun ? '-print' : '-print -delete';

		let tmpCmd = `find ${tmpDir} ${tmpRecursive} -name '${tmpPattern}' -type f -mtime +${tmpDays} ${tmpAction}`;

		fReportProgress({ Percent: 10, Message: `Scanning ${tmpDir} for files older than ${tmpDays} days...` });

		libChildProcess.exec(tmpCmd, { timeout: 120000, maxBuffer: 10 * 1024 * 1024 }, (pError, pStdOut, pStdErr) =>
		{
			if (pError && pError.code !== 0)
			{
				return fCallback(pError);
			}

			let tmpFiles = pStdOut.trim().split('\n').filter((pLine) => pLine.length > 0);

			return fCallback(null, {
				Outputs: {
					FilesFound: tmpFiles.length,
					Files: tmpFiles.slice(0, 200),
					DryRun: !!pSettings.DryRun,
					Truncated: tmpFiles.length > 200
				},
				Log: [
					pSettings.DryRun
						? `DRY RUN: Found ${tmpFiles.length} files to delete`
						: `Deleted ${tmpFiles.length} files`
				]
			});
		});
	}

	// --- Action: PurgeBySize ---

	get actionPurgeBySize_Description()
	{
		return 'Delete log files larger than a specified size';
	}

	get actionPurgeBySize_Schema()
	{
		return [
			{ Name: 'Directory', DataType: 'String', Required: true },
			{ Name: 'Pattern', DataType: 'String', Required: false, Default: '*.log' },
			{ Name: 'MaxSizeMB', DataType: 'Integer', Required: true },
			{ Name: 'DryRun', DataType: 'Boolean', Required: false, Default: true }
		];
	}

	actionPurgeBySize(pSettings, pWorkItem, fCallback)
	{
		let tmpDir = pSettings.Directory;
		let tmpPattern = pSettings.Pattern || '*.log';
		let tmpSizeMB = parseInt(pSettings.MaxSizeMB, 10);
		let tmpAction = pSettings.DryRun ? '-print' : '-print -delete';

		let tmpCmd = `find ${tmpDir} -name '${tmpPattern}' -type f -size +${tmpSizeMB}M ${tmpAction}`;

		libChildProcess.exec(tmpCmd, { timeout: 120000, maxBuffer: 10 * 1024 * 1024 }, (pError, pStdOut) =>
		{
			if (pError && pError.code !== 0) return fCallback(pError);

			let tmpFiles = pStdOut.trim().split('\n').filter((pLine) => pLine.length > 0);

			return fCallback(null, {
				Outputs: {
					FilesFound: tmpFiles.length,
					Files: tmpFiles.slice(0, 200),
					DryRun: !!pSettings.DryRun
				},
				Log: [
					pSettings.DryRun
						? `DRY RUN: Found ${tmpFiles.length} files larger than ${tmpSizeMB}MB`
						: `Deleted ${tmpFiles.length} files larger than ${tmpSizeMB}MB`
				]
			});
		});
	}

	// --- Action: ScanUsage ---

	get actionScanUsage_Description()
	{
		return 'Report total log directory size and file count by extension';
	}

	get actionScanUsage_Schema()
	{
		return [
			{ Name: 'Directory', DataType: 'String', Required: true }
		];
	}

	actionScanUsage(pSettings, pWorkItem, fCallback)
	{
		let tmpDir = pSettings.Directory;

		// Get total size
		let tmpSizeCmd = `du -sh ${tmpDir} 2>/dev/null | cut -f1`;
		// Get file count by extension
		let tmpCountCmd = `find ${tmpDir} -type f | sed 's/.*\\.//' | sort | uniq -c | sort -rn | head -20`;

		libChildProcess.exec(`${tmpSizeCmd} && echo '---SEPARATOR---' && ${tmpCountCmd}`, { timeout: 60000 }, (pError, pStdOut) =>
		{
			if (pError) return fCallback(pError);

			let tmpParts = pStdOut.split('---SEPARATOR---');
			let tmpTotalSize = tmpParts[0].trim();
			let tmpExtensions = tmpParts[1] ? tmpParts[1].trim().split('\n').map((pLine) =>
			{
				let tmpMatch = pLine.trim().match(/(\d+)\s+(.*)/);
				return tmpMatch ? { Count: parseInt(tmpMatch[1], 10), Extension: tmpMatch[2] } : null;
			}).filter(Boolean) : [];

			return fCallback(null, {
				Outputs: { TotalSize: tmpTotalSize, Extensions: tmpExtensions, Directory: tmpDir },
				Log: [`Log directory ${tmpDir}: ${tmpTotalSize} total`]
			});
		});
	}
}

// --- Startup ---

let tmpFable = new libFable({ Product: 'LogFileCleanup', ProductVersion: '1.0.0' });
tmpFable.addServiceType('LogFileCleanup', LogFileCleanup);
let tmpCap = tmpFable.instantiateServiceProvider('LogFileCleanup');

tmpCap.connect(
	{
		ServerURL: process.env.ULTRAVISOR_URL || 'http://localhost:54321',
		Name: `log-cleanup-${require('os').hostname()}`
	},
	(pError) =>
	{
		if (pError) throw pError;
		console.log('Log file cleanup beacon online');
	});

process.on('SIGTERM', () => { tmpCap.disconnect(() => process.exit(0)); });
```

## Registered Task Types

- `beacon-logfilecleanup-purgebyage`
- `beacon-logfilecleanup-purgebysize`
- `beacon-logfilecleanup-scanusage`

## Key Points

- **DryRun defaults to `true`** -- a safe default that shows what would be deleted without actually deleting (see the dry-run-first sketch below)
- **File list is capped** at 200 entries in the output to prevent excessive payload sizes
- **ScanUsage** provides a quick overview of disk consumption by file type before deciding what to clean
- Schedule `PurgeByAge` weekly to keep log directories from growing unbounded
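
A minimal dry-run-first sketch, calling the action directly on the `tmpCap` instance from the startup code above with a placeholder directory. When the method is called directly like this, the `Default` values declared in the schema may not be applied, so `DryRun: true` is passed explicitly:

```javascript
// No-op progress reporter for direct invocation outside the beacon transport.
const fNoProgress = () => {};

// First pass: explicitly a dry run, so nothing is deleted yet.
tmpCap.actionPurgeByAge({ Directory: '/var/log/myapp', MaxAgeDays: 30, DryRun: true }, {},
	(pError, pResult) =>
	{
		if (pError) return console.error(pError.message);
		console.log(`Would delete ${pResult.Outputs.FilesFound} files`, pResult.Outputs.Files);

		// After reviewing the list, re-run with DryRun: false to actually delete:
		// tmpCap.actionPurgeByAge({ Directory: '/var/log/myapp', MaxAgeDays: 30, DryRun: false }, {}, fDeleteCallback, fNoProgress);
	},
	fNoProgress);
```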
|