clawsql 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +97 -0
- package/README.md +372 -0
- package/dist/__tests__/config/settings.test.d.ts +5 -0
- package/dist/__tests__/config/settings.test.d.ts.map +1 -0
- package/dist/__tests__/config/settings.test.js +154 -0
- package/dist/__tests__/config/settings.test.js.map +1 -0
- package/dist/__tests__/core/discovery/topology.test.d.ts +5 -0
- package/dist/__tests__/core/discovery/topology.test.d.ts.map +1 -0
- package/dist/__tests__/core/discovery/topology.test.js +191 -0
- package/dist/__tests__/core/discovery/topology.test.js.map +1 -0
- package/dist/__tests__/core/failover/executor.test.d.ts +5 -0
- package/dist/__tests__/core/failover/executor.test.d.ts.map +1 -0
- package/dist/__tests__/core/failover/executor.test.js +256 -0
- package/dist/__tests__/core/failover/executor.test.js.map +1 -0
- package/dist/__tests__/core/monitoring/collector.test.d.ts +5 -0
- package/dist/__tests__/core/monitoring/collector.test.d.ts.map +1 -0
- package/dist/__tests__/core/monitoring/collector.test.js +131 -0
- package/dist/__tests__/core/monitoring/collector.test.js.map +1 -0
- package/dist/__tests__/core/monitoring/exporters.test.d.ts +5 -0
- package/dist/__tests__/core/monitoring/exporters.test.d.ts.map +1 -0
- package/dist/__tests__/core/monitoring/exporters.test.js +90 -0
- package/dist/__tests__/core/monitoring/exporters.test.js.map +1 -0
- package/dist/__tests__/core/routing/proxysql-manager.test.d.ts +5 -0
- package/dist/__tests__/core/routing/proxysql-manager.test.d.ts.map +1 -0
- package/dist/__tests__/core/routing/proxysql-manager.test.js +155 -0
- package/dist/__tests__/core/routing/proxysql-manager.test.js.map +1 -0
- package/dist/__tests__/types/index.test.d.ts +5 -0
- package/dist/__tests__/types/index.test.d.ts.map +1 -0
- package/dist/__tests__/types/index.test.js +290 -0
- package/dist/__tests__/types/index.test.js.map +1 -0
- package/dist/__tests__/utils/exceptions.test.d.ts +5 -0
- package/dist/__tests__/utils/exceptions.test.d.ts.map +1 -0
- package/dist/__tests__/utils/exceptions.test.js +142 -0
- package/dist/__tests__/utils/exceptions.test.js.map +1 -0
- package/dist/api/routes/clusters.d.ts +7 -0
- package/dist/api/routes/clusters.d.ts.map +1 -0
- package/dist/api/routes/clusters.js +123 -0
- package/dist/api/routes/clusters.js.map +1 -0
- package/dist/api/routes/config.d.ts +7 -0
- package/dist/api/routes/config.d.ts.map +1 -0
- package/dist/api/routes/config.js +65 -0
- package/dist/api/routes/config.js.map +1 -0
- package/dist/api/routes/failover.d.ts +7 -0
- package/dist/api/routes/failover.d.ts.map +1 -0
- package/dist/api/routes/failover.js +100 -0
- package/dist/api/routes/failover.js.map +1 -0
- package/dist/api/routes/instances.d.ts +11 -0
- package/dist/api/routes/instances.d.ts.map +1 -0
- package/dist/api/routes/instances.js +315 -0
- package/dist/api/routes/instances.js.map +1 -0
- package/dist/api/routes/monitoring.d.ts +7 -0
- package/dist/api/routes/monitoring.d.ts.map +1 -0
- package/dist/api/routes/monitoring.js +72 -0
- package/dist/api/routes/monitoring.js.map +1 -0
- package/dist/api/routes/webhooks.d.ts +12 -0
- package/dist/api/routes/webhooks.d.ts.map +1 -0
- package/dist/api/routes/webhooks.js +232 -0
- package/dist/api/routes/webhooks.js.map +1 -0
- package/dist/api/schemas/index.d.ts +965 -0
- package/dist/api/schemas/index.d.ts.map +1 -0
- package/dist/api/schemas/index.js +171 -0
- package/dist/api/schemas/index.js.map +1 -0
- package/dist/app.d.ts +13 -0
- package/dist/app.d.ts.map +1 -0
- package/dist/app.js +197 -0
- package/dist/app.js.map +1 -0
- package/dist/bin/clawsql.d.ts +12 -0
- package/dist/bin/clawsql.d.ts.map +1 -0
- package/dist/bin/clawsql.js +43 -0
- package/dist/bin/clawsql.js.map +1 -0
- package/dist/cli/agent/handler.d.ts +73 -0
- package/dist/cli/agent/handler.d.ts.map +1 -0
- package/dist/cli/agent/handler.js +258 -0
- package/dist/cli/agent/handler.js.map +1 -0
- package/dist/cli/agent/index.d.ts +14 -0
- package/dist/cli/agent/index.d.ts.map +1 -0
- package/dist/cli/agent/index.js +30 -0
- package/dist/cli/agent/index.js.map +1 -0
- package/dist/cli/agent/openclaw-integration.d.ts +81 -0
- package/dist/cli/agent/openclaw-integration.d.ts.map +1 -0
- package/dist/cli/agent/openclaw-integration.js +341 -0
- package/dist/cli/agent/openclaw-integration.js.map +1 -0
- package/dist/cli/agent/providers/anthropic.d.ts +27 -0
- package/dist/cli/agent/providers/anthropic.d.ts.map +1 -0
- package/dist/cli/agent/providers/anthropic.js +106 -0
- package/dist/cli/agent/providers/anthropic.js.map +1 -0
- package/dist/cli/agent/providers/base.d.ts +91 -0
- package/dist/cli/agent/providers/base.d.ts.map +1 -0
- package/dist/cli/agent/providers/base.js +24 -0
- package/dist/cli/agent/providers/base.js.map +1 -0
- package/dist/cli/agent/providers/openai.d.ts +27 -0
- package/dist/cli/agent/providers/openai.d.ts.map +1 -0
- package/dist/cli/agent/providers/openai.js +98 -0
- package/dist/cli/agent/providers/openai.js.map +1 -0
- package/dist/cli/agent/tools/index.d.ts +32 -0
- package/dist/cli/agent/tools/index.d.ts.map +1 -0
- package/dist/cli/agent/tools/index.js +263 -0
- package/dist/cli/agent/tools/index.js.map +1 -0
- package/dist/cli/commands/cleanup.d.ts +12 -0
- package/dist/cli/commands/cleanup.d.ts.map +1 -0
- package/dist/cli/commands/cleanup.js +205 -0
- package/dist/cli/commands/cleanup.js.map +1 -0
- package/dist/cli/commands/clusters.d.ts +12 -0
- package/dist/cli/commands/clusters.d.ts.map +1 -0
- package/dist/cli/commands/clusters.js +468 -0
- package/dist/cli/commands/clusters.js.map +1 -0
- package/dist/cli/commands/config.d.ts +12 -0
- package/dist/cli/commands/config.d.ts.map +1 -0
- package/dist/cli/commands/config.js +406 -0
- package/dist/cli/commands/config.js.map +1 -0
- package/dist/cli/commands/cron.d.ts +12 -0
- package/dist/cli/commands/cron.d.ts.map +1 -0
- package/dist/cli/commands/cron.js +215 -0
- package/dist/cli/commands/cron.js.map +1 -0
- package/dist/cli/commands/doctor.d.ts +13 -0
- package/dist/cli/commands/doctor.d.ts.map +1 -0
- package/dist/cli/commands/doctor.js +687 -0
- package/dist/cli/commands/doctor.js.map +1 -0
- package/dist/cli/commands/failover.d.ts +16 -0
- package/dist/cli/commands/failover.d.ts.map +1 -0
- package/dist/cli/commands/failover.js +333 -0
- package/dist/cli/commands/failover.js.map +1 -0
- package/dist/cli/commands/health.d.ts +12 -0
- package/dist/cli/commands/health.d.ts.map +1 -0
- package/dist/cli/commands/health.js +125 -0
- package/dist/cli/commands/health.js.map +1 -0
- package/dist/cli/commands/help.d.ts +12 -0
- package/dist/cli/commands/help.d.ts.map +1 -0
- package/dist/cli/commands/help.js +52 -0
- package/dist/cli/commands/help.js.map +1 -0
- package/dist/cli/commands/instances.d.ts +12 -0
- package/dist/cli/commands/instances.d.ts.map +1 -0
- package/dist/cli/commands/instances.js +801 -0
- package/dist/cli/commands/instances.js.map +1 -0
- package/dist/cli/commands/notify.d.ts +12 -0
- package/dist/cli/commands/notify.d.ts.map +1 -0
- package/dist/cli/commands/notify.js +43 -0
- package/dist/cli/commands/notify.js.map +1 -0
- package/dist/cli/commands/sql.d.ts +12 -0
- package/dist/cli/commands/sql.d.ts.map +1 -0
- package/dist/cli/commands/sql.js +90 -0
- package/dist/cli/commands/sql.js.map +1 -0
- package/dist/cli/commands/start.d.ts +12 -0
- package/dist/cli/commands/start.d.ts.map +1 -0
- package/dist/cli/commands/start.js +174 -0
- package/dist/cli/commands/start.js.map +1 -0
- package/dist/cli/commands/status.d.ts +12 -0
- package/dist/cli/commands/status.d.ts.map +1 -0
- package/dist/cli/commands/status.js +218 -0
- package/dist/cli/commands/status.js.map +1 -0
- package/dist/cli/commands/stop.d.ts +12 -0
- package/dist/cli/commands/stop.d.ts.map +1 -0
- package/dist/cli/commands/stop.js +128 -0
- package/dist/cli/commands/stop.js.map +1 -0
- package/dist/cli/commands/topology.d.ts +12 -0
- package/dist/cli/commands/topology.d.ts.map +1 -0
- package/dist/cli/commands/topology.js +106 -0
- package/dist/cli/commands/topology.js.map +1 -0
- package/dist/cli/completer.d.ts +47 -0
- package/dist/cli/completer.d.ts.map +1 -0
- package/dist/cli/completer.js +332 -0
- package/dist/cli/completer.js.map +1 -0
- package/dist/cli/formatter.d.ts +165 -0
- package/dist/cli/formatter.d.ts.map +1 -0
- package/dist/cli/formatter.js +408 -0
- package/dist/cli/formatter.js.map +1 -0
- package/dist/cli/index.d.ts +21 -0
- package/dist/cli/index.d.ts.map +1 -0
- package/dist/cli/index.js +79 -0
- package/dist/cli/index.js.map +1 -0
- package/dist/cli/raw-input.d.ts +97 -0
- package/dist/cli/raw-input.d.ts.map +1 -0
- package/dist/cli/raw-input.js +493 -0
- package/dist/cli/raw-input.js.map +1 -0
- package/dist/cli/registry.d.ts +103 -0
- package/dist/cli/registry.d.ts.map +1 -0
- package/dist/cli/registry.js +205 -0
- package/dist/cli/registry.js.map +1 -0
- package/dist/cli/repl.d.ts +83 -0
- package/dist/cli/repl.d.ts.map +1 -0
- package/dist/cli/repl.js +447 -0
- package/dist/cli/repl.js.map +1 -0
- package/dist/cli/ui/components.d.ts +144 -0
- package/dist/cli/ui/components.d.ts.map +1 -0
- package/dist/cli/ui/components.js +331 -0
- package/dist/cli/ui/components.js.map +1 -0
- package/dist/cli/ui/index.d.ts +7 -0
- package/dist/cli/ui/index.d.ts.map +1 -0
- package/dist/cli/ui/index.js +23 -0
- package/dist/cli/ui/index.js.map +1 -0
- package/dist/cli/utils/docker-files.d.ts +39 -0
- package/dist/cli/utils/docker-files.d.ts.map +1 -0
- package/dist/cli/utils/docker-files.js +223 -0
- package/dist/cli/utils/docker-files.js.map +1 -0
- package/dist/cli/utils/docker-prereq.d.ts +48 -0
- package/dist/cli/utils/docker-prereq.d.ts.map +1 -0
- package/dist/cli/utils/docker-prereq.js +203 -0
- package/dist/cli/utils/docker-prereq.js.map +1 -0
- package/dist/config/settings.d.ts +594 -0
- package/dist/config/settings.d.ts.map +1 -0
- package/dist/config/settings.js +250 -0
- package/dist/config/settings.js.map +1 -0
- package/dist/core/discovery/cluster-view.d.ts +50 -0
- package/dist/core/discovery/cluster-view.d.ts.map +1 -0
- package/dist/core/discovery/cluster-view.js +235 -0
- package/dist/core/discovery/cluster-view.js.map +1 -0
- package/dist/core/discovery/scanner.d.ts +70 -0
- package/dist/core/discovery/scanner.d.ts.map +1 -0
- package/dist/core/discovery/scanner.js +197 -0
- package/dist/core/discovery/scanner.js.map +1 -0
- package/dist/core/discovery/topology.d.ts +118 -0
- package/dist/core/discovery/topology.d.ts.map +1 -0
- package/dist/core/discovery/topology.js +550 -0
- package/dist/core/discovery/topology.js.map +1 -0
- package/dist/core/failover/candidate-selector.d.ts +46 -0
- package/dist/core/failover/candidate-selector.d.ts.map +1 -0
- package/dist/core/failover/candidate-selector.js +70 -0
- package/dist/core/failover/candidate-selector.js.map +1 -0
- package/dist/core/failover/executor.d.ts +104 -0
- package/dist/core/failover/executor.d.ts.map +1 -0
- package/dist/core/failover/executor.js +248 -0
- package/dist/core/failover/executor.js.map +1 -0
- package/dist/core/failover/operation-builder.d.ts +71 -0
- package/dist/core/failover/operation-builder.d.ts.map +1 -0
- package/dist/core/failover/operation-builder.js +157 -0
- package/dist/core/failover/operation-builder.js.map +1 -0
- package/dist/core/failover/operation-runner.d.ts +75 -0
- package/dist/core/failover/operation-runner.d.ts.map +1 -0
- package/dist/core/failover/operation-runner.js +191 -0
- package/dist/core/failover/operation-runner.js.map +1 -0
- package/dist/core/failover/promoter.d.ts +33 -0
- package/dist/core/failover/promoter.d.ts.map +1 -0
- package/dist/core/failover/promoter.js +97 -0
- package/dist/core/failover/promoter.js.map +1 -0
- package/dist/core/failover/recovery-manager.d.ts +47 -0
- package/dist/core/failover/recovery-manager.d.ts.map +1 -0
- package/dist/core/failover/recovery-manager.js +145 -0
- package/dist/core/failover/recovery-manager.js.map +1 -0
- package/dist/core/failover/types.d.ts +54 -0
- package/dist/core/failover/types.d.ts.map +1 -0
- package/dist/core/failover/types.js +8 -0
- package/dist/core/failover/types.js.map +1 -0
- package/dist/core/monitoring/collector.d.ts +25 -0
- package/dist/core/monitoring/collector.d.ts.map +1 -0
- package/dist/core/monitoring/collector.js +115 -0
- package/dist/core/monitoring/collector.js.map +1 -0
- package/dist/core/monitoring/exporters.d.ts +49 -0
- package/dist/core/monitoring/exporters.d.ts.map +1 -0
- package/dist/core/monitoring/exporters.js +126 -0
- package/dist/core/monitoring/exporters.js.map +1 -0
- package/dist/core/routing/proxysql-manager.d.ts +213 -0
- package/dist/core/routing/proxysql-manager.d.ts.map +1 -0
- package/dist/core/routing/proxysql-manager.js +632 -0
- package/dist/core/routing/proxysql-manager.js.map +1 -0
- package/dist/core/sync/replica-recovery.d.ts +40 -0
- package/dist/core/sync/replica-recovery.d.ts.map +1 -0
- package/dist/core/sync/replica-recovery.js +134 -0
- package/dist/core/sync/replica-recovery.js.map +1 -0
- package/dist/core/sync/sync-coordinator.d.ts +83 -0
- package/dist/core/sync/sync-coordinator.d.ts.map +1 -0
- package/dist/core/sync/sync-coordinator.js +254 -0
- package/dist/core/sync/sync-coordinator.js.map +1 -0
- package/dist/core/sync/topology-watcher.d.ts +76 -0
- package/dist/core/sync/topology-watcher.d.ts.map +1 -0
- package/dist/core/sync/topology-watcher.js +222 -0
- package/dist/core/sync/topology-watcher.js.map +1 -0
- package/dist/core/sync/types.d.ts +85 -0
- package/dist/core/sync/types.d.ts.map +1 -0
- package/dist/core/sync/types.js +8 -0
- package/dist/core/sync/types.js.map +1 -0
- package/dist/index.d.ts +5 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +9 -0
- package/dist/index.js.map +1 -0
- package/dist/types/index.d.ts +212 -0
- package/dist/types/index.d.ts.map +1 -0
- package/dist/types/index.js +153 -0
- package/dist/types/index.js.map +1 -0
- package/dist/utils/database.d.ts +62 -0
- package/dist/utils/database.d.ts.map +1 -0
- package/dist/utils/database.js +257 -0
- package/dist/utils/database.js.map +1 -0
- package/dist/utils/exceptions.d.ts +69 -0
- package/dist/utils/exceptions.d.ts.map +1 -0
- package/dist/utils/exceptions.js +121 -0
- package/dist/utils/exceptions.js.map +1 -0
- package/dist/utils/logger.d.ts +20 -0
- package/dist/utils/logger.d.ts.map +1 -0
- package/dist/utils/logger.js +90 -0
- package/dist/utils/logger.js.map +1 -0
- package/dist/utils/mysql-client.d.ts +43 -0
- package/dist/utils/mysql-client.d.ts.map +1 -0
- package/dist/utils/mysql-client.js +125 -0
- package/dist/utils/mysql-client.js.map +1 -0
- package/docker/Dockerfile +61 -0
- package/docker/Dockerfile.node +41 -0
- package/docker/grafana/dashboards/clawsql.json +212 -0
- package/docker/grafana/provisioning/dashboards/dashboards.yml +13 -0
- package/docker/grafana/provisioning/datasources/datasources.yml +12 -0
- package/docker/init/primary.sql +26 -0
- package/docker/init/replica.sql +16 -0
- package/docker/orchestrator/orchestrator.conf.json +98 -0
- package/docker/prometheus/prometheus.yml +45 -0
- package/docker/proxysql/entrypoint.sh +8 -0
- package/docker/proxysql/init.sql.demo +30 -0
- package/docker/proxysql/proxysql.cnf +38 -0
- package/docker-compose.demo.yml +115 -0
- package/docker-compose.yml +217 -0
- package/init/primary.sql +19 -0
- package/init/replica.sql +13 -0
- package/package.json +84 -0
|
@@ -0,0 +1,801 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* ClawSQL CLI - Instances Command
|
|
4
|
+
*
|
|
5
|
+
* Manage MySQL instances.
|
|
6
|
+
*/
|
|
7
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
8
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
9
|
+
};
|
|
10
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
11
|
+
exports.instancesCommand = void 0;
|
|
12
|
+
const chalk_1 = __importDefault(require("chalk"));
|
|
13
|
+
const scanner_js_1 = require("../../core/discovery/scanner.js");
|
|
14
|
+
const mysql_client_js_1 = require("../../utils/mysql-client.js");
|
|
15
|
+
const ora_1 = __importDefault(require("ora"));
|
|
16
|
+
/**
|
|
17
|
+
* Instances command
|
|
18
|
+
*/
|
|
19
|
+
exports.instancesCommand = {
|
|
20
|
+
name: 'instances',
|
|
21
|
+
description: 'Manage MySQL instances',
|
|
22
|
+
usage: '/instances <list|register|discover|remove|replication|setup-replication|read-only|writeable|start-slave|stop-slave|reset-slave|relocate|begin-maintenance|end-maintenance> [args...]',
|
|
23
|
+
handler: async (args, ctx) => {
|
|
24
|
+
const formatter = ctx.formatter;
|
|
25
|
+
if (args.length === 0) {
|
|
26
|
+
console.log(formatter.error('Missing subcommand. Usage: /instances <subcommand> [args...]'));
|
|
27
|
+
console.log(formatter.info(' list - List discovered instances'));
|
|
28
|
+
console.log(formatter.info(' register - Register a new instance'));
|
|
29
|
+
console.log(formatter.info(' discover - Scan network for instances'));
|
|
30
|
+
console.log(formatter.info(' remove - Remove instance from topology'));
|
|
31
|
+
console.log(formatter.info(' replication - Show detailed replication status'));
|
|
32
|
+
console.log(formatter.info(' setup-replication - Configure replication (direct MySQL)'));
|
|
33
|
+
console.log(formatter.info(' read-only - Set instance read-only (via Orchestrator)'));
|
|
34
|
+
console.log(formatter.info(' writeable - Set instance writeable (via Orchestrator)'));
|
|
35
|
+
console.log(formatter.info(' start-slave - Start replication (via Orchestrator)'));
|
|
36
|
+
console.log(formatter.info(' stop-slave - Stop replication (via Orchestrator)'));
|
|
37
|
+
console.log(formatter.info(' reset-slave - Reset replication (via Orchestrator)'));
|
|
38
|
+
console.log(formatter.info(' relocate - Move replica to follow new master'));
|
|
39
|
+
console.log(formatter.info(' begin-maintenance - Put instance in maintenance mode'));
|
|
40
|
+
console.log(formatter.info(' end-maintenance - Remove instance from maintenance mode'));
|
|
41
|
+
return;
|
|
42
|
+
}
|
|
43
|
+
const subcommand = args[0].toLowerCase();
|
|
44
|
+
switch (subcommand) {
|
|
45
|
+
case 'list':
|
|
46
|
+
await listInstances(ctx);
|
|
47
|
+
break;
|
|
48
|
+
case 'register':
|
|
49
|
+
await registerInstance(args.slice(1), ctx);
|
|
50
|
+
break;
|
|
51
|
+
case 'discover':
|
|
52
|
+
await discoverInstances(args.slice(1), ctx);
|
|
53
|
+
break;
|
|
54
|
+
case 'remove':
|
|
55
|
+
case 'forget':
|
|
56
|
+
await removeInstance(args.slice(1), ctx);
|
|
57
|
+
break;
|
|
58
|
+
case 'replication':
|
|
59
|
+
await showReplicationStatus(args.slice(1), ctx);
|
|
60
|
+
break;
|
|
61
|
+
case 'setup-replication':
|
|
62
|
+
await setupReplication(args.slice(1), ctx);
|
|
63
|
+
break;
|
|
64
|
+
case 'read-only':
|
|
65
|
+
await setReadOnly(args.slice(1), ctx);
|
|
66
|
+
break;
|
|
67
|
+
case 'writeable':
|
|
68
|
+
await setWriteable(args.slice(1), ctx);
|
|
69
|
+
break;
|
|
70
|
+
case 'start-slave':
|
|
71
|
+
await startSlave(args.slice(1), ctx);
|
|
72
|
+
break;
|
|
73
|
+
case 'stop-slave':
|
|
74
|
+
await stopSlave(args.slice(1), ctx);
|
|
75
|
+
break;
|
|
76
|
+
case 'reset-slave':
|
|
77
|
+
await resetSlave(args.slice(1), ctx);
|
|
78
|
+
break;
|
|
79
|
+
case 'relocate':
|
|
80
|
+
await relocateReplica(args.slice(1), ctx);
|
|
81
|
+
break;
|
|
82
|
+
case 'begin-maintenance':
|
|
83
|
+
await beginMaintenance(args.slice(1), ctx);
|
|
84
|
+
break;
|
|
85
|
+
case 'end-maintenance':
|
|
86
|
+
await endMaintenance(args.slice(1), ctx);
|
|
87
|
+
break;
|
|
88
|
+
default:
|
|
89
|
+
console.log(formatter.error(`Unknown subcommand: ${subcommand}`));
|
|
90
|
+
console.log(formatter.info('Available: list, register, discover, remove, replication, setup-replication, read-only, writeable, start-slave, stop-slave, reset-slave, relocate, begin-maintenance, end-maintenance'));
|
|
91
|
+
}
|
|
92
|
+
},
|
|
93
|
+
};
|
|
94
|
+
/**
|
|
95
|
+
* List discovered instances
|
|
96
|
+
*/
|
|
97
|
+
/**
 * List every instance known to the orchestrator, gathered from each
 * cluster's topology (primary first, then its replicas).
 *
 * Output respects ctx.outputFormat: 'json' prints a machine-readable
 * `{ instances: [...] }` document; anything else prints a formatted table
 * followed by a total count. Orchestrator errors are caught and reported
 * via the formatter — this function never throws.
 *
 * @param {object} ctx - Command context with `formatter`, `orchestrator`
 *   and `outputFormat`.
 */
async function listInstances(ctx) {
    const formatter = ctx.formatter;
    const orchestrator = ctx.orchestrator;
    try {
        const clusters = await orchestrator.getClusters();
        // Topology lookups are independent per cluster, so fetch them in
        // parallel instead of one-by-one; Promise.all preserves cluster order.
        const topologies = await Promise.all(clusters.map((name) => orchestrator.getTopology(name)));
        const instances = [];
        clusters.forEach((clusterName, idx) => {
            const cluster = topologies[idx];
            if (!cluster)
                return;
            if (cluster.primary) {
                instances.push({
                    host: cluster.primary.host,
                    port: cluster.primary.port,
                    role: 'primary',
                    state: cluster.primary.state,
                    cluster: clusterName,
                    version: cluster.primary.version,
                });
            }
            for (const replica of cluster.replicas) {
                instances.push({
                    host: replica.host,
                    port: replica.port,
                    role: 'replica',
                    state: replica.state,
                    cluster: clusterName,
                    lag: replica.replicationLag,
                    version: replica.version,
                });
            }
        });
        if (instances.length === 0) {
            if (ctx.outputFormat === 'json') {
                console.log(JSON.stringify({ instances: [] }, null, 2));
            }
            else {
                console.log(formatter.warning('No instances discovered.'));
                console.log(formatter.info('Use /instances register <host> to add instances.'));
                console.log(formatter.info('Use /instances discover --network <cidr> to scan for instances.'));
            }
            return;
        }
        // JSON output
        if (ctx.outputFormat === 'json') {
            console.log(JSON.stringify({ instances }, null, 2));
            return;
        }
        // Table output
        console.log(formatter.header('Discovered Instances'));
        console.log(formatter.table(instances, [
            { key: 'host', header: 'Host', width: 25 },
            { key: 'port', header: 'Port', width: 8 },
            { key: 'role', header: 'Role', width: 10 },
            { key: 'state', header: 'State', width: 10 },
            { key: 'cluster', header: 'Cluster', width: 20 },
            { key: 'version', header: 'Version', width: 10 },
            { key: 'lag', header: 'Lag', width: 8 },
        ]));
        console.log(formatter.info(`Total: ${instances.length} instances`));
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        console.log(formatter.error(`Failed to list instances: ${message}`));
    }
}
|
|
163
|
+
/**
|
|
164
|
+
* Register a new instance
|
|
165
|
+
*/
|
|
166
|
+
/**
 * Register a single MySQL instance with the orchestrator.
 *
 * Probes host:port first to confirm a MySQL server is reachable, then asks
 * the orchestrator to discover it. Credentials fall back to the configured
 * admin user/password when not supplied on the command line.
 */
async function registerInstance(args, ctx) {
    const { formatter, orchestrator } = ctx;
    const target = parseInstanceArgs(args);
    if (!target) {
        console.log(formatter.error('Missing host. Usage: /instances register <host> [port]'));
        const usageLines = [
            ' <host> MySQL hostname or IP',
            ' [port] MySQL port (default: 3306)',
            ' --user <u> MySQL username',
            ' --password <p> MySQL password',
        ];
        for (const line of usageLines) {
            console.log(formatter.info(line));
        }
        return;
    }
    const { host, port, user, password } = target;
    console.log(formatter.info(`Registering instance ${host}:${port}...`));
    // Verify the endpoint actually speaks MySQL before touching the orchestrator.
    const probe = await (0, scanner_js_1.probeMySQLInstance)(host, port, user || ctx.settings.mysql.adminUser, password || ctx.settings.mysql.adminPassword, 5000);
    if (!probe.isMySQL) {
        console.log(formatter.error(`No MySQL instance found at ${host}:${port}`));
        console.log(formatter.info('Make sure the MySQL instance is running and accessible.'));
        return;
    }
    console.log(formatter.keyValue('MySQL Version', probe.version || 'unknown'));
    // Hand the verified endpoint to Orchestrator for topology discovery.
    try {
        const registered = await orchestrator.discoverInstance(host, port);
        if (registered) {
            console.log(formatter.success(`Instance ${host}:${port} registered successfully.`));
        }
        else {
            console.log(formatter.error(`Failed to register instance ${host}:${port}.`));
            console.log(formatter.info('Check Orchestrator logs for details.'));
        }
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        console.log(formatter.error(`Registration failed: ${message}`));
    }
}
|
|
205
|
+
/**
|
|
206
|
+
* Discover instances on a network
|
|
207
|
+
*/
|
|
208
|
+
/**
 * Scan a network CIDR for MySQL instances and optionally register each one.
 *
 * Shows a live spinner while the scan runs, prints the findings as a table,
 * and — with --auto-register — asks the orchestrator to register every
 * responding MySQL endpoint, reporting per-instance outcome.
 */
async function discoverInstances(args, ctx) {
    const formatter = ctx.formatter;
    // The network is the only positional argument containing '/'.
    const cidr = args.find((arg) => !arg.startsWith('--') && arg.includes('/'));
    const shouldRegister = args.includes('--auto-register');
    const firstPort = parseNumberArg(args, '--port-start', 3306);
    const lastPort = parseNumberArg(args, '--port-end', 3306);
    const user = parseStringArg(args, '--user') || ctx.settings.mysql.adminUser;
    const password = parseStringArg(args, '--password') || ctx.settings.mysql.adminPassword;
    if (!cidr) {
        console.log(formatter.error('Missing network. Usage: /instances discover <network> [options]'));
        console.log(formatter.info(' <network> Network CIDR (e.g., 192.168.1.0/24)'));
        console.log(formatter.info(' --port-start N Port range start (default: 3306)'));
        console.log(formatter.info(' --port-end N Port range end (default: 3306)'));
        console.log(formatter.info(' --auto-register Register discovered instances'));
        console.log(formatter.info(' --user <u> MySQL username'));
        console.log(formatter.info(' --password <p> MySQL password'));
        return;
    }
    console.log(formatter.header('Network Discovery'));
    console.log(formatter.keyValue('Network', cidr));
    console.log(formatter.keyValue('Ports', `${firstPort}-${lastPort}`));
    console.log(formatter.keyValue('Auto-register', shouldRegister ? 'yes' : 'no'));
    console.log();
    const scanner = new scanner_js_1.NetworkScanner({
        network: cidr,
        portStart: firstPort,
        portEnd: lastPort,
        timeout: 2000,
        maxConcurrent: 50,
        user,
        password,
    });
    // The progress callback keeps the spinner text current during the scan.
    const spinner = (0, ora_1.default)('Scanning network...').start();
    const results = await scanner.scan((found, scanned) => {
        spinner.text = `Scanning... ${found} MySQL instances found (${scanned} hosts scanned)`;
    });
    spinner.succeed(`Scan complete: ${results.length} MySQL instances found`);
    if (results.length === 0) {
        console.log(formatter.warning('No MySQL instances found on the network.'));
        return;
    }
    // Render what we found.
    console.log();
    console.log(formatter.header('Discovered MySQL Instances'));
    const rows = results.map((result) => ({
        host: result.host,
        port: result.port,
        version: result.version || (result.isMySQL ? 'unknown' : 'N/A'),
        status: result.isMySQL ? 'MySQL' : 'Not MySQL',
        error: result.error || '',
    }));
    console.log(formatter.table(rows, [
        { key: 'host', header: 'Host', width: 20 },
        { key: 'port', header: 'Port', width: 8 },
        { key: 'version', header: 'Version', width: 12 },
        { key: 'status', header: 'Status', width: 12 },
        { key: 'error', header: 'Note', width: 25 },
    ]));
    if (shouldRegister) {
        console.log();
        console.log(formatter.info('Registering discovered instances...'));
        const mysqlResults = results.filter((result) => result.isMySQL);
        let registered = 0;
        for (const instance of mysqlResults) {
            try {
                const ok = await ctx.orchestrator.discoverInstance(instance.host, instance.port);
                if (ok) {
                    registered += 1;
                    console.log(formatter.keyValue(` ${instance.host}:${instance.port}`, chalk_1.default.green('registered')));
                }
            }
            catch (error) {
                const message = error instanceof Error ? error.message : String(error);
                console.log(formatter.keyValue(` ${instance.host}:${instance.port}`, chalk_1.default.red(message)));
            }
        }
        console.log();
        console.log(formatter.success(`Registered ${registered} of ${mysqlResults.length} instances`));
    }
    else {
        console.log();
        console.log(formatter.info('To register these instances, run:'));
        console.log(formatter.info(` /instances discover ${cidr} --auto-register`));
        console.log(formatter.info('Or register individually with:'));
        console.log(formatter.info(' /instances register <host> [port]'));
    }
}
|
|
297
|
+
/**
|
|
298
|
+
* Remove an instance from the topology
|
|
299
|
+
*/
|
|
300
|
+
/**
 * Remove (forget) an instance: ask the orchestrator to drop host:port from
 * its topology view. Reports success or failure via the formatter; never
 * throws.
 */
async function removeInstance(args, ctx) {
    const { formatter, orchestrator } = ctx;
    const target = parseInstanceArgs(args);
    if (!target) {
        console.log(formatter.error('Missing host. Usage: /instances remove <host> [port]'));
        return;
    }
    const { host, port } = target;
    console.log(formatter.info(`Removing instance ${host}:${port} from topology...`));
    try {
        const forgotten = await orchestrator.forgetInstance(host, port);
        console.log(forgotten
            ? formatter.success(`Instance ${host}:${port} removed from topology.`)
            : formatter.error(`Failed to remove instance ${host}:${port}.`));
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        console.log(formatter.error(`Removal failed: ${message}`));
    }
}
|
|
325
|
+
/**
|
|
326
|
+
* Parse instance arguments
|
|
327
|
+
*/
|
|
328
|
+
/**
 * Parse `<host> [port]` plus `--port/--user/--password` flags from a raw
 * argument list.
 *
 * The host is the first non-flag token that does not contain '/' (so a CIDR
 * accidentally passed here is not mistaken for a host). The port comes from
 * `--port` when present, otherwise from the positional token following the
 * host, otherwise defaults to 3306.
 *
 * BUG FIX: the previous `parseNumberArg(args, '--port', 3306) || positional`
 * form never used the positional port — parseNumberArg returns the truthy
 * default 3306 when --port is absent, short-circuiting the `||`. The flag is
 * now checked explicitly, and a non-numeric positional falls back to 3306
 * instead of producing NaN.
 *
 * @param {string[]} args - Raw subcommand arguments.
 * @returns {{host: string, port: number, user?: string, password?: string}|null}
 *   Parsed parts, or null when no host was supplied.
 */
function parseInstanceArgs(args) {
    const hostArg = args.find(a => !a.startsWith('--') && !a.includes('/'));
    if (!hostArg)
        return null;
    const host = hostArg;
    let port = 3306;
    if (args.includes('--port')) {
        port = parseNumberArg(args, '--port', 3306);
    }
    else {
        const positional = args[args.indexOf(hostArg) + 1];
        if (positional && !positional.startsWith('--')) {
            const parsedPort = parseInt(positional, 10);
            if (!isNaN(parsedPort))
                port = parsedPort;
        }
    }
    const user = parseStringArg(args, '--user');
    const password = parseStringArg(args, '--password');
    return { host, port, user, password };
}
|
|
339
|
+
/**
|
|
340
|
+
* Parse a number argument
|
|
341
|
+
*/
|
|
342
|
+
/**
 * Read the integer value following a named flag (e.g. `--port 3307`).
 * Returns defaultValue when the flag is absent, has no following token,
 * or the following token is not a parseable integer.
 */
function parseNumberArg(args, name, defaultValue) {
    const flagIndex = args.indexOf(name);
    const hasValue = flagIndex !== -1 && flagIndex + 1 < args.length;
    if (!hasValue)
        return defaultValue;
    const value = parseInt(args[flagIndex + 1], 10);
    return isNaN(value) ? defaultValue : value;
}
|
|
349
|
+
/**
 * Read the string value following a named flag (e.g. `--user repl`).
 * Returns undefined when the flag is absent or has no value.
 */
function parseStringArg(args, name) {
    const at = args.indexOf(name);
    const hasValue = at !== -1 && at + 1 < args.length;
    return hasValue ? args[at + 1] : undefined;
}
|
|
358
|
+
/**
 * Show detailed replication status for an instance.
 *
 * Combines the summary from the shared MySQL client (IO/SQL thread state,
 * replication lag) with extra detail fetched over a direct ad-hoc
 * connection (master coordinates, relay log position, last errors).
 *
 * Fix: the ad-hoc connection is now closed in a `finally` block, so it is
 * released even when the SHOW SLAVE STATUS query throws (previously the
 * connection leaked on query failure).
 */
async function showReplicationStatus(args, ctx) {
    const formatter = ctx.formatter;
    const parsed = parseHostPortArgs(args);
    if (!parsed) {
        console.log(formatter.error('Missing host. Usage: /instances replication <host:port>'));
        return;
    }
    const { host, port } = parsed;
    const instanceId = `${host}:${port}`;
    console.log(formatter.header(`Replication Status: ${instanceId}`));
    try {
        const mysqlClient = (0, mysql_client_js_1.getMySQLClient)();
        // Summary of SHOW SLAVE STATUS, distilled by the shared client.
        const status = await mysqlClient.getReplicationStatus(host, port);
        if (!status) {
            console.log(formatter.info('No replication configured (not a replica).'));
            console.log(formatter.info('This instance may be a primary or has no replication set up.'));
            return;
        }
        // Display replication status summary.
        console.log();
        console.log(formatter.keyValue('IO Thread Running', status.ioRunning ? chalk_1.default.green('Yes') : chalk_1.default.red('No')));
        console.log(formatter.keyValue('SQL Thread Running', status.sqlRunning ? chalk_1.default.green('Yes') : chalk_1.default.red('No')));
        console.log(formatter.keyValue('Seconds Behind Master', status.secondsBehind !== null ? `${status.secondsBehind}s` : 'N/A'));
        // Show additional details via a direct query against the instance.
        const mysql = await import('mysql2/promise');
        const connection = await mysql.createConnection({
            host,
            port,
            user: ctx.settings.mysql.adminUser,
            password: ctx.settings.mysql.adminPassword,
            connectTimeout: 5000,
        });
        let slaveStatus;
        try {
            const [rows] = await connection.execute('SHOW SLAVE STATUS');
            slaveStatus = rows[0];
        }
        finally {
            // Always release the ad-hoc connection; close errors are
            // deliberately ignored (best-effort cleanup).
            await connection.end().catch(() => { });
        }
        if (slaveStatus) {
            console.log();
            console.log(formatter.keyValue('Master Host', String(slaveStatus.Master_Host || 'N/A')));
            console.log(formatter.keyValue('Master Port', String(slaveStatus.Master_Port || 'N/A')));
            console.log(formatter.keyValue('Master User', String(slaveStatus.Master_User || 'N/A')));
            console.log(formatter.keyValue('Relay Log File', String(slaveStatus.Relay_Log_File || 'N/A')));
            console.log(formatter.keyValue('Relay Log Pos', String(slaveStatus.Relay_Log_Pos || 'N/A')));
            console.log(formatter.keyValue('Exec Master Log Pos', String(slaveStatus.Exec_Master_Log_Pos || 'N/A')));
            if (slaveStatus.Last_IO_Error) {
                console.log();
                console.log(formatter.error('Last IO Error:'));
                console.log(formatter.info(`  ${slaveStatus.Last_IO_Error}`));
            }
            if (slaveStatus.Last_SQL_Error) {
                console.log();
                console.log(formatter.error('Last SQL Error:'));
                console.log(formatter.info(`  ${slaveStatus.Last_SQL_Error}`));
            }
        }
        console.log();
        if (status.ioRunning && status.sqlRunning) {
            console.log(formatter.success('Replication is healthy.'));
        }
        else {
            console.log(formatter.warning('Replication has issues. Check IO and SQL thread status.'));
        }
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        console.log(formatter.error(`Failed to get replication status: ${message}`));
    }
}
|
|
429
|
+
/**
 * Set up replication for an instance, pointing it at the given master
 * using GTID auto-positioning (CHANGE MASTER TO ... MASTER_AUTO_POSITION = 1).
 *
 * Usage: /instances setup-replication --host <host:port> --master <master:port>
 *        [--user <user>] [--password <password>]
 *
 * Fix: the admin connection is now closed in a `finally` block, so it is
 * released even when STOP SLAVE / CHANGE MASTER / START SLAVE throws
 * (previously the connection leaked on any mid-sequence failure).
 */
async function setupReplication(args, ctx) {
    const formatter = ctx.formatter;
    // Parse arguments
    const hostArg = parseStringArg(args, '--host');
    const masterArg = parseStringArg(args, '--master');
    const user = parseStringArg(args, '--user') || 'repl';
    const password = parseStringArg(args, '--password') || 'replpassword';
    if (!hostArg || !masterArg) {
        console.log(formatter.error('Missing required arguments. Usage: /instances setup-replication --host <host:port> --master <master:port>'));
        console.log(formatter.info(' --host <host:port> Instance to configure as replica'));
        console.log(formatter.info(' --master <master:port> Master instance to replicate from'));
        console.log(formatter.info(' --user <user> Replication user (default: repl)'));
        console.log(formatter.info(' --password <password> Replication password'));
        return;
    }
    // Parse host and master, defaulting ports to 3306.
    const [host, hostPortStr] = hostArg.split(':');
    const port = parseInt(hostPortStr || '3306', 10);
    const [masterHost, masterPortStr] = masterArg.split(':');
    const masterPort = parseInt(masterPortStr || '3306', 10);
    console.log(formatter.header('Setting Up Replication'));
    console.log(formatter.keyValue('Replica', `${host}:${port}`));
    console.log(formatter.keyValue('Master', `${masterHost}:${masterPort}`));
    console.log(formatter.keyValue('Replication User', user));
    console.log();
    try {
        const mysql = await import('mysql2/promise');
        const connection = await mysql.createConnection({
            host,
            port,
            user: ctx.settings.mysql.adminUser,
            password: ctx.settings.mysql.adminPassword,
            connectTimeout: 10000,
        });
        let status;
        try {
            // Stop any existing replication threads before reconfiguring.
            console.log(formatter.info('Stopping existing replication...'));
            await connection.execute('STOP SLAVE');
            // Configure replication with GTID auto-positioning.
            console.log(formatter.info('Configuring replication...'));
            await connection.execute(`
      CHANGE MASTER TO
        MASTER_HOST = ?,
        MASTER_PORT = ?,
        MASTER_USER = ?,
        MASTER_PASSWORD = ?,
        MASTER_AUTO_POSITION = 1
    `, [masterHost, masterPort, user, password]);
            // Start slave threads.
            console.log(formatter.info('Starting replication...'));
            await connection.execute('START SLAVE');
            // Give the threads a moment to connect, then check status.
            await new Promise(resolve => setTimeout(resolve, 2000));
            const [rows] = await connection.execute('SHOW SLAVE STATUS');
            status = rows[0];
        }
        finally {
            // Always close the admin connection; close errors are
            // deliberately ignored (best-effort cleanup).
            await connection.end().catch(() => { });
        }
        if (status) {
            const ioRunning = status.Slave_IO_Running === 'Yes';
            const sqlRunning = status.Slave_SQL_Running === 'Yes';
            console.log();
            console.log(formatter.keyValue('IO Thread', ioRunning ? chalk_1.default.green('Running') : chalk_1.default.red('Not running')));
            console.log(formatter.keyValue('SQL Thread', sqlRunning ? chalk_1.default.green('Running') : chalk_1.default.red('Not running')));
            if (ioRunning && sqlRunning) {
                console.log();
                console.log(formatter.success('Replication configured and running successfully!'));
            }
            else {
                console.log();
                console.log(formatter.warning('Replication configured but not fully running.'));
                if (status.Last_IO_Error) {
                    console.log(formatter.error(`IO Error: ${status.Last_IO_Error}`));
                }
                if (status.Last_SQL_Error) {
                    console.log(formatter.error(`SQL Error: ${status.Last_SQL_Error}`));
                }
            }
        }
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        console.log(formatter.error(`Failed to set up replication: ${message}`));
    }
}
|
|
514
|
+
/**
 * Set instance read-only (via Orchestrator).
 * The instance must already be registered in Orchestrator.
 */
async function setReadOnly(args, ctx) {
    const { formatter, orchestrator } = ctx;
    const target = parseHostPortArgs(args);
    if (target === null) {
        console.log(formatter.error('Missing host. Usage: /instances read-only <host:port>'));
        console.log(formatter.info(' <host:port> Instance to set read-only'));
        console.log(formatter.info('Note: Instance must be registered in Orchestrator'));
        return;
    }
    const { host, port } = target;
    console.log(formatter.info(`Setting ${host}:${port} to read-only...`));
    try {
        const ok = await orchestrator.setReadOnly(host, port);
        if (!ok) {
            console.log(formatter.error(`Failed to set ${host}:${port} read-only.`));
            console.log(formatter.info('Ensure the instance is registered in Orchestrator.'));
            return;
        }
        console.log(formatter.success(`Instance ${host}:${port} is now read-only.`));
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        console.log(formatter.error(`Failed to set read-only: ${message}`));
    }
}
|
|
545
|
+
/**
 * Set instance writeable (via Orchestrator).
 * The instance must already be registered in Orchestrator.
 */
async function setWriteable(args, ctx) {
    const { formatter, orchestrator } = ctx;
    const target = parseHostPortArgs(args);
    if (target === null) {
        console.log(formatter.error('Missing host. Usage: /instances writeable <host:port>'));
        console.log(formatter.info(' <host:port> Instance to set writeable'));
        console.log(formatter.info('Note: Instance must be registered in Orchestrator'));
        return;
    }
    const { host, port } = target;
    console.log(formatter.info(`Setting ${host}:${port} to writeable...`));
    try {
        const ok = await orchestrator.setWriteable(host, port);
        if (!ok) {
            console.log(formatter.error(`Failed to set ${host}:${port} writeable.`));
            console.log(formatter.info('Ensure the instance is registered in Orchestrator.'));
            return;
        }
        console.log(formatter.success(`Instance ${host}:${port} is now writeable.`));
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        console.log(formatter.error(`Failed to set writeable: ${message}`));
    }
}
|
|
576
|
+
/**
 * Start replication on an instance (via Orchestrator).
 * The instance must already be registered in Orchestrator.
 */
async function startSlave(args, ctx) {
    const { formatter, orchestrator } = ctx;
    const target = parseHostPortArgs(args);
    if (target === null) {
        console.log(formatter.error('Missing host. Usage: /instances start-slave <host:port>'));
        console.log(formatter.info(' <host:port> Instance to start replication on'));
        console.log(formatter.info('Note: Instance must be registered in Orchestrator'));
        return;
    }
    const { host, port } = target;
    console.log(formatter.info(`Starting replication on ${host}:${port}...`));
    try {
        const ok = await orchestrator.startSlave(host, port);
        if (!ok) {
            console.log(formatter.error(`Failed to start replication on ${host}:${port}.`));
            console.log(formatter.info('Ensure the instance is registered and replication is configured.'));
            return;
        }
        console.log(formatter.success(`Replication started on ${host}:${port}.`));
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        console.log(formatter.error(`Failed to start replication: ${message}`));
    }
}
|
|
607
|
+
/**
 * Stop replication on an instance (via Orchestrator).
 * The instance must already be registered in Orchestrator.
 */
async function stopSlave(args, ctx) {
    const { formatter, orchestrator } = ctx;
    const target = parseHostPortArgs(args);
    if (target === null) {
        console.log(formatter.error('Missing host. Usage: /instances stop-slave <host:port>'));
        console.log(formatter.info(' <host:port> Instance to stop replication on'));
        console.log(formatter.info('Note: Instance must be registered in Orchestrator'));
        return;
    }
    const { host, port } = target;
    console.log(formatter.info(`Stopping replication on ${host}:${port}...`));
    try {
        const ok = await orchestrator.stopSlave(host, port);
        if (!ok) {
            console.log(formatter.error(`Failed to stop replication on ${host}:${port}.`));
            console.log(formatter.info('Ensure the instance is registered in Orchestrator.'));
            return;
        }
        console.log(formatter.success(`Replication stopped on ${host}:${port}.`));
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        console.log(formatter.error(`Failed to stop replication: ${message}`));
    }
}
|
|
638
|
+
/**
 * Reset replication on an instance (via Orchestrator).
 * The instance must already be registered in Orchestrator.
 *
 * This is destructive (removes all replication configuration), so it
 * requires an explicit `--confirm` flag.
 *
 * Fix: the "use --confirm to proceed" hint previously printed even when
 * `--confirm` was already supplied; it is now shown only when
 * confirmation is still required.
 */
async function resetSlave(args, ctx) {
    const formatter = ctx.formatter;
    const orchestrator = ctx.orchestrator;
    const parsed = parseHostPortArgs(args);
    if (!parsed) {
        console.log(formatter.error('Missing host. Usage: /instances reset-slave <host:port>'));
        console.log(formatter.info(' <host:port> Instance to reset replication on'));
        console.log(formatter.info('Note: Instance must be registered in Orchestrator'));
        console.log(formatter.warning('Warning: This removes all replication configuration!'));
        return;
    }
    const { host, port } = parsed;
    // Confirm destructive action before proceeding.
    console.log(formatter.warning(`This will remove all replication configuration on ${host}:${port}.`));
    if (!args.includes('--confirm')) {
        console.log(formatter.info('Use /instances reset-slave --confirm <host:port> to proceed.'));
        return;
    }
    console.log(formatter.info(`Resetting replication on ${host}:${port}...`));
    try {
        const success = await orchestrator.resetSlave(host, port);
        if (success) {
            console.log(formatter.success(`Replication reset on ${host}:${port}.`));
        }
        else {
            console.log(formatter.error(`Failed to reset replication on ${host}:${port}.`));
            console.log(formatter.info('Ensure the instance is registered in Orchestrator.'));
        }
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        console.log(formatter.error(`Failed to reset replication: ${message}`));
    }
}
|
|
676
|
+
/**
 * Relocate a replica to follow a new master (via Orchestrator).
 * Both instances must already be registered in Orchestrator.
 *
 * Usage: /instances relocate --host <host:port> --master <new-master:port>
 */
async function relocateReplica(args, ctx) {
    const { formatter, orchestrator } = ctx;
    const replicaArg = parseStringArg(args, '--host');
    const newMasterArg = parseStringArg(args, '--master');
    if (!replicaArg || !newMasterArg) {
        console.log(formatter.error('Missing required arguments. Usage: /instances relocate --host <host:port> --master <new-master:port>'));
        console.log(formatter.info(' --host <host:port> Replica to relocate'));
        console.log(formatter.info(' --master <host:port> New master to follow'));
        console.log(formatter.info('Note: Both instances must be registered in Orchestrator'));
        return;
    }
    // Split host:port pairs, defaulting ports to 3306.
    const [host, portStr] = replicaArg.split(':');
    const [masterHost, masterPortStr] = newMasterArg.split(':');
    const port = parseInt(portStr || '3306', 10);
    const masterPort = parseInt(masterPortStr || '3306', 10);
    console.log(formatter.header('Relocating Replica'));
    console.log(formatter.keyValue('Replica', `${host}:${port}`));
    console.log(formatter.keyValue('New Master', `${masterHost}:${masterPort}`));
    console.log();
    try {
        const ok = await orchestrator.relocateReplicas(host, port, masterHost, masterPort);
        if (!ok) {
            console.log(formatter.error(`Failed to relocate ${host}:${port}.`));
            console.log(formatter.info('Ensure both instances are registered in Orchestrator.'));
            return;
        }
        console.log(formatter.success(`Replica ${host}:${port} relocated to follow ${masterHost}:${masterPort}.`));
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        console.log(formatter.error(`Failed to relocate replica: ${message}`));
    }
}
|
|
715
|
+
/**
 * Put instance in maintenance mode (via Orchestrator).
 * The instance must already be registered in Orchestrator.
 *
 * Usage: /instances begin-maintenance <host:port> [--reason <reason>] [--duration <minutes>]
 */
async function beginMaintenance(args, ctx) {
    const { formatter, orchestrator } = ctx;
    const target = parseHostPortArgs(args);
    if (target === null) {
        console.log(formatter.error('Missing host. Usage: /instances begin-maintenance <host:port> [--reason <reason>] [--duration <minutes>]'));
        console.log(formatter.info(' <host:port> Instance to put in maintenance'));
        console.log(formatter.info(' --reason <r> Reason for maintenance'));
        console.log(formatter.info(' --duration <m> Duration in minutes (default: 60)'));
        console.log(formatter.info('Note: Instance must be registered in Orchestrator'));
        return;
    }
    const { host, port } = target;
    // Optional metadata for the maintenance window.
    const reason = parseStringArg(args, '--reason') || 'Manual maintenance via ClawSQL';
    const duration = parseNumberArg(args, '--duration', 60);
    console.log(formatter.info(`Putting ${host}:${port} in maintenance mode...`));
    console.log(formatter.keyValue('Reason', reason));
    console.log(formatter.keyValue('Duration', `${duration} minutes`));
    try {
        const ok = await orchestrator.beginMaintenance(host, port, reason, duration);
        if (!ok) {
            console.log(formatter.error(`Failed to put ${host}:${port} in maintenance mode.`));
            console.log(formatter.info('Ensure the instance is registered in Orchestrator.'));
            return;
        }
        console.log(formatter.success(`Instance ${host}:${port} is now in maintenance mode.`));
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        console.log(formatter.error(`Failed to begin maintenance: ${message}`));
    }
}
|
|
752
|
+
/**
 * Remove instance from maintenance mode (via Orchestrator).
 * The instance must already be registered in Orchestrator.
 */
async function endMaintenance(args, ctx) {
    const { formatter, orchestrator } = ctx;
    const target = parseHostPortArgs(args);
    if (target === null) {
        console.log(formatter.error('Missing host. Usage: /instances end-maintenance <host:port>'));
        console.log(formatter.info(' <host:port> Instance to remove from maintenance'));
        console.log(formatter.info('Note: Instance must be registered in Orchestrator'));
        return;
    }
    const { host, port } = target;
    console.log(formatter.info(`Removing ${host}:${port} from maintenance mode...`));
    try {
        const ok = await orchestrator.endMaintenance(host, port);
        if (!ok) {
            console.log(formatter.error(`Failed to remove ${host}:${port} from maintenance mode.`));
            console.log(formatter.info('Ensure the instance is registered in Orchestrator.'));
            return;
        }
        console.log(formatter.success(`Instance ${host}:${port} is no longer in maintenance mode.`));
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        console.log(formatter.error(`Failed to end maintenance: ${message}`));
    }
}
|
|
783
|
+
/**
 * Parse a positional `host:port` (or bare `host`) argument.
 * Flags (tokens starting with `--`) are skipped. A bare host gets the
 * default MySQL port 3306; a missing port after `:` also defaults to 3306.
 *
 * @returns {{host: string, port: number}|null} null when no host is present.
 */
function parseHostPortArgs(args) {
    const withPort = args.find(a => !a.startsWith('--') && a.includes(':'));
    if (withPort) {
        const [host, portStr] = withPort.split(':');
        return { host, port: parseInt(portStr || '3306', 10) };
    }
    // Fall back to a bare hostname without an explicit port.
    const bareHost = args.find(a => !a.startsWith('--') && !a.includes(':'));
    if (bareHost) {
        return { host: bareHost, port: 3306 };
    }
    return null;
}
|
|
800
|
+
// Expose the command object (declared earlier in this module) as the default export.
exports.default = exports.instancesCommand;
//# sourceMappingURL=instances.js.map
|