blue-js-sdk 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +446 -0
- package/LICENSE +21 -0
- package/README.md +75 -0
- package/ai-path/ADMIN-ELEVATION.md +116 -0
- package/ai-path/AI-MANIFESTO.md +185 -0
- package/ai-path/BREAKING.md +74 -0
- package/ai-path/CHECKLIST.md +619 -0
- package/ai-path/CONNECTION-STEPS.md +724 -0
- package/ai-path/DECISION-TREE.md +378 -0
- package/ai-path/DEPENDENCIES.md +459 -0
- package/ai-path/E2E-FLOW.md +1555 -0
- package/ai-path/FAILURES.md +403 -0
- package/ai-path/GUIDE.md +1217 -0
- package/ai-path/README.md +558 -0
- package/ai-path/SPLIT-TUNNEL.md +266 -0
- package/ai-path/cli.js +535 -0
- package/ai-path/connect.js +884 -0
- package/ai-path/discover.js +178 -0
- package/ai-path/environment.js +266 -0
- package/ai-path/errors.js +86 -0
- package/ai-path/examples/autonomous-agent.mjs +220 -0
- package/ai-path/examples/multi-region.mjs +174 -0
- package/ai-path/examples/one-shot.mjs +31 -0
- package/ai-path/index.js +60 -0
- package/ai-path/pricing.js +136 -0
- package/ai-path/recommend.js +413 -0
- package/ai-path/run-admin.vbs +25 -0
- package/ai-path/setup.js +291 -0
- package/ai-path/wallet.js +137 -0
- package/app-helpers.js +363 -0
- package/app-settings.js +95 -0
- package/app-types.js +267 -0
- package/audit.js +847 -0
- package/batch.js +293 -0
- package/bin/setup.js +376 -0
- package/chain/authz.js +109 -0
- package/chain/broadcast.js +472 -0
- package/chain/client.js +160 -0
- package/chain/fee-grants.js +305 -0
- package/chain/index.js +891 -0
- package/chain/lcd.js +313 -0
- package/chain/queries.js +547 -0
- package/chain/rpc.js +408 -0
- package/chain/wallet.js +141 -0
- package/cli/config.js +143 -0
- package/cli/index.js +463 -0
- package/cli/output.js +182 -0
- package/cli.js +491 -0
- package/client/index.js +251 -0
- package/client.js +271 -0
- package/config/index.js +255 -0
- package/connection/connect.js +849 -0
- package/connection/disconnect.js +180 -0
- package/connection/discovery.js +321 -0
- package/connection/index.js +76 -0
- package/connection/proxy.js +148 -0
- package/connection/resilience.js +428 -0
- package/connection/security.js +232 -0
- package/connection/state.js +369 -0
- package/connection/tunnel.js +691 -0
- package/consumer.js +132 -0
- package/cosmjs-setup.js +1884 -0
- package/defaults.js +366 -0
- package/disk-cache.js +107 -0
- package/dist/client.d.ts +108 -0
- package/dist/client.d.ts.map +1 -0
- package/dist/client.js +400 -0
- package/dist/client.js.map +1 -0
- package/dist/index.d.ts +8 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +8 -0
- package/dist/index.js.map +1 -0
- package/errors/index.js +112 -0
- package/errors.js +218 -0
- package/examples/README.md +64 -0
- package/examples/connect-direct.mjs +106 -0
- package/examples/connect-plan.mjs +125 -0
- package/examples/error-handling.mjs +109 -0
- package/examples/query-nodes.mjs +94 -0
- package/examples/wallet-basics.mjs +61 -0
- package/generated/amino/amino.ts +9 -0
- package/generated/cosmos/base/v1beta1/coin.ts +365 -0
- package/generated/cosmos_proto/cosmos.ts +323 -0
- package/generated/gogoproto/gogo.ts +9 -0
- package/generated/google/protobuf/descriptor.ts +7601 -0
- package/generated/google/protobuf/duration.ts +208 -0
- package/generated/google/protobuf/timestamp.ts +238 -0
- package/generated/sentinel/lease/v1/events.ts +924 -0
- package/generated/sentinel/lease/v1/lease.ts +292 -0
- package/generated/sentinel/lease/v1/msg.ts +949 -0
- package/generated/sentinel/lease/v1/params.ts +164 -0
- package/generated/sentinel/node/v3/events.ts +881 -0
- package/generated/sentinel/node/v3/msg.ts +1002 -0
- package/generated/sentinel/node/v3/node.ts +263 -0
- package/generated/sentinel/node/v3/params.ts +183 -0
- package/generated/sentinel/plan/v3/events.ts +675 -0
- package/generated/sentinel/plan/v3/msg.ts +1191 -0
- package/generated/sentinel/plan/v3/plan.ts +283 -0
- package/generated/sentinel/provider/v2/events.ts +171 -0
- package/generated/sentinel/provider/v2/msg.ts +480 -0
- package/generated/sentinel/provider/v2/params.ts +131 -0
- package/generated/sentinel/provider/v2/provider.ts +246 -0
- package/generated/sentinel/session/v3/events.ts +480 -0
- package/generated/sentinel/session/v3/msg.ts +616 -0
- package/generated/sentinel/session/v3/params.ts +260 -0
- package/generated/sentinel/session/v3/proof.ts +180 -0
- package/generated/sentinel/session/v3/session.ts +384 -0
- package/generated/sentinel/subscription/v3/events.ts +1181 -0
- package/generated/sentinel/subscription/v3/msg.ts +1305 -0
- package/generated/sentinel/subscription/v3/params.ts +167 -0
- package/generated/sentinel/subscription/v3/subscription.ts +315 -0
- package/generated/sentinel/types/v1/bandwidth.ts +124 -0
- package/generated/sentinel/types/v1/price.ts +149 -0
- package/generated/sentinel/types/v1/renewal.ts +87 -0
- package/generated/sentinel/types/v1/status.ts +54 -0
- package/generated/typeRegistry.ts +27 -0
- package/index.js +486 -0
- package/node-connect.js +3015 -0
- package/operator.js +134 -0
- package/package.json +113 -0
- package/plan-operations.js +199 -0
- package/preflight.js +352 -0
- package/pricing/index.js +262 -0
- package/proto/amino/amino.proto +84 -0
- package/proto/cosmos/base/v1beta1/coin.proto +61 -0
- package/proto/cosmos_proto/cosmos.proto +112 -0
- package/proto/gogoproto/gogo.proto +145 -0
- package/proto/google/api/annotations.proto +31 -0
- package/proto/google/api/http.proto +370 -0
- package/proto/google/protobuf/any.proto +106 -0
- package/proto/google/protobuf/duration.proto +115 -0
- package/proto/google/protobuf/timestamp.proto +145 -0
- package/proto/sentinel/lease/v1/events.proto +52 -0
- package/proto/sentinel/lease/v1/genesis.proto +15 -0
- package/proto/sentinel/lease/v1/lease.proto +25 -0
- package/proto/sentinel/lease/v1/msg.proto +62 -0
- package/proto/sentinel/lease/v1/params.proto +17 -0
- package/proto/sentinel/node/v3/events.proto +50 -0
- package/proto/sentinel/node/v3/genesis.proto +15 -0
- package/proto/sentinel/node/v3/msg.proto +63 -0
- package/proto/sentinel/node/v3/node.proto +27 -0
- package/proto/sentinel/node/v3/params.proto +21 -0
- package/proto/sentinel/node/v3/querier.proto +63 -0
- package/proto/sentinel/plan/v3/events.proto +41 -0
- package/proto/sentinel/plan/v3/genesis.proto +21 -0
- package/proto/sentinel/plan/v3/msg.proto +83 -0
- package/proto/sentinel/plan/v3/plan.proto +32 -0
- package/proto/sentinel/plan/v3/querier.proto +53 -0
- package/proto/sentinel/provider/v2/events.proto +16 -0
- package/proto/sentinel/provider/v2/genesis.proto +15 -0
- package/proto/sentinel/provider/v2/msg.proto +35 -0
- package/proto/sentinel/provider/v2/params.proto +17 -0
- package/proto/sentinel/provider/v2/provider.proto +24 -0
- package/proto/sentinel/provider/v3/genesis.proto +15 -0
- package/proto/sentinel/provider/v3/params.proto +13 -0
- package/proto/sentinel/session/v3/events.proto +30 -0
- package/proto/sentinel/session/v3/genesis.proto +15 -0
- package/proto/sentinel/session/v3/msg.proto +50 -0
- package/proto/sentinel/session/v3/params.proto +25 -0
- package/proto/sentinel/session/v3/proof.proto +25 -0
- package/proto/sentinel/session/v3/querier.proto +100 -0
- package/proto/sentinel/session/v3/session.proto +50 -0
- package/proto/sentinel/subscription/v2/allocation.proto +21 -0
- package/proto/sentinel/subscription/v2/payout.proto +22 -0
- package/proto/sentinel/subscription/v3/events.proto +65 -0
- package/proto/sentinel/subscription/v3/genesis.proto +17 -0
- package/proto/sentinel/subscription/v3/msg.proto +83 -0
- package/proto/sentinel/subscription/v3/params.proto +21 -0
- package/proto/sentinel/subscription/v3/subscription.proto +33 -0
- package/proto/sentinel/types/v1/bandwidth.proto +19 -0
- package/proto/sentinel/types/v1/price.proto +21 -0
- package/proto/sentinel/types/v1/renewal.proto +21 -0
- package/proto/sentinel/types/v1/status.proto +16 -0
- package/protocol/encoding.js +341 -0
- package/protocol/events.js +361 -0
- package/protocol/handshake.js +297 -0
- package/protocol/index.js +15 -0
- package/protocol/messages.js +346 -0
- package/protocol/plans.js +199 -0
- package/protocol/v2ray.js +268 -0
- package/protocol/v3.js +723 -0
- package/protocol/wireguard.js +125 -0
- package/security/index.js +132 -0
- package/session-manager.js +329 -0
- package/session-tracker.js +80 -0
- package/setup.js +376 -0
- package/speedtest/index.js +528 -0
- package/speedtest.js +567 -0
- package/src/client.ts +502 -0
- package/src/index.ts +20 -0
- package/state/index.js +347 -0
- package/state.js +516 -0
- package/test-all-chain-ops.js +493 -0
- package/test-all-logic.js +199 -0
- package/test-all-msg-types.js +292 -0
- package/test-every-connection.js +208 -0
- package/test-feegrant-connect.js +98 -0
- package/test-logic.js +148 -0
- package/test-mainnet.js +176 -0
- package/test-plan-lifecycle.js +335 -0
- package/tls-trust.js +132 -0
- package/tsconfig.build.json +20 -0
- package/tsconfig.json +34 -0
- package/types/chain.d.ts +746 -0
- package/types/connection.d.ts +425 -0
- package/types/errors.d.ts +174 -0
- package/types/index.d.ts +1380 -0
- package/types/nodes.d.ts +187 -0
- package/types/pricing.d.ts +156 -0
- package/types/protocol.d.ts +332 -0
- package/types/session.d.ts +236 -0
- package/types/settings.d.ts +192 -0
- package/v3protocol.js +1053 -0
- package/wallet/index.js +153 -0
- package/wireguard.js +307 -0
package/node-connect.js
ADDED
|
@@ -0,0 +1,3015 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Node Connection Orchestration for Sentinel dVPN
|
|
3
|
+
*
|
|
4
|
+
* Complete flows for connecting to a Sentinel node via WireGuard or V2Ray.
|
|
5
|
+
* Handles: query → pay → handshake → tunnel setup → speed test.
|
|
6
|
+
*
|
|
7
|
+
* Usage:
|
|
8
|
+
* import { connectDirect, connectViaPlan, queryOnlineNodes, disconnect } from './node-connect.js';
|
|
9
|
+
*
|
|
10
|
+
* // Direct pay-per-GB connection (full tunnel — changes your IP)
|
|
11
|
+
* const conn = await connectDirect({ mnemonic, nodeAddress, rpcUrl, lcdUrl, v2rayExePath });
|
|
12
|
+
*
|
|
13
|
+
* // With progress callback
|
|
14
|
+
* const conn = await connectDirect({
|
|
15
|
+
* mnemonic, nodeAddress, rpcUrl, lcdUrl, v2rayExePath,
|
|
16
|
+
* onProgress: (step, detail) => console.log(`[${step}] ${detail}`),
|
|
17
|
+
* });
|
|
18
|
+
*
|
|
19
|
+
* // Connection via existing plan
|
|
20
|
+
* const conn = await connectViaPlan({ mnemonic, planId, nodeAddress, rpcUrl, lcdUrl, v2rayExePath });
|
|
21
|
+
*/
|
|
22
|
+
|
|
23
|
+
import https from 'https';
|
|
24
|
+
import { EventEmitter } from 'events';
|
|
25
|
+
import axios from 'axios';
|
|
26
|
+
import { execSync, execFileSync, spawn } from 'child_process';
|
|
27
|
+
import { writeFileSync, mkdirSync, existsSync, unlinkSync } from 'fs';
|
|
28
|
+
import { createServer } from 'net';
|
|
29
|
+
import path from 'path';
|
|
30
|
+
import os from 'os';
|
|
31
|
+
|
|
32
|
+
// axios adapter set in defaults.js (imported below) — prevents undici "fetch failed" on Node.js v18+.
|
|
33
|
+
|
|
34
|
+
import {
|
|
35
|
+
createWallet, privKeyFromMnemonic, createClient, broadcast, broadcastWithFeeGrant,
|
|
36
|
+
extractId, findExistingSession, getBalance, MSG_TYPES, resolveNodeUrl,
|
|
37
|
+
fetchActiveNodes, filterNodes, queryNode, buildEndSessionMsg,
|
|
38
|
+
} from './cosmjs-setup.js';
|
|
39
|
+
|
|
40
|
+
import {
|
|
41
|
+
nodeStatusV3, generateWgKeyPair, initHandshakeV3,
|
|
42
|
+
writeWgConfig, generateV2RayUUID, initHandshakeV3V2Ray,
|
|
43
|
+
buildV2RayClientConfig, extractSessionId, waitForPort,
|
|
44
|
+
} from './v3protocol.js';
|
|
45
|
+
|
|
46
|
+
import { installWgTunnel, disconnectWireGuard, emergencyCleanupSync, WG_AVAILABLE, IS_ADMIN } from './wireguard.js';
|
|
47
|
+
import { speedtestViaSocks5, speedtestDirect, resolveSpeedtestIPs, flushSpeedTestDnsCache } from './speedtest.js';
|
|
48
|
+
import { saveState, clearState, recoverOrphans, markSessionPoisoned, markSessionActive, isSessionPoisoned, saveCredentials, loadCredentials, clearCredentials } from './state.js';
|
|
49
|
+
import {
|
|
50
|
+
DEFAULT_RPC, DEFAULT_LCD, RPC_ENDPOINTS, LCD_ENDPOINTS,
|
|
51
|
+
BROKEN_NODES, tryWithFallback, LAST_VERIFIED, DEFAULT_TIMEOUTS, sleep,
|
|
52
|
+
recordTransportResult, resolveDnsServers,
|
|
53
|
+
} from './defaults.js';
|
|
54
|
+
import {
|
|
55
|
+
SentinelError, ValidationError, NodeError, ChainError, TunnelError, ErrorCodes,
|
|
56
|
+
} from './errors.js';
|
|
57
|
+
import { createNodeHttpsAgent, publicEndpointAgent } from './tls-trust.js';
|
|
58
|
+
|
|
59
|
+
// CA-validated agent for LCD/RPC public endpoints (valid CA certs)
|
|
60
|
+
const httpsAgent = publicEndpointAgent;
|
|
61
|
+
|
|
62
|
+
// ─── Event Emitter ───────────────────────────────────────────────────────────
// Subscribe to SDK lifecycle events without polling:
//   import { events } from './node-connect.js';
//   events.on('connected', ({ sessionId, serviceType }) => updateUI());
//   events.on('disconnected', ({ reason }) => showNotification());
//   events.on('progress', ({ step, detail }) => updateProgressBar());

export const events = new EventEmitter();

// ─── Cleanup Safety ──────────────────────────────────────────────────────────
// Tracks whether registerCleanupHandlers() has been called. Connecting without
// registering risks orphaning WireGuard adapters or V2Ray processes on
// crash/SIGINT — the "Dead Internet" bug.
let _cleanupRegistered = false;
let _cleanupWarned = false;

/**
 * Guard used by connect entry points: refuse to proceed unless cleanup
 * handlers were registered first.
 * @param {string} fnName - Name of the calling API, used in the error text.
 * @throws {SentinelError} INVALID_OPTIONS when registerCleanupHandlers() was never called.
 */
function warnIfNoCleanup(fnName) {
  if (_cleanupRegistered) return; // handlers are in place — nothing to check
  throw new SentinelError(ErrorCodes.INVALID_OPTIONS,
    `${fnName}() called without registerCleanupHandlers(). ` +
    `If your app crashes, WireGuard/V2Ray tunnels will orphan and kill the user's internet. ` +
    `Call registerCleanupHandlers() once at app startup, or use quickConnect() which does it automatically.`
  );
}
|
|
87
|
+
|
|
88
|
+
// ─── Connection Mutex ─────────────────────────────────────────────────────────
// v27: Prevent concurrent connection attempts (backported from C# SemaphoreSlim).
// Only one connect call may be in-flight at a time. quickConnect inherits via connectAuto.
let _connectLock = false;

// v30: Abort flag — disconnect() sets this to stop a running connectAuto() retry loop.
// Without this, disconnect() clears tunnel state but connectAuto() keeps retrying,
// paying for new sessions. The user cannot reconnect because _connectLock stays held.
let _abortConnect = false;

/**
 * Check if a connection attempt is currently in progress.
 * @returns {boolean} True while a connect call holds the module-level lock.
 */
export function isConnecting() {
  return _connectLock;
}
|
|
100
|
+
|
|
101
|
+
// ─── Connection State ─────────────────────────────────────────────────────────
// v22: Encapsulated state enables per-instance connections via SentinelClient.
// Module-level functions use _defaultState for backward compatibility.

/**
 * Per-connection mutable state: tunnel handles, proxy flags, and session info.
 * Each instance self-registers in _activeStates so process-exit handlers can
 * clean up every live connection.
 */
export class ConnectionState {
  constructor() {
    this.v2rayProc = null;       // spawned V2Ray child process, if any
    this.wgTunnel = null;        // WireGuard tunnel handle, if any
    this.systemProxy = false;    // true while the OS proxy points at this SDK
    this.connection = null;      // { nodeAddress, serviceType, sessionId, connectedAt, socksPort? }
    this.savedProxyState = null; // pre-connect proxy settings (restored on clear)
    this._mnemonic = null;       // Stored for session-end TX on disconnect (zeroed after use)
    _activeStates.add(this);
  }

  /** True while either tunnel type (V2Ray process or WireGuard tunnel) is up. */
  get isConnected() {
    return Boolean(this.v2rayProc || this.wgTunnel);
  }

  /** Unregister this instance from the global cleanup registry. */
  destroy() {
    _activeStates.delete(this);
  }
}

// Global registry of active states — used by exit handlers to clean up all instances
const _activeStates = new Set();
const _defaultState = new ConnectionState();

// Default logger — can be overridden per-call via opts.log
let defaultLog = console.log;
|
|
125
|
+
|
|
126
|
+
// ─── Wallet Cache ────────────────────────────────────────────────────────────
|
|
127
|
+
// v21: Cache wallet derivation (BIP39 → SLIP-10 is CPU-bound, ~300ms).
|
|
128
|
+
// Same mnemonic always produces the same wallet — safe to cache.
|
|
129
|
+
// Keyed by full SHA256 of mnemonic to avoid storing the raw mnemonic.
|
|
130
|
+
|
|
131
|
+
import { sha256 as _sha256 } from '@cosmjs/crypto';
|
|
132
|
+
const _walletCache = new Map();

/**
 * Derive (or reuse) a wallet for the given mnemonic.
 * BIP39 → SLIP-10 derivation is CPU-bound (~300ms) and deterministic — the
 * same mnemonic always yields the same wallet — so results are memoized.
 * The cache key is the full SHA256 of the mnemonic, so the raw phrase itself
 * is never stored in the map.
 * @param {string} mnemonic - BIP39 mnemonic phrase.
 * @returns {Promise<object>} Result of createWallet(mnemonic).
 */
async function cachedCreateWallet(mnemonic) {
  const key = Buffer.from(_sha256(Buffer.from(mnemonic))).toString('hex'); // full SHA256 — no truncation
  if (_walletCache.has(key)) return _walletCache.get(key);
  const wallet = await createWallet(mnemonic);
  _walletCache.set(key, wallet);
  return wallet;
}

/** Clear the wallet derivation cache. Call after disconnect to release key material from memory. */
export function clearWalletCache() {
  _walletCache.clear();
}
|
|
146
|
+
|
|
147
|
+
// ─── Circuit Breaker ─────────────────────────────────────────────────────────
// v22: Skip nodes that repeatedly fail. Resets after TTL expires.
// v25: Configurable threshold/TTL via configureCircuitBreaker().

const _circuitBreaker = new Map(); // address -> { count, lastFail }
let _cbTtl = 5 * 60_000; // default 5 minutes
let _cbThreshold = 3; // default 3 failures before tripping

/** Record one failed connection attempt against a node address. */
function recordNodeFailure(address) {
  const entry = _circuitBreaker.get(address) ?? { count: 0, lastFail: 0 };
  entry.count += 1;
  entry.lastFail = Date.now();
  _circuitBreaker.set(address, entry);
}

/**
 * Is the breaker tripped for this node? A node is skipped once it has
 * accumulated _cbThreshold failures; the record expires (and is removed)
 * _cbTtl milliseconds after the most recent failure.
 */
function isCircuitOpen(address) {
  const entry = _circuitBreaker.get(address);
  if (!entry) return false;
  const expired = Date.now() - entry.lastFail > _cbTtl;
  if (expired) {
    _circuitBreaker.delete(address); // TTL elapsed — forget this node's failures
    return false;
  }
  return entry.count >= _cbThreshold;
}

/**
 * Reset breaker state for one node, or for every node when called with no args.
 * @param {string} [address]
 */
export function resetCircuitBreaker(address) {
  if (address) {
    _circuitBreaker.delete(address);
  } else {
    _circuitBreaker.clear();
  }
}

/**
 * Configure circuit breaker thresholds globally.
 * @param {{ threshold?: number, ttlMs?: number }} opts
 */
export function configureCircuitBreaker(opts = {}) {
  if (opts.threshold != null) _cbThreshold = Math.max(1, Math.floor(opts.threshold));
  if (opts.ttlMs != null) _cbTtl = Math.max(1000, Math.floor(opts.ttlMs));
}

/**
 * Get circuit breaker status for observability.
 * @param {string} [address] - Specific node, or omit for all.
 * @returns {object} Status per node: { count, lastFail, isOpen }
 */
export function getCircuitBreakerStatus(address) {
  const snapshot = (addr, entry) => ({ count: entry.count, lastFail: entry.lastFail, isOpen: isCircuitOpen(addr) });
  if (address) {
    const entry = _circuitBreaker.get(address);
    return entry ? snapshot(address, entry) : null;
  }
  const all = {};
  for (const [addr, entry] of _circuitBreaker) {
    all[addr] = snapshot(addr, entry);
  }
  return all;
}
|
|
203
|
+
|
|
204
|
+
// ─── Connection Metrics (v25) ────────────────────────────────────────────────
// Track per-node connection stats for reliability tracking over time.

const _connectionMetrics = new Map(); // nodeAddress -> { attempts, successes, failures, avgTimeMs, lastAttempt }

/**
 * Fold one connection attempt into the per-node stats.
 * @param {string} nodeAddress
 * @param {boolean} success - Whether the attempt established a session.
 * @param {number} [durationMs] - Wall time of the attempt; missing/falsy counts as 0.
 */
function _recordMetric(nodeAddress, success, durationMs) {
  let entry = _connectionMetrics.get(nodeAddress);
  if (!entry) {
    entry = { attempts: 0, successes: 0, failures: 0, totalTimeMs: 0, lastAttempt: 0 };
  }
  entry.attempts += 1;
  if (success) {
    entry.successes += 1;
  } else {
    entry.failures += 1;
  }
  entry.totalTimeMs += durationMs || 0;
  entry.lastAttempt = Date.now();
  _connectionMetrics.set(nodeAddress, entry);
}

/**
 * Get connection metrics for observability.
 * @param {string} [nodeAddress] - Specific node, or omit for all.
 * @returns {object} Per-node stats: { attempts, successes, failures, successRate, avgTimeMs, lastAttempt }
 */
export function getConnectionMetrics(nodeAddress) {
  // Derive rate/average lazily so stored entries stay cheap to update.
  const withDerived = (entry) => ({
    ...entry,
    successRate: entry.attempts > 0 ? entry.successes / entry.attempts : 0,
    avgTimeMs: entry.attempts > 0 ? Math.round(entry.totalTimeMs / entry.attempts) : 0,
  });
  if (nodeAddress) {
    const entry = _connectionMetrics.get(nodeAddress);
    return entry ? withDerived(entry) : null;
  }
  const all = {};
  for (const [addr, entry] of _connectionMetrics) {
    all[addr] = withDerived(entry);
  }
  return all;
}
|
|
237
|
+
|
|
238
|
+
// ─── Node List Cache ─────────────────────────────────────────────────────────
// v21: Cache queryOnlineNodes results for 5 minutes. Returns cached results
// immediately on repeat calls and refreshes in background if stale.
// v25: Deduplicated concurrent refreshes + flushNodeCache() export.

const NODE_CACHE_TTL = 5 * 60_000; // 5 minutes
let _nodeCache = null; // { nodes, timestamp, key }
let _inflightRefresh = null; // Promise — prevents duplicate concurrent refreshes

/** Clear the node list cache. Next queryOnlineNodes() call will fetch fresh data. */
export function flushNodeCache() {
  _nodeCache = null;
  // Dropping the in-flight marker lets the next caller start its own fetch.
  // NOTE(review): a refresh already running will still write _nodeCache when it
  // resolves (queryOnlineNodes assigns it in .then) — confirm that's intended.
  _inflightRefresh = null;
}
|
|
252
|
+
|
|
253
|
+
// ─── Abort helper ────────────────────────────────────────────────────────────

/**
 * Throw if the caller's AbortSignal has fired; no-op otherwise.
 * @param {AbortSignal} [signal] - Optional signal; null/undefined is treated as "not aborted".
 * @throws {SentinelError} ABORTED, carrying the signal's reason in details.
 */
function checkAborted(signal) {
  if (!signal?.aborted) return;
  throw new SentinelError(ErrorCodes.ABORTED, 'Connection aborted', { reason: signal.reason });
}
|
|
260
|
+
|
|
261
|
+
// ─── Progress helper ─────────────────────────────────────────────────────────

/**
 * Emit a progress event and fan it out to the optional logger and callback.
 * Both user-supplied functions are wrapped in try/catch so a throwing
 * callback cannot crash the SDK mid-connection.
 * @param {Function|null} cb - Optional onProgress(step, detail, entry) callback.
 * @param {Function|null} logFn - Optional log(line) function.
 * @param {string} step - Machine-readable step name (event is `sdk.${step}`).
 * @param {string} detail - Human-readable description of the step.
 * @param {object} [meta] - Extra fields merged into the emitted entry.
 */
function progress(cb, logFn, step, detail, meta = {}) {
  const entry = { event: `sdk.${step}`, detail, ts: Date.now(), ...meta };
  events.emit('progress', entry);
  if (logFn) {
    try { logFn(`[${step}] ${detail}`); } catch {} // user callback may throw — don't crash SDK
  }
  if (cb) {
    try { cb(step, detail, entry); } catch {} // user callback may throw — don't crash SDK
  }
}
|
|
269
|
+
|
|
270
|
+
// ─── Node Inactive Retry Helper ──────────────────────────────────────────────
// LCD may show node as active, but chain rejects TX with code 105 ("invalid
// status inactive") if the node went offline between query and payment.
// Retry once after 15s in case LCD data was stale.

/**
 * Does this error indicate the target node is inactive on-chain (code 105)?
 * Matches either the numeric code in err.details or the chain error text.
 * @param {Error|object|null|undefined} err
 * @returns {boolean}
 */
function _isNodeInactiveError(err) {
  if (err?.details?.code === 105) return true;
  return String(err?.message || '').includes('invalid status inactive');
}
|
|
280
|
+
|
|
281
|
+
/**
 * Broadcast msgs, retrying once after 15s when the chain rejects with
 * "invalid status inactive" (code 105) — the LCD's node data may simply be
 * stale. Any other failure propagates unchanged.
 * @param {object} client - Signing client from createClient().
 * @param {string} address - Sender bech32 address.
 * @param {object[]} msgs - Messages to broadcast.
 * @param {Function|null} logFn - Optional logger forwarded to progress().
 * @param {Function|null} onProgress - Optional progress callback.
 * @returns {Promise<object>} Result of broadcast().
 * @throws {NodeError} NODE_INACTIVE when the retry also hits code 105.
 */
async function broadcastWithInactiveRetry(client, address, msgs, logFn, onProgress) {
  try {
    return await broadcast(client, address, msgs);
  } catch (err) {
    if (!_isNodeInactiveError(err)) throw err; // unrelated failure — propagate as-is
    progress(onProgress, logFn, 'session', 'Node reported inactive (code 105) — LCD stale data. Retrying in 15s...');
    await sleep(15000);
    try {
      return await broadcast(client, address, msgs);
    } catch (retryErr) {
      if (!_isNodeInactiveError(retryErr)) throw retryErr; // new, different failure
      // Inactive twice — node really is down; surface a typed, actionable error.
      throw new NodeError(ErrorCodes.NODE_INACTIVE, 'Node went inactive between query and payment (code 105). LCD stale data confirmed after retry.', {
        original: retryErr.message,
        code: 105,
      });
    }
  }
}
|
|
303
|
+
|
|
304
|
+
// ─── System Proxy (for V2Ray SOCKS5) ─────────────────────────────────────────

// Per-user (HKCU) WinINET proxy settings key — the one browsers/Windows read.
const WIN_REG = 'HKCU\\Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings';

/**
 * Set system SOCKS proxy so browser/system traffic goes through V2Ray.
 * Windows: registry (Internet Settings). macOS: networksetup. Linux: gsettings (GNOME).
 * Best-effort: failures are logged as warnings, never thrown.
 * @param {number|string} socksPort - Local SOCKS5 port V2Ray listens on.
 * @param {ConnectionState} [state] - Instance state; defaults to module-level state.
 */
export function setSystemProxy(socksPort, state) {
  const _state = state || _defaultState;
  const port = String(Math.floor(Number(socksPort))); // sanitize to numeric string
  // NOTE(review): a non-numeric socksPort becomes the string "NaN" here — consider validating upstream.
  try {
    if (process.platform === 'win32') {
      // Backup current proxy state before modifying (restored in clearSystemProxy)
      try {
        const enableOut = execFileSync('reg', ['query', WIN_REG, '/v', 'ProxyEnable'], { encoding: 'utf8', stdio: 'pipe' });
        let serverOut = '';
        // ProxyServer may be absent even when ProxyEnable exists — query separately, tolerate failure.
        try { serverOut = execFileSync('reg', ['query', WIN_REG, '/v', 'ProxyServer'], { encoding: 'utf8', stdio: 'pipe' }); } catch {}
        _state.savedProxyState = { platform: 'win32', enableOut, serverOut };
      } catch {
        // Query failed (key/value missing) — record an empty backup so clearSystemProxy disables the proxy.
        _state.savedProxyState = { platform: 'win32', enableOut: '', serverOut: '' };
      }
      execFileSync('reg', ['add', WIN_REG, '/v', 'ProxyEnable', '/t', 'REG_DWORD', '/d', '1', '/f'], { stdio: 'pipe' });
      execFileSync('reg', ['add', WIN_REG, '/v', 'ProxyServer', '/t', 'REG_SZ', '/d', `socks=127.0.0.1:${port}`, '/f'], { stdio: 'pipe' });
    } else if (process.platform === 'darwin') {
      // macOS: set SOCKS proxy on all network services
      // networksetup prints a header line plus service names; '*'-prefixed entries are disabled services.
      const services = execFileSync('networksetup', ['-listallnetworkservices'], { encoding: 'utf8', stdio: 'pipe' })
        .split('\n').filter(s => s && !s.startsWith('*') && !s.startsWith('An asterisk'));
      for (const svc of services) {
        // Per-service failures are ignored — some services reject proxy configuration.
        try { execFileSync('networksetup', ['-setsocksfirewallproxy', svc, '127.0.0.1', port], { stdio: 'pipe' }); } catch {}
        try { execFileSync('networksetup', ['-setsocksfirewallproxystate', svc, 'on'], { stdio: 'pipe' }); } catch {}
      }
    } else {
      // Linux: GNOME gsettings (most common desktop)
      try {
        execFileSync('gsettings', ['set', 'org.gnome.system.proxy', 'mode', 'manual'], { stdio: 'pipe' });
        execFileSync('gsettings', ['set', 'org.gnome.system.proxy.socks', 'host', '127.0.0.1'], { stdio: 'pipe' });
        execFileSync('gsettings', ['set', 'org.gnome.system.proxy.socks', 'port', port], { stdio: 'pipe' });
      } catch {} // gsettings not available (headless/non-GNOME) — silent no-op
    }
    // Flag set even when the Linux branch silently no-ops — clearSystemProxy is idempotent anyway.
    _state.systemProxy = true;
  } catch (e) { console.warn('[sentinel-sdk] setSystemProxy warning:', e.message); }
}
|
|
347
|
+
|
|
348
|
+
/**
 * Clear system proxy. Always call on disconnect/exit.
 * Safe to call multiple times.
 * On Windows, restores the proxy configuration backed up by setSystemProxy()
 * when the user had their own proxy enabled; otherwise disables proxying.
 * Best-effort: failures are logged as warnings, never thrown.
 * @param {ConnectionState} [state] - Instance state; defaults to module-level state.
 */
export function clearSystemProxy(state) {
  const _state = state || _defaultState;
  try {
    if (process.platform === 'win32') {
      // '0x1' in the backed-up `reg query ProxyEnable` output means the user had a proxy ON before us.
      if (_state.savedProxyState?.platform === 'win32' && _state.savedProxyState.enableOut.includes('0x1') && _state.savedProxyState.serverOut) {
        // User had a proxy before — restore their previous ProxyServer value
        const match = _state.savedProxyState.serverOut.match(/ProxyServer\s+REG_SZ\s+(.+)/);
        if (match) {
          execFileSync('reg', ['add', WIN_REG, '/v', 'ProxyServer', '/t', 'REG_SZ', '/d', match[1].trim(), '/f'], { stdio: 'pipe' });
        } else {
          // Backup existed but value couldn't be parsed — drop ours rather than write garbage.
          execFileSync('reg', ['delete', WIN_REG, '/v', 'ProxyServer', '/f'], { stdio: 'pipe' });
        }
        // Keep ProxyEnable=1 since they had it on
      } else {
        // User had no proxy before (or no backup) — disable
        execFileSync('reg', ['add', WIN_REG, '/v', 'ProxyEnable', '/t', 'REG_DWORD', '/d', '0', '/f'], { stdio: 'pipe' });
        try { execFileSync('reg', ['delete', WIN_REG, '/v', 'ProxyServer', '/f'], { stdio: 'pipe' }); } catch {} // may not exist
      }
    } else if (process.platform === 'darwin') {
      // Turn the SOCKS proxy off on every enabled network service.
      const services = execFileSync('networksetup', ['-listallnetworkservices'], { encoding: 'utf8', stdio: 'pipe' })
        .split('\n').filter(s => s && !s.startsWith('*') && !s.startsWith('An asterisk'));
      for (const svc of services) {
        try { execFileSync('networksetup', ['-setsocksfirewallproxystate', svc, 'off'], { stdio: 'pipe' }); } catch {}
      }
    } else {
      try { execFileSync('gsettings', ['set', 'org.gnome.system.proxy', 'mode', 'none'], { stdio: 'pipe' }); } catch {} // gsettings unavailable — headless/non-GNOME
    }
  } catch (e) { console.warn('[sentinel-sdk] clearSystemProxy warning:', e.message); }
  // Reset flags unconditionally — even if the platform commands above failed.
  _state.systemProxy = false;
  _state.savedProxyState = null;
}
|
|
383
|
+
|
|
384
|
+
// ─── Query Nodes ─────────────────────────────────────────────────────────────

/**
 * Fetch active nodes from LCD and check which are actually online.
 * Returns array sorted by quality score (best first).
 *
 * Built-in quality scoring (from 400+ node tests):
 * - WireGuard nodes scored higher than V2Ray (simpler tunnel, fewer failure modes)
 * - V2Ray with grpc/tls deprioritized (0% success rate in testing)
 * - High clock drift nodes penalized (VMess fails silently at >120s)
 * - Nodes with fewer peers scored higher (less congestion)
 *
 * Results are cached for NODE_CACHE_TTL; a cache hit returns instantly and
 * kicks off at most one deduplicated background refresh.
 *
 * @param {object} options
 * @param {string} options.lcdUrl - LCD endpoint (default: https://lcd.sentinel.co)
 * @param {string} options.serviceType - Filter: 'wireguard' | 'v2ray' | null (both)
 * @param {number} options.maxNodes - Max nodes to check online status (impl default raised to 5000 in v25b)
 * @param {number} options.concurrency - Parallel online checks (default: 20)
 * @param {boolean} options.sort - Sort by quality score, best first (default: true). Set false for random order.
 * @param {boolean} [options.waitForFresh] - Bypass the cache entirely and wait for a fresh fetch.
 * @param {boolean} [options.noCache] - Skip the cache-hit fast path (the fetch itself is still deduplicated).
 * @returns {Promise<object[]>} Online nodes (cached or freshly fetched).
 */
export async function queryOnlineNodes(options = {}) {
  // v25: waitForFresh skips cache entirely
  if (options.waitForFresh) {
    const nodes = await _queryOnlineNodesImpl(options);
    _nodeCache = { nodes, timestamp: Date.now(), key: `${options.lcdUrl || 'default'}_${options.serviceType || 'all'}_${options.maxNodes || 100}` };
    return nodes;
  }

  // v21: Node cache — return cached results if fresh, background-refresh if stale
  const cacheKey = `${options.lcdUrl || 'default'}_${options.serviceType || 'all'}_${options.maxNodes || 100}`;
  if (!options.noCache && _nodeCache && _nodeCache.key === cacheKey && Date.now() - _nodeCache.timestamp < NODE_CACHE_TTL) {
    // Cache hit — fire deduplicated background refresh but return instantly
    if (!_inflightRefresh) {
      _inflightRefresh = _queryOnlineNodesImpl(options).then(nodes => {
        _nodeCache = { nodes, timestamp: Date.now(), key: cacheKey };
      }).catch(e => {
        // Background refresh is best-effort: warn and keep serving the existing cache.
        if (typeof console !== 'undefined') console.warn('[sentinel-sdk] Node cache refresh failed:', e.message);
      }).finally(() => { _inflightRefresh = null; });
    }
    return _nodeCache.nodes;
  }

  // No cache — deduplicate concurrent cold fetches
  // NOTE(review): if a refresh started with a DIFFERENT cacheKey is still in
  // flight, this awaits that one and may return nodes fetched under another
  // caller's options — confirm whether per-key inflight tracking is needed.
  if (!_inflightRefresh) {
    _inflightRefresh = _queryOnlineNodesImpl(options).then(nodes => {
      _nodeCache = { nodes, timestamp: Date.now(), key: cacheKey };
      return nodes;
    }).finally(() => { _inflightRefresh = null; });
  }
  const nodes = await _inflightRefresh;
  // A background-refresh promise resolves undefined — fall back to whatever the cache now holds.
  return nodes || _nodeCache?.nodes || [];
}
|
|
435
|
+
|
|
436
|
+
/**
 * Uncached implementation behind queryOnlineNodes(): fetch the active node set
 * from the LCD, probe each candidate's status endpoint, and return the nodes
 * that answered, optionally sorted by quality score.
 */
async function _queryOnlineNodesImpl(options = {}) {
  const maxNodes = options.maxNodes || 5000; // v25b: raised from 100 — chain has 1000+ nodes
  const concurrency = options.concurrency || 20;
  const shouldSort = options.sort !== false; // default true
  const logFn = options.log || null;
  const brokenAddrs = new Set(BROKEN_NODES.map((entry) => entry.address));

  // 1. Fetch ALL active nodes from LCD — uses lcdPaginatedSafe (handles broken pagination)
  let candidates;
  if (options.lcdUrl) {
    candidates = await fetchActiveNodes(options.lcdUrl);
  } else {
    const { result } = await tryWithFallback(LCD_ENDPOINTS, fetchActiveNodes, 'LCD node list');
    candidates = result;
  }

  // Resolve remote_addrs → remote_url (LCD v3 returns "IP:PORT" array, not "https://..." string)
  for (const candidate of candidates) {
    try {
      candidate.remote_url = resolveNodeUrl(candidate);
    } catch {
      candidate.remote_url = null;
    }
  }

  // Filter: must accept udvpn, must have URL, skip known broken nodes (verified ${LAST_VERIFIED})
  candidates = candidates.filter((candidate) => {
    if (!candidate.remote_url) return false;
    if (brokenAddrs.has(candidate.address)) return false;
    return (candidate.gigabyte_prices || []).some((price) => price.denom === 'udvpn');
  });

  // Warn if maxNodes truncates results
  if (logFn && maxNodes < candidates.length) {
    logFn(`[queryOnlineNodes] Warning: ${candidates.length} nodes on chain, returning ${maxNodes} (capped by maxNodes)`);
  }

  // Fisher-Yates shuffle (unbiased), then cap the candidate pool at maxNodes
  for (let i = candidates.length - 1; i > 0; i--) {
    const j = Math.floor(Math.random() * (i + 1));
    const swap = candidates[i];
    candidates[i] = candidates[j];
    candidates[j] = swap;
  }
  candidates = candidates.slice(0, maxNodes);

  // 2. Check online status in parallel batches of `concurrency`
  const online = [];
  let probed = 0;
  const onNodeProbed = options.onNodeProbed; // callback: ({ total, probed, online }) => void

  const probeOne = async (node) => {
    const status = await nodeStatusV3(node.remote_url);
    if (options.serviceType && status.type !== options.serviceType) return null;
    return {
      address: node.address,
      remoteUrl: node.remote_url,
      serviceType: status.type,
      moniker: status.moniker,
      country: status.location.country,
      city: status.location.city,
      peers: status.peers,
      clockDriftSec: status.clockDriftSec,
      gigabytePrices: node.gigabyte_prices,
      hourlyPrices: node.hourly_prices,
      qualityScore: scoreNode(status),
    };
  };

  for (let start = 0; start < candidates.length; start += concurrency) {
    const batch = candidates.slice(start, start + concurrency);
    const outcomes = await Promise.allSettled(batch.map(probeOne));
    for (const outcome of outcomes) {
      // Rejected probes (offline/unreachable nodes) are silently dropped.
      if (outcome.status === 'fulfilled' && outcome.value) online.push(outcome.value);
    }
    probed += batch.length;
    if (onNodeProbed) try { onNodeProbed({ total: candidates.length, probed, online: online.length }); } catch {}
  }

  // 3. Sort by quality score (best first) unless disabled
  if (shouldSort) {
    online.sort((a, b) => b.qualityScore - a.qualityScore);
  }

  return online;
}
|
|
517
|
+
|
|
518
|
+
// ─── Full Node Catalog (LCD only, no per-node status checks) ────────────────
|
|
519
|
+
|
|
520
|
+
/**
 * Fetch ALL active nodes from the LCD. No per-node HTTP checks — instant.
 *
 * Returns every node that accepts udvpn, with LCD data only:
 * address, remote_url, gigabyte_prices, hourly_prices.
 *
 * Use this for: building node lists/maps, country pickers, price comparisons.
 * Use queryOnlineNodes() when you need verified online status + quality scores.
 *
 * @param {object} [options]
 * @param {string} [options.lcdUrl] - LCD endpoint (uses fallback chain if omitted)
 * @returns {Promise<Array>} All active nodes (900+)
 */
export async function fetchAllNodes(options = {}) {
  let nodes;
  if (options.lcdUrl) {
    nodes = await fetchActiveNodes(options.lcdUrl);
  } else {
    const { result } = await tryWithFallback(
      LCD_ENDPOINTS,
      async (url) => fetchActiveNodes(url),
      'LCD full node list',
    );
    nodes = result;
  }

  // Resolve remote_addrs → remote_url (LCD v3 returns "IP:PORT" array, not a URL string).
  // Without this, v3-style nodes have no remote_url and were silently dropped by the
  // filter below — _queryOnlineNodesImpl() already does this same resolution step.
  // Only fill in missing URLs; never clobber a URL the LCD already provided.
  nodes = nodes.map(n => {
    if (!n.remote_url) {
      try { n.remote_url = resolveNodeUrl(n); } catch { n.remote_url = null; }
    }
    return n;
  });

  // Filter: must accept udvpn, must have a resolvable URL
  return nodes.filter(n =>
    n.remote_url &&
    (n.gigabyte_prices || []).some(p => p.denom === 'udvpn')
  );
}
|
|
552
|
+
|
|
553
|
+
/**
 * Build a geographic index from a node list for instant country/city lookups.
 *
 * Requires enriched nodes (with country/city fields from nodeStatusV3).
 * For LCD-only nodes, call enrichNodes() first.
 *
 * @param {Array} nodes - Array of node objects with country/city fields
 * @returns {{ countries: Object, cities: Object, stats: Object }}
 *   - countries: { "Germany": [node, ...], "United States": [...] }
 *   - cities: { "Berlin": [node, ...], "New York": [...] }
 *   - stats: { totalNodes, totalCountries, totalCities, byCountry: [{country, count}] }
 */
export function buildNodeIndex(nodes) {
  const countries = {};
  const cities = {};

  for (const node of nodes) {
    // Accept both enriched shapes (flat country/city) and raw status shapes
    // (nested under location); anything missing buckets under 'Unknown'.
    const country = node.country || node.location?.country || 'Unknown';
    const city = node.city || node.location?.city || 'Unknown';

    if (!countries[country]) countries[country] = [];
    countries[country].push(node);

    // Disambiguate unknown cities per country so they don't all collapse
    // into a single 'Unknown' bucket.
    const cityKey = city === 'Unknown' ? `${city} (${country})` : city;
    if (!cities[cityKey]) cities[cityKey] = [];
    cities[cityKey].push(node);
  }

  // Stats sorted by node count (most nodes first); ties keep insertion order.
  const byCountry = Object.keys(countries)
    .map((country) => ({ country, count: countries[country].length }))
    .sort((a, b) => b.count - a.count);

  return {
    countries,
    cities,
    stats: {
      totalNodes: nodes.length,
      totalCountries: Object.keys(countries).length,
      totalCities: Object.keys(cities).length,
      byCountry,
    },
  };
}
|
|
597
|
+
|
|
598
|
+
/**
 * Enrich LCD nodes with type/country/city by probing each node's status API.
 *
 * Probes run in batches of `concurrency`; nodes whose probe rejects
 * (offline/unreachable) are dropped from the result.
 *
 * @param {Array} nodes - Raw LCD nodes from fetchAllNodes()
 * @param {object} [options]
 * @param {number} [options.concurrency=30] - Parallel probes
 * @param {function} [options.onProgress] - Callback: ({ total, done, enriched }) => void
 * @returns {Promise<Array>} Enriched nodes with serviceType, country, city, moniker, qualityScore
 */
export async function enrichNodes(nodes, options = {}) {
  const batchSize = options.concurrency || 30;
  const enriched = [];
  let done = 0;

  // Probe a single node's status endpoint and flatten the interesting fields.
  const probeOne = async (node) => {
    const status = await nodeStatusV3(node.remote_url);
    return {
      address: node.address,
      remoteUrl: node.remote_url,
      serviceType: status.type,
      moniker: status.moniker,
      country: status.location.country,
      city: status.location.city,
      peers: status.peers,
      clockDriftSec: status.clockDriftSec,
      gigabytePrices: node.gigabyte_prices,
      hourlyPrices: node.hourly_prices,
      qualityScore: scoreNode(status),
    };
  };

  for (let offset = 0; offset < nodes.length; offset += batchSize) {
    const slice = nodes.slice(offset, offset + batchSize);
    const outcomes = await Promise.allSettled(slice.map(probeOne));

    for (const outcome of outcomes) {
      if (outcome.status === 'fulfilled' && outcome.value) enriched.push(outcome.value);
    }

    done += slice.length;
    if (options.onProgress) {
      // Progress callback failures must never abort the enrichment run.
      try { options.onProgress({ total: nodes.length, done, enriched: enriched.length }); } catch {}
    }
  }

  return enriched;
}
|
|
643
|
+
|
|
644
|
+
/**
 * Score a node's expected connection quality (0-100).
 * Based on real success rates from 400+ node tests.
 * Higher = more likely to produce a working tunnel.
 */
function scoreNode(status) {
  let score = 50; // baseline

  // WireGuard is simpler and more reliable than V2Ray
  if (status.type === 'wireguard') {
    score += 20;
  }

  // Clock drift penalty — VMess fails at >120s, VLess is immune.
  // We can't know VMess vs VLess until handshake, but high drift is still risky.
  // (An undefined clockDriftSec passes the null check but yields NaN, which
  // matches none of the thresholds — i.e. no penalty, same as the original.)
  if (status.clockDriftSec !== null) {
    const drift = Math.abs(status.clockDriftSec);
    if (drift > 120) {
      score -= 40; // VMess will fail entirely (VLess OK but rare)
    } else if (drift > 60) {
      score -= 15;
    } else if (drift > 30) {
      score -= 5;
    }
  }

  // Peer count — fewer peers = less congestion
  const { peers } = status;
  if (peers !== undefined) {
    if (peers === 0) {
      score += 10; // empty node = fast
    } else if (peers < 5) {
      score += 5;
    } else if (peers > 20) {
      score -= 10;
    }
  }

  // Clamp to the documented 0-100 range.
  return Math.min(100, Math.max(0, score));
}
|
|
673
|
+
|
|
674
|
+
// ─── Fast Reconnect (Credential Cache) ───────────────────────────────────────
|
|
675
|
+
|
|
676
|
+
/**
 * Attempt fast reconnect using saved credentials. Skips payment and handshake.
 * Returns null if no saved credentials, session expired, or tunnel setup fails.
 *
 * Flow: load cached credentials → verify the session is still live on chain →
 * re-install the tunnel (WireGuard) or respawn the client process (V2Ray) →
 * verify connectivity. On ANY failure the cached credentials are cleared and
 * null is returned so the caller falls back to the normal paid flow.
 *
 * @param {object} opts - Same as connectDirect options
 * @param {ConnectionState} [state] - Connection state instance
 * @returns {Promise<object|null>} Connection result or null
 */
export async function tryFastReconnect(opts, state = _defaultState) {
  const saved = loadCredentials(opts.nodeAddress);
  if (!saved) return null;

  const onProgress = opts.onProgress || null;
  const logFn = opts.log || defaultLog;
  const fullTunnel = opts.fullTunnel !== false;
  const killSwitch = opts.killSwitch === true;
  const systemProxy = opts.systemProxy === true;
  // NOTE: removed unused locals `tlsTrust` and `tunnelInstalled` — both were
  // assigned but never read anywhere in this function.

  progress(onProgress, logFn, 'cache', `Found saved credentials for ${opts.nodeAddress}, verifying session...`);

  // Verify session is still active on chain
  try {
    const lcd = opts.lcdUrl || DEFAULT_LCD;
    const { wallet, account } = await cachedCreateWallet(opts.mnemonic);
    const existingSession = await findExistingSession(lcd, account.address, opts.nodeAddress);
    if (!existingSession || String(existingSession) !== saved.sessionId) {
      clearCredentials(opts.nodeAddress);
      progress(onProgress, logFn, 'cache', 'Saved session expired — proceeding with fresh payment');
      return null;
    }
  } catch (err) {
    // Chain query failed — can't verify session, fall back to normal flow
    progress(onProgress, logFn, 'cache', `Session verification failed (${err.message}) — proceeding with fresh payment`);
    clearCredentials(opts.nodeAddress);
    return null;
  }

  progress(onProgress, logFn, 'cache', `Session ${saved.sessionId} still active — skipping payment and handshake`);

  try {
    if (saved.serviceType === 'wireguard') {
      // Validate tunnel requirements
      if (!WG_AVAILABLE) {
        clearCredentials(opts.nodeAddress);
        return null;
      }

      // Resolve split IPs: explicit list > full tunnel (route everything) >
      // split tunnel with speedtest IPs (best-effort; falls back to full).
      let resolvedSplitIPs = null;
      if (opts.splitIPs && Array.isArray(opts.splitIPs) && opts.splitIPs.length > 0) {
        resolvedSplitIPs = opts.splitIPs;
      } else if (fullTunnel) {
        resolvedSplitIPs = null;
      } else {
        try { resolvedSplitIPs = await resolveSpeedtestIPs(); } catch { resolvedSplitIPs = null; }
      }

      const confPath = writeWgConfig(
        Buffer.from(saved.wgPrivateKey, 'base64'),
        saved.wgAssignedAddrs,
        saved.wgServerPubKey,
        saved.wgServerEndpoint,
        resolvedSplitIPs,
        { dns: resolveDnsServers(opts.dns) },
      );

      progress(onProgress, logFn, 'tunnel', 'Installing WireGuard tunnel from cached credentials...');
      // Retry install with increasing pre-delays; only the last failure is fatal.
      const installDelays = [1500, 1500, 2000];
      for (let i = 0; i < installDelays.length; i++) {
        await sleep(installDelays[i]);
        try {
          await installWgTunnel(confPath);
          state.wgTunnel = 'wgsent0';
          break;
        } catch (installErr) {
          if (i === installDelays.length - 1) throw installErr;
        }
      }

      // Verify connectivity — a tunnel that installs but can't pass traffic
      // is torn down and the credentials invalidated.
      progress(onProgress, logFn, 'verify', 'Verifying tunnel connectivity...');
      const tunnelWorks = await verifyWgConnectivity();
      if (!tunnelWorks) {
        try { await disconnectWireGuard(); } catch {}
        state.wgTunnel = null;
        clearCredentials(opts.nodeAddress);
        return null;
      }

      if (killSwitch) {
        try { enableKillSwitch(saved.wgServerEndpoint); } catch {}
      }

      progress(onProgress, logFn, 'verify', 'WireGuard reconnected from cached credentials!');
      const sessionIdStr = saved.sessionId;
      saveState({ sessionId: sessionIdStr, serviceType: 'wireguard', wgTunnelName: 'wgsent0', confPath, systemProxySet: false });
      state.connection = { sessionId: sessionIdStr, serviceType: 'wireguard', nodeAddress: opts.nodeAddress, connectedAt: Date.now() };
      events.emit('connected', { sessionId: BigInt(sessionIdStr), serviceType: 'wireguard', nodeAddress: opts.nodeAddress, cached: true });
      return {
        sessionId: sessionIdStr,
        serviceType: 'wireguard',
        nodeAddress: opts.nodeAddress,
        confPath,
        cached: true,
        cleanup: async () => {
          if (_killSwitchEnabled) disableKillSwitch();
          try { await disconnectWireGuard(); } catch {}
          // End session on chain (fire-and-forget)
          if (saved.sessionId && state._mnemonic) {
            _endSessionOnChain(saved.sessionId, state._mnemonic).then(r => events.emit('sessionEnded', { txHash: r?.transactionHash })).catch(e => events.emit('sessionEndFailed', { error: e.message }));
          }
          state.wgTunnel = null;
          state.connection = null;
          state._mnemonic = null;
          clearState();
        },
      };

    } else if (saved.serviceType === 'v2ray') {
      const v2rayExePath = findV2RayExe(opts.v2rayExePath);
      if (!v2rayExePath) {
        clearCredentials(opts.nodeAddress);
        return null;
      }

      // Fetch node info to get serverHost
      const nodeInfo = await queryNode(opts.nodeAddress, { lcdUrl: opts.lcdUrl || DEFAULT_LCD });
      const serverHost = new URL(nodeInfo.remote_url).hostname;

      // Rebuild V2Ray config from saved metadata
      // Sequential increment from random start avoids repeated collisions
      // with TIME_WAIT ports that pure random retries can hit.
      const startPort1 = 10800 + Math.floor(Math.random() * 1000);
      let socksPort = startPort1;
      for (let i = 0; i < 5; i++) {
        socksPort = startPort1 + i;
        if (await checkPortFree(socksPort)) break;
      }
      const config = buildV2RayClientConfig(serverHost, saved.v2rayConfig, saved.v2rayUuid, socksPort, { dns: resolveDnsServers(opts.dns), systemProxy: opts.systemProxy === true });

      const tmpDir = path.join(os.tmpdir(), 'sentinel-v2ray');
      mkdirSync(tmpDir, { recursive: true, mode: 0o700 });
      const cfgPath = path.join(tmpDir, 'config.json');

      // Try each candidate outbound until one passes a live SOCKS5 check.
      let workingOutbound = null;
      for (const ob of config.outbounds) {
        // Kill any previous attempt's process before spawning the next one.
        if (state.v2rayProc) {
          state.v2rayProc.kill();
          state.v2rayProc = null;
          await sleep(2000);
        }

        const attempt = {
          ...config,
          outbounds: [ob],
          routing: {
            domainStrategy: 'IPIfNonMatch',
            rules: [
              { inboundTag: ['api'], outboundTag: 'api', type: 'field' },
              { inboundTag: ['proxy'], outboundTag: ob.tag, type: 'field' },
            ],
          },
        };

        writeFileSync(cfgPath, JSON.stringify(attempt, null, 2), { mode: 0o600 });
        const proc = spawn(v2rayExePath, ['run', '-config', cfgPath], { stdio: 'pipe' });
        // Filter V2Ray stderr noise (fast reconnect path)
        if (proc.stderr) {
          proc.stderr.on('data', (chunk) => {
            const lines = chunk.toString().split('\n');
            for (const line of lines) {
              const trimmed = line.trim();
              if (!trimmed || trimmed.includes('insufficient header')) continue;
              logFn?.(`[v2ray stderr] ${trimmed}`);
            }
          });
        }
        // Delete the on-disk config shortly after V2Ray has read it (it holds secrets).
        setTimeout(() => { try { unlinkSync(cfgPath); } catch {} }, 2000);

        const ready = await waitForPort(socksPort, DEFAULT_TIMEOUTS.v2rayReady);
        if (!ready || proc.exitCode !== null) {
          proc.kill();
          continue;
        }

        // Test SOCKS5 connectivity
        let connected = false;
        try {
          const { SocksProxyAgent } = await import('socks-proxy-agent');
          const auth = config._socksAuth;
          const proxyUrl = (auth?.user && auth?.pass)
            ? `socks5://${auth.user}:${auth.pass}@127.0.0.1:${socksPort}`
            : `socks5://127.0.0.1:${socksPort}`;
          const agent = new SocksProxyAgent(proxyUrl);
          try {
            await axios.get('https://www.google.com', { httpAgent: agent, httpsAgent: agent, timeout: 10000, maxRedirects: 2, validateStatus: () => true });
            connected = true;
          } catch {} finally { agent.destroy(); }
        } catch {}

        if (connected) {
          workingOutbound = ob;
          state.v2rayProc = proc;
          break;
        }
        proc.kill();
      }

      if (!workingOutbound) {
        clearCredentials(opts.nodeAddress);
        return null;
      }

      if (systemProxy && socksPort) {
        setSystemProxy(socksPort, state);
      }

      progress(onProgress, logFn, 'verify', 'V2Ray reconnected from cached credentials!');
      const sessionIdStr = saved.sessionId;
      saveState({ sessionId: sessionIdStr, serviceType: 'v2ray', v2rayPid: state.v2rayProc?.pid, socksPort, systemProxySet: state.systemProxy, nodeAddress: opts.nodeAddress });
      state.connection = { sessionId: sessionIdStr, serviceType: 'v2ray', nodeAddress: opts.nodeAddress, socksPort, connectedAt: Date.now() };
      events.emit('connected', { sessionId: BigInt(sessionIdStr), serviceType: 'v2ray', nodeAddress: opts.nodeAddress, cached: true });
      return {
        sessionId: sessionIdStr,
        serviceType: 'v2ray',
        nodeAddress: opts.nodeAddress,
        socksPort,
        outbound: workingOutbound.tag,
        cached: true,
        cleanup: async () => {
          if (state.v2rayProc) { state.v2rayProc.kill(); state.v2rayProc = null; await sleep(500); }
          if (state.systemProxy) clearSystemProxy(state);
          // End session on chain (fire-and-forget)
          if (sessionIdStr && state._mnemonic) {
            _endSessionOnChain(sessionIdStr, state._mnemonic).then(r => events.emit('sessionEnded', { txHash: r?.transactionHash })).catch(e => events.emit('sessionEndFailed', { error: e.message }));
          }
          state.connection = null;
          state._mnemonic = null;
          clearState();
        },
      };
    }
  } catch (err) {
    // Fast reconnect failed — clear stale credentials, fall back to normal flow
    progress(onProgress, logFn, 'cache', `Fast reconnect failed (${err.message}) — falling back to normal flow`);
    clearCredentials(opts.nodeAddress);
    return null;
  }

  // Unknown saved.serviceType — nothing to reconnect.
  return null;
}
|
|
930
|
+
|
|
931
|
+
// ─── Direct Connection (Pay per GB) ─────────────────────────────────────────
|
|
932
|
+
|
|
933
|
+
/**
 * Connect to a node by paying directly per GB.
 *
 * Flow: check existing session → pay for new session → handshake → tunnel
 *
 * @param {object} opts
 * @param {string} opts.mnemonic - BIP39 mnemonic
 * @param {string} opts.nodeAddress - sentnode1... address
 * @param {string} opts.rpcUrl - Chain RPC (default: https://rpc.sentinel.co:443)
 * @param {string} opts.lcdUrl - Chain LCD (default: https://lcd.sentinel.co)
 * @param {number} opts.gigabytes - Bandwidth to purchase (default: 1)
 * @param {number} opts.hours - Buy an hours-based session instead (> 0). Overrides preferHourly;
 *   requires the node to list hourly_prices with the udvpn denom.
 * @param {boolean} opts.preferHourly - Prefer hourly sessions when the node offers them (default: false).
 *   When true and the node lists hourly_prices with the udvpn denom, a { hours: 1 } session is
 *   purchased. No cross-unit price comparison is done (GB vs hour prices are different units).
 * @param {string} opts.v2rayExePath - Path to v2ray.exe (auto-detected if missing)
 * @param {boolean} opts.fullTunnel - WireGuard: route ALL traffic through VPN (default: true). Set false for split tunnel.
 *   Set to true for production VPN apps that need full IP masking.
 * @param {string[]} opts.splitIPs - WireGuard split tunnel IPs. Overrides fullTunnel.
 *   Pass specific IPs to route only those through VPN. Ignored if fullTunnel is true.
 * @param {boolean} opts.systemProxy - V2Ray: auto-set Windows system SOCKS proxy (default: false).
 *   Set to true for production VPN apps. Caution: if V2Ray crashes, system proxy points to dead port.
 * @param {boolean} opts.killSwitch - Enable kill switch — blocks all traffic if tunnel drops (default: false). Windows only.
 * @param {boolean} opts.forceNewSession - Always pay for a new session, skip findExistingSession (default: false).
 *   Use when multiple apps share one wallet to avoid "already exists" errors from stale sessions.
 * @param {function} opts.onProgress - Optional callback: (step, detail) => void
 *   Steps: 'wallet' | 'node-check' | 'validate' | 'session' | 'handshake' | 'tunnel' | 'verify' | 'proxy'
 * @param {function} opts.log - Optional log function (default: console.log). All SDK output goes through this.
 *   Pass a custom function to route logs to your app's logging system.
 * @returns {{ sessionId, serviceType, socksPort?, cleanup() }}
 */
export async function connectDirect(opts) {
  warnIfNoCleanup('connectDirect');
  // ── Input validation (fail fast before any network/chain calls) ──
  validateConnectOpts(opts, 'connectDirect');
  if (opts.gigabytes != null) {
    const g = Number(opts.gigabytes);
    if (!Number.isInteger(g) || g < 1 || g > 100) throw new ValidationError(ErrorCodes.INVALID_GIGABYTES, 'gigabytes must be a positive integer (1-100)', { value: opts.gigabytes });
  }

  // ── Connection mutex (prevent concurrent connects) ──
  // Only the call that actually takes the lock releases it in the finally below.
  const ownsLock = !opts._skipLock && !_connectLock;
  if (!opts._skipLock && _connectLock) throw new SentinelError(ErrorCodes.ALREADY_CONNECTED, 'Connection already in progress');
  if (ownsLock) _connectLock = true;
  try {

    const gigabytes = opts.gigabytes || 1;
    const forceNewSession = !!opts.forceNewSession;

    // ── Fast Reconnect: check for saved credentials ──
    if (!forceNewSession) {
      // Set mnemonic on state BEFORE fast reconnect — needed for _endSessionOnChain() on disconnect
      (opts._state || _defaultState)._mnemonic = opts.mnemonic;
      const fast = await tryFastReconnect(opts, opts._state || _defaultState);
      if (fast) {
        _circuitBreaker.delete(opts.nodeAddress);
        return fast;
      }
    }

    // Resolve the pricing model shared by both payment strategies:
    // explicit hours > preferHourly > default per-GB.
    // Returns { useHourly, sessionGigabytes, sessionHours, sessionMaxPrice }.
    function resolvePricing(nodeInfo) {
      const udvpnPrice = nodeInfo.gigabyte_prices.find(p => p.denom === 'udvpn');
      if (!udvpnPrice) throw new NodeError(ErrorCodes.NODE_NO_UDVPN, 'Node does not accept udvpn', { nodeAddress: opts.nodeAddress });

      // preferHourly = use hourly if node offers it. No cross-unit price comparison
      // (GB price vs hour price are different units — comparing them is meaningless).
      const hourlyPrice = (nodeInfo.hourly_prices || []).find(p => p.denom === 'udvpn');
      const explicitHours = opts.hours > 0 ? opts.hours : 0;
      const useHourly = explicitHours > 0 || (opts.preferHourly && !!hourlyPrice);

      // Guard applies to BOTH strategies now — previously retryPayment skipped it
      // and crashed with a raw TypeError when opts.hours was set on a node
      // without hourly pricing.
      if (useHourly && !hourlyPrice) {
        throw new NodeError(ErrorCodes.NODE_OFFLINE, `Node ${opts.nodeAddress} has no hourly pricing — cannot use hours-based session. Use gigabytes instead.`);
      }

      return {
        useHourly,
        sessionGigabytes: useHourly ? 0 : gigabytes,
        sessionHours: useHourly ? (explicitHours || 1) : 0,
        sessionMaxPrice: useHourly ? hourlyPrice : udvpnPrice,
      };
    }

    // Build the MsgStartSession payload for the resolved pricing.
    function buildStartSessionMsg(account, pricing) {
      return {
        typeUrl: MSG_TYPES.START_SESSION,
        value: {
          from: account.address,
          node_address: opts.nodeAddress,
          gigabytes: pricing.sessionGigabytes,
          hours: pricing.sessionHours,
          max_price: { denom: 'udvpn', base_value: pricing.sessionMaxPrice.base_value, quote_value: pricing.sessionMaxPrice.quote_value },
        },
      };
    }

    // Payment strategy for direct pay-per-GB
    async function directPayment(ctx) {
      const { client, account, nodeInfo, lcd, logFn, onProgress, signal } = ctx;

      // Check for existing session (avoid double-pay) — skip if forceNewSession
      let sessionId = null;
      if (!forceNewSession) {
        progress(onProgress, logFn, 'session', 'Checking for existing session...');
        checkAborted(signal);
        sessionId = await findExistingSession(lcd, account.address, opts.nodeAddress);
        if (sessionId && isSessionPoisoned(String(sessionId))) {
          progress(onProgress, logFn, 'session', `Session ${sessionId} previously failed — skipping`);
          sessionId = null;
        }
      }

      if (sessionId) {
        progress(onProgress, logFn, 'session', `Reusing existing session: ${sessionId}`);
        return { sessionId: BigInt(sessionId) };
      }

      // Pay for new session — choose hourly vs per-GB pricing
      const pricing = resolvePricing(nodeInfo);
      const msg = buildStartSessionMsg(account, pricing);

      checkAborted(signal);
      const pricingMode = pricing.useHourly ? 'hourly' : 'per-GB';
      progress(onProgress, logFn, 'session', `Broadcasting session TX (${pricingMode})...`);
      const result = await broadcastWithInactiveRetry(client, account.address, [msg], logFn, onProgress);
      const extractedId = extractId(result, /session/i, ['session_id', 'id']);
      if (!extractedId) throw new ChainError(ErrorCodes.SESSION_EXTRACT_FAILED, 'Failed to extract session ID from TX result — check TX events', { txHash: result.transactionHash });
      sessionId = BigInt(extractedId);
      progress(onProgress, logFn, 'session', `Session created: ${sessionId} (${pricingMode}, tx: ${result.transactionHash})`);
      return { sessionId };
    }

    // Retry strategy: if handshake fails with "already exists", pay for fresh session
    async function retryPayment(ctx, _hsErr) {
      const { client, account, nodeInfo, logFn, onProgress, signal } = ctx;

      // Retry uses same hourly logic (and the same guards) as directPayment
      const pricing = resolvePricing(nodeInfo);
      const msg = buildStartSessionMsg(account, pricing);

      checkAborted(signal);
      const result = await broadcastWithInactiveRetry(client, account.address, [msg], logFn, onProgress);
      const retryExtracted = extractId(result, /session/i, ['session_id', 'id']);
      if (!retryExtracted) throw new ChainError(ErrorCodes.SESSION_EXTRACT_FAILED, 'Failed to extract session ID from retry TX result — check TX events', { txHash: result.transactionHash });
      const sessionId = BigInt(retryExtracted);
      progress(onProgress, logFn, 'session', `Fresh session: ${sessionId} (tx: ${result.transactionHash})`);
      return { sessionId };
    }

    const result = await connectInternal(opts, directPayment, retryPayment, opts._state || _defaultState);
    // Record success — clear circuit breaker for this node
    _circuitBreaker.delete(opts.nodeAddress);
    return result;

  } finally { if (ownsLock) _connectLock = false; }
}
|
|
1094
|
+
|
|
1095
|
+
/**
 * Connect with auto-fallback: on failure, try next best node automatically.
 * Uses queryOnlineNodes to find candidates, then tries up to `maxAttempts` nodes.
 *
 * v25: Supports filtering by countries, maxPriceDvpn, minScore, excludeCountries.
 *
 * @param {object} opts - Same as connectDirect, plus:
 * @param {number} opts.maxAttempts - Max nodes to try (default: 3)
 * @param {string} opts.serviceType - Filter nodes by type: 'wireguard' | 'v2ray' (optional)
 * @param {string[]} opts.countries - Only try nodes in these countries (optional)
 * @param {string[]} opts.excludeCountries - Skip nodes in these countries (optional)
 * @param {number} opts.maxPriceDvpn - Max price in P2P per GB (optional)
 * @param {number} opts.minScore - Minimum quality score (optional)
 * @param {number} opts.maxSpend - Retry budget in udvpn; default derived from cheapest candidate (optional)
 * @param {string[]} opts.nodePool - Restrict candidates to these node addresses (optional, v28)
 * @param {{ threshold?: number, ttlMs?: number }} opts.circuitBreaker - Per-call circuit breaker config (optional)
 * @returns {{ sessionId, serviceType, socksPort?, cleanup(), nodeAddress }}
 * @throws {ValidationError} on bad opts
 * @throws {SentinelError} ALREADY_CONNECTED, ABORTED, or ALL_NODES_FAILED
 */
export async function connectAuto(opts) {
  warnIfNoCleanup('connectAuto');
  if (!opts || typeof opts !== 'object') throw new ValidationError(ErrorCodes.INVALID_OPTIONS, 'connectAuto() requires an options object');
  if (typeof opts.mnemonic !== 'string' || opts.mnemonic.trim().split(/\s+/).length < 12) {
    throw new ValidationError(ErrorCodes.INVALID_MNEMONIC, 'mnemonic must be a 12+ word BIP39 string');
  }
  if (opts.maxAttempts != null && (!Number.isInteger(opts.maxAttempts) || opts.maxAttempts < 1)) {
    throw new ValidationError(ErrorCodes.INVALID_OPTIONS, 'maxAttempts must be a positive integer');
  }

  // ── Connection mutex (prevent concurrent connects) ──
  if (_connectLock) throw new SentinelError(ErrorCodes.ALREADY_CONNECTED, 'Connection already in progress');
  _connectLock = true;
  _abortConnect = false; // v30: reset abort flag at start of new connection attempt

  // Throws ABORTED (and re-arms the flag) if disconnect() requested cancellation.
  const throwIfAborted = () => {
    if (_abortConnect) {
      _abortConnect = false;
      throw new SentinelError(ErrorCodes.ABORTED, 'Connection was cancelled by disconnect');
    }
  };

  try {
    // v25: per-call circuit breaker config
    if (opts.circuitBreaker) configureCircuitBreaker(opts.circuitBreaker);

    const maxAttempts = opts.maxAttempts || 3;
    const logFn = opts.log || console.log;
    const errors = []; // one entry per failed node attempt

    // If nodeAddress specified, try it first (skip circuit breaker check for explicit choice)
    if (opts.nodeAddress) {
      throwIfAborted(); // v30: check abort flag before each attempt
      try {
        return await connectDirect({ ...opts, _skipLock: true });
      } catch (err) {
        recordNodeFailure(opts.nodeAddress);
        errors.push({ address: opts.nodeAddress, error: err.message });
        logFn(`[connectAuto] ${opts.nodeAddress} failed: ${err.message} — trying fallback nodes...`);
      }
    }

    // Find online nodes, excluding circuit-broken ones
    logFn('[connectAuto] Scanning for online nodes...');
    const nodes = await queryOnlineNodes({
      serviceType: opts.serviceType,
      maxNodes: maxAttempts * 3, // over-fetch so filters still leave enough candidates
      onNodeProbed: opts.onNodeProbed,
    });

    throwIfAborted(); // v30: check abort after slow queryOnlineNodes call

    // v25: Apply filters using filterNodes + custom exclusions
    let filtered = nodes.filter(n => n.address !== opts.nodeAddress && !isCircuitOpen(n.address));
    if (opts.countries || opts.maxPriceDvpn != null || opts.minScore != null) {
      filtered = filterNodes(filtered, {
        country: opts.countries?.[0], // filterNodes supports single country
        maxPriceDvpn: opts.maxPriceDvpn,
        minScore: opts.minScore,
      });
      // Multi-country support (filterNodes does single, we handle array)
      if (opts.countries && opts.countries.length > 1) {
        const lc = opts.countries.map(c => c.toLowerCase());
        filtered = filtered.filter(n => lc.some(c => (n.country || '').toLowerCase().includes(c)));
      }
    }
    if (opts.excludeCountries?.length) {
      const exc = opts.excludeCountries.map(c => c.toLowerCase());
      filtered = filtered.filter(n => !exc.some(c => (n.country || '').toLowerCase().includes(c)));
    }

    // v25: Emit events for skipped nodes (clock drift, circuit breaker).
    // Set membership instead of Array.includes avoids O(n²) over the scan results.
    const keptSet = new Set(filtered);
    const skipped = nodes.filter(n => !keptSet.has(n) && n.address !== opts.nodeAddress);
    for (const n of skipped) {
      if (isCircuitOpen(n.address)) {
        events.emit('progress', { event: 'node.skipped', reason: 'circuit_breaker', nodeAddress: n.address, ts: Date.now() });
      }
      // != null also excludes undefined (unprobed drift), not just null
      if (n.clockDriftSec != null && Math.abs(n.clockDriftSec) > 120 && n.serviceType === 'v2ray') {
        events.emit('progress', { event: 'node.skipped', reason: 'clock_drift', nodeAddress: n.address, driftSeconds: n.clockDriftSec, ts: Date.now() });
      }
    }

    // v28: nodePool — restrict to a specific set of node addresses
    if (opts.nodePool?.length) {
      const poolSet = new Set(opts.nodePool);
      filtered = filtered.filter(n => poolSet.has(n.address));
    }

    const candidates = filtered;

    // Per-GB udvpn price of a node, or null when it doesn't quote one (or quotes garbage).
    const udvpnGbPrice = (n) => {
      const entry = (n.gigabyte_prices || []).find(price => price.denom === 'udvpn');
      return entry ? parseInt(entry.quote_value || '0', 10) : null;
    };

    // Retry budget: limit total spend to maxSpend (default: 2x cheapest node price).
    // FIX: Math.min() of an empty list is Infinity, which previously made maxSpend
    // Infinite (silently disabling the budget) whenever no candidate quoted a
    // udvpn price. Treat "no known prices" as cheapestPrice 0 so the intended
    // 100 P2P default applies.
    const knownPrices = candidates.map(udvpnGbPrice).filter(p => p != null && Number.isFinite(p));
    const cheapestPrice = knownPrices.length > 0 ? Math.min(...knownPrices) : 0;
    const maxSpend = opts.maxSpend || (cheapestPrice > 0 ? cheapestPrice * 2 + 1000000 : 100_000_000);
    let totalSpent = 0;

    const attemptCount = Math.min(candidates.length, maxAttempts);
    for (let i = 0; i < attemptCount; i++) {
      throwIfAborted(); // disconnect() sets the abort flag between retries

      // Check retry budget — stop if we've spent too much.
      // Unknown price: assume a 50 P2P worst case so the budget still bites.
      const nodePrice = udvpnGbPrice(candidates[i]) ?? 50_000_000;
      if (totalSpent > 0 && totalSpent + nodePrice > maxSpend) {
        logFn(`[connectAuto] Retry budget exhausted (spent ${(totalSpent / 1e6).toFixed(1)} P2P, next would cost ${(nodePrice / 1e6).toFixed(1)} P2P, max ${(maxSpend / 1e6).toFixed(1)} P2P). Stopping.`);
        break;
      }

      const node = candidates[i];
      logFn(`[connectAuto] Trying ${node.address} (${i + 1}/${attemptCount})...`);
      try {
        // Pass _cachedWallet from first attempt to skip wallet+RPC re-creation on retries
        const retryOpts = { ...opts, nodeAddress: node.address, _skipLock: true };
        if (i > 0 && opts._cachedWallet) retryOpts._cachedWallet = opts._cachedWallet;
        return await connectDirect(retryOpts);
      } catch (err) {
        recordNodeFailure(node.address);
        // Track spend: if error is AFTER payment (tunnel failure), count the cost
        if (err.code !== 'INSUFFICIENT_BALANCE' && err.code !== 'NODE_OFFLINE' && err.code !== 'NODE_NOT_FOUND') {
          totalSpent += nodePrice;
        }
        errors.push({ address: node.address, error: err.message, spent: nodePrice });
        logFn(`[connectAuto] ${node.address} failed: ${err.message}`);
      }
    }

    throw new SentinelError(ErrorCodes.ALL_NODES_FAILED,
      `All ${errors.length} nodes failed (spent ~${(totalSpent / 1e6).toFixed(1)} P2P)`,
      { attempts: errors, totalSpent });

  } finally { _connectLock = false; }
}
|
|
1248
|
+
|
|
1249
|
+
// ─── Plan Connection (Subscribe to existing plan) ────────────────────────────

/**
 * Connect via a plan subscription.
 *
 * Flow: subscribe to plan → start session via subscription → handshake → tunnel
 *
 * @param {object} opts
 * @param {string} opts.mnemonic - BIP39 mnemonic
 * @param {number|string} opts.planId - Plan ID to subscribe to
 * @param {string} opts.nodeAddress - sentnode1... address (must be linked to plan)
 * @param {string} opts.rpcUrl - Chain RPC
 * @param {string} opts.lcdUrl - Chain LCD
 * @param {string} opts.feeGranter - Optional fee-granter address (plan owner) for gasless TX
 * @param {string} opts.v2rayExePath - Path to v2ray.exe (auto-detected if missing)
 * @param {boolean} opts.fullTunnel - WireGuard: route ALL traffic (default: true)
 * @param {string[]} opts.splitIPs - WireGuard split tunnel IPs (overrides fullTunnel)
 * @param {boolean} opts.systemProxy - V2Ray: auto-set Windows system proxy (default: false)
 * @param {boolean} opts.killSwitch - Enable kill switch — blocks all traffic if tunnel drops (default: false)
 * @param {function} opts.onProgress - Optional callback: (step, detail) => void
 * @param {function} opts.log - Optional log function (default: console.log)
 */
export async function connectViaPlan(opts) {
  warnIfNoCleanup('connectViaPlan');
  // ── Input validation ──
  validateConnectOpts(opts, 'connectViaPlan');
  // Plan ID 0 (number or string) is never a valid on-chain plan.
  if (opts.planId == null || opts.planId === '' || opts.planId === 0 || opts.planId === '0') {
    throw new ValidationError(ErrorCodes.INVALID_PLAN_ID, 'connectViaPlan requires opts.planId (number or string)', { value: opts.planId });
  }
  let planIdBigInt;
  try {
    planIdBigInt = BigInt(opts.planId);
  } catch {
    throw new ValidationError(ErrorCodes.INVALID_PLAN_ID, `Invalid planId: "${opts.planId}" — must be a numeric value`, { value: opts.planId });
  }

  // ── Connection mutex (prevent concurrent connects) ──
  if (_connectLock) throw new SentinelError(ErrorCodes.ALREADY_CONNECTED, 'Connection already in progress');
  _connectLock = true;
  try {

    // Payment strategy for plan subscription: broadcast MsgStartSession against
    // the plan, optionally riding on a fee grant from the plan owner.
    async function planPayment(ctx) {
      const { client, account, logFn, onProgress, signal } = ctx;
      const msg = {
        typeUrl: MSG_TYPES.PLAN_START_SESSION,
        value: {
          from: account.address,
          id: planIdBigInt,
          denom: 'udvpn',
          renewalPricePolicy: 0,
          nodeAddress: opts.nodeAddress,
        },
      };

      checkAborted(signal);

      // Fee grant: the app passes the plan owner's address as feeGranter.
      // The plan operator is responsible for granting fee allowance to subscribers.
      // We just include it in the TX — if the grant exists on-chain, gas is free.
      // If it doesn't exist, the chain rejects and we fall back to user-paid gas.
      const feeGranter = opts.feeGranter || null;

      progress(onProgress, logFn, 'session', `Subscribing to plan ${opts.planId} + starting session${feeGranter ? ' (fee granted)' : ''}...`);

      let result;
      if (feeGranter) {
        try {
          result = await broadcastWithFeeGrant(client, account.address, [msg], feeGranter);
        } catch (feeErr) {
          // Fee grant TX failed (grant expired, revoked, or never existed) — fall back to user-paid.
          // FIX: surface the underlying error so operators can tell WHY the grant failed.
          progress(onProgress, logFn, 'session', `Fee grant failed (${feeErr.message}), paying gas from wallet...`);
          result = await broadcastWithInactiveRetry(client, account.address, [msg], logFn, onProgress);
        }
      } else {
        result = await broadcastWithInactiveRetry(client, account.address, [msg], logFn, onProgress);
      }
      const planExtracted = extractId(result, /session/i, ['session_id', 'id']);
      if (!planExtracted) throw new ChainError(ErrorCodes.SESSION_EXTRACT_FAILED, 'Failed to extract session ID from plan TX result — check TX events', { txHash: result.transactionHash });
      const sessionId = BigInt(planExtracted);
      // Subscription ID is best-effort: present on first subscribe, may be absent on re-use.
      const subscriptionId = extractId(result, /subscription/i, ['subscription_id', 'id']);
      progress(onProgress, logFn, 'session', `Session: ${sessionId}${subscriptionId ? `, Subscription: ${subscriptionId}` : ''}`);
      return { sessionId, subscriptionId };
    }

    // No retry for plan connections (plan payment is idempotent)
    const result = await connectInternal(opts, planPayment, null, opts._state || _defaultState);
    return result;

  } finally { _connectLock = false; }
}
|
|
1339
|
+
|
|
1340
|
+
// ─── Subscription Connection (Use existing subscription) ─────────────────

/**
 * Connect via an existing subscription.
 *
 * Flow: start session via subscription → handshake → tunnel
 * Unlike connectViaPlan, this reuses an existing subscription instead of creating a new one.
 *
 * @param {object} opts
 * @param {string} opts.mnemonic - BIP39 mnemonic
 * @param {number|string} opts.subscriptionId - Existing subscription ID
 * @param {string} opts.nodeAddress - sentnode1... address (must be linked to subscription's plan)
 * @param {string} opts.rpcUrl - Chain RPC
 * @param {string} opts.lcdUrl - Chain LCD
 * @param {string} opts.v2rayExePath - Path to v2ray.exe (auto-detected if missing)
 * @param {boolean} opts.fullTunnel - WireGuard: route ALL traffic (default: true)
 * @param {string[]} opts.splitIPs - WireGuard split tunnel IPs (overrides fullTunnel)
 * @param {boolean} opts.systemProxy - V2Ray: auto-set Windows system proxy (default: false)
 * @param {boolean} opts.killSwitch - Enable kill switch — blocks all traffic if tunnel drops (default: false)
 * @param {function} opts.onProgress - Optional callback: (step, detail) => void
 * @param {function} opts.log - Optional log function (default: console.log)
 */
export async function connectViaSubscription(opts) {
  warnIfNoCleanup('connectViaSubscription');
  validateConnectOpts(opts, 'connectViaSubscription');

  // Reject missing/empty IDs up front, then coerce to BigInt for the chain message.
  if (opts.subscriptionId == null || opts.subscriptionId === '') {
    throw new ValidationError(ErrorCodes.INVALID_OPTIONS, 'connectViaSubscription requires opts.subscriptionId (number or string)', { value: opts.subscriptionId });
  }
  let subIdBigInt;
  try {
    subIdBigInt = BigInt(opts.subscriptionId);
  } catch {
    throw new ValidationError(ErrorCodes.INVALID_OPTIONS, `Invalid subscriptionId: "${opts.subscriptionId}" — must be a numeric value`, { value: opts.subscriptionId });
  }

  // ── Connection mutex (prevent concurrent connects) ──
  if (_connectLock) throw new SentinelError(ErrorCodes.ALREADY_CONNECTED, 'Connection already in progress');
  _connectLock = true;
  try {

    // Payment strategy: start a session against the existing subscription.
    const subPayment = async (ctx) => {
      const { client, account, logFn, onProgress, signal } = ctx;
      const startMsg = {
        typeUrl: MSG_TYPES.SUB_START_SESSION,
        value: {
          from: account.address,
          id: subIdBigInt,
          nodeAddress: opts.nodeAddress,
        },
      };

      checkAborted(signal);
      progress(onProgress, logFn, 'session', `Starting session via subscription ${opts.subscriptionId}...`);
      const txResult = await broadcastWithInactiveRetry(client, account.address, [startMsg], logFn, onProgress);

      const rawSessionId = extractId(txResult, /session/i, ['session_id', 'id']);
      if (!rawSessionId) {
        throw new ChainError(ErrorCodes.SESSION_EXTRACT_FAILED, 'Failed to extract session ID from subscription TX result', { txHash: txResult.transactionHash });
      }
      const sessionId = BigInt(rawSessionId);
      progress(onProgress, logFn, 'session', `Session: ${sessionId} (subscription ${opts.subscriptionId})`);
      return { sessionId, subscriptionId: opts.subscriptionId };
    };

    // No retry strategy for subscription connections.
    return await connectInternal(opts, subPayment, null, opts._state || _defaultState);

  } finally { _connectLock = false; }
}
|
|
1406
|
+
|
|
1407
|
+
// ─── Shared Validation ───────────────────────────────────────────────────────

// Validates the common option set shared by every connect* entry point.
// Throws ValidationError on the first problem found; returns undefined when valid.
function validateConnectOpts(opts, fnName) {
  if (opts === null || typeof opts !== 'object' || !opts) {
    throw new ValidationError(ErrorCodes.INVALID_OPTIONS, `${fnName}() requires an options object`);
  }

  // Mnemonic: must be a string of at least 12 whitespace-separated words.
  const wordCount = typeof opts.mnemonic === 'string' ? opts.mnemonic.trim().split(/\s+/).length : 0;
  if (wordCount < 12) {
    throw new ValidationError(ErrorCodes.INVALID_MNEMONIC, 'mnemonic must be a 12+ word BIP39 string', { wordCount });
  }

  // Node address: bech32 "sentnode1" prefix + 38 chars = 47 total.
  const addressOk = typeof opts.nodeAddress === 'string' && /^sentnode1[a-z0-9]{38}$/.test(opts.nodeAddress);
  if (!addressOk) {
    throw new ValidationError(ErrorCodes.INVALID_NODE_ADDRESS, 'nodeAddress must be a valid sentnode1... bech32 address (47 characters)', { value: opts.nodeAddress });
  }

  // Endpoint URLs are optional, but when present they must be strings.
  for (const key of ['rpcUrl', 'lcdUrl']) {
    const value = opts[key];
    if (value != null && typeof value !== 'string') {
      throw new ValidationError(ErrorCodes.INVALID_URL, `${key} must be a string URL`, { value });
    }
  }
}
|
|
1420
|
+
|
|
1421
|
+
// ─── Shared Connect Flow (eliminates connectDirect/connectViaPlan duplication) ─
|
|
1422
|
+
|
|
1423
|
+
async function connectInternal(opts, paymentStrategy, retryStrategy, state = _defaultState) {
|
|
1424
|
+
const signal = opts.signal; // AbortController support
|
|
1425
|
+
const _connectStart = Date.now(); // v25: metrics timing
|
|
1426
|
+
checkAborted(signal);
|
|
1427
|
+
|
|
1428
|
+
// Handle existing connection
|
|
1429
|
+
if (state.isConnected) {
|
|
1430
|
+
if (opts.allowReconnect === false) {
|
|
1431
|
+
throw new SentinelError(ErrorCodes.ALREADY_CONNECTED,
|
|
1432
|
+
'Already connected. Disconnect first or set allowReconnect: true.',
|
|
1433
|
+
{ nodeAddress: state.connection?.nodeAddress });
|
|
1434
|
+
}
|
|
1435
|
+
const prev = state.connection;
|
|
1436
|
+
await disconnectState(state);
|
|
1437
|
+
if (opts.log || defaultLog) (opts.log || defaultLog)(`[connect] Disconnected from ${prev?.nodeAddress || 'previous node'}`);
|
|
1438
|
+
}
|
|
1439
|
+
|
|
1440
|
+
const onProgress = opts.onProgress || null;
|
|
1441
|
+
const logFn = opts.log || defaultLog;
|
|
1442
|
+
const fullTunnel = opts.fullTunnel !== false; // v26c: default TRUE (was false — caused "IP didn't change" confusion)
|
|
1443
|
+
const systemProxy = opts.systemProxy === true;
|
|
1444
|
+
const killSwitch = opts.killSwitch === true;
|
|
1445
|
+
const timeouts = { ...DEFAULT_TIMEOUTS, ...opts.timeouts };
|
|
1446
|
+
const tlsTrust = opts.tlsTrust || 'tofu'; // 'tofu' (default) | 'none' (insecure)
|
|
1447
|
+
|
|
1448
|
+
events.emit('connecting', { nodeAddress: opts.nodeAddress });
|
|
1449
|
+
|
|
1450
|
+
// 1. Wallet + key derivation in parallel (both derive from same mnemonic, independent)
|
|
1451
|
+
// v21: parallelized — saves ~300ms (was sequential)
|
|
1452
|
+
progress(onProgress, logFn, 'wallet', 'Setting up wallet...');
|
|
1453
|
+
checkAborted(signal);
|
|
1454
|
+
const [{ wallet, account }, privKey] = await Promise.all([
|
|
1455
|
+
cachedCreateWallet(opts.mnemonic),
|
|
1456
|
+
privKeyFromMnemonic(opts.mnemonic),
|
|
1457
|
+
]);
|
|
1458
|
+
|
|
1459
|
+
// Store mnemonic on state for session-end TX on disconnect (fire-and-forget cleanup)
|
|
1460
|
+
state._mnemonic = opts.mnemonic;
|
|
1461
|
+
|
|
1462
|
+
// 2. RPC connect + LCD lookup in parallel (independent network calls)
|
|
1463
|
+
// v21: parallelized — saves 1-3s (was sequential)
|
|
1464
|
+
progress(onProgress, logFn, 'wallet', 'Connecting to chain endpoints...');
|
|
1465
|
+
checkAborted(signal);
|
|
1466
|
+
|
|
1467
|
+
const rpcPromise = opts.rpcUrl
|
|
1468
|
+
? createClient(opts.rpcUrl, wallet).then(client => ({ client, rpc: opts.rpcUrl, name: 'user-provided' }))
|
|
1469
|
+
: tryWithFallback(RPC_ENDPOINTS, async (url) => createClient(url, wallet), 'RPC connect')
|
|
1470
|
+
.then(({ result, endpoint, endpointName }) => ({ client: result, rpc: endpoint, name: endpointName }));
|
|
1471
|
+
|
|
1472
|
+
const lcdPromise = opts.lcdUrl
|
|
1473
|
+
? queryNode(opts.nodeAddress, { lcdUrl: opts.lcdUrl }).then(info => ({ nodeInfo: info, lcd: opts.lcdUrl }))
|
|
1474
|
+
: queryNode(opts.nodeAddress).then(info => ({ nodeInfo: info, lcd: DEFAULT_LCD }));
|
|
1475
|
+
|
|
1476
|
+
const [rpcResult, lcdResult] = await Promise.all([rpcPromise, lcdPromise]);
|
|
1477
|
+
const { client, rpc } = rpcResult;
|
|
1478
|
+
if (rpcResult.name !== 'user-provided') progress(onProgress, logFn, 'wallet', `RPC: ${rpcResult.name} (${rpc})`);
|
|
1479
|
+
let { nodeInfo, lcd } = lcdResult;
|
|
1480
|
+
|
|
1481
|
+
// Balance check — verify wallet has enough P2P before paying for session
|
|
1482
|
+
// Dry-run mode skips balance enforcement (wallet may be unfunded)
|
|
1483
|
+
checkAborted(signal);
|
|
1484
|
+
try {
|
|
1485
|
+
const bal = await getBalance(client, account.address);
|
|
1486
|
+
progress(onProgress, logFn, 'wallet', `${account.address} | ${bal.dvpn.toFixed(1)} P2P`);
|
|
1487
|
+
// Check balance against actual node price + gas (not just 0.1 P2P)
|
|
1488
|
+
const nodePriceUdvpn = (nodeInfo.gigabyte_prices || []).find(p => p.denom === 'udvpn');
|
|
1489
|
+
const minRequired = nodePriceUdvpn ? parseInt(nodePriceUdvpn.quote_value || '0', 10) + 500000 : 1000000; // price + 0.5 P2P gas
|
|
1490
|
+
if (!opts.dryRun && bal.udvpn < minRequired) {
|
|
1491
|
+
throw new ChainError(ErrorCodes.INSUFFICIENT_BALANCE,
|
|
1492
|
+
`Wallet has ${bal.dvpn.toFixed(2)} P2P — need at least ${(minRequired / 1e6).toFixed(2)} P2P (node price + gas) for a session. Fund address ${account.address} with P2P tokens.`,
|
|
1493
|
+
{ balance: bal, address: account.address, required: minRequired }
|
|
1494
|
+
);
|
|
1495
|
+
}
|
|
1496
|
+
} catch (balErr) {
|
|
1497
|
+
if (balErr.code === ErrorCodes.INSUFFICIENT_BALANCE) throw balErr;
|
|
1498
|
+
// Non-fatal: balance check failed (network issue) — continue and let chain reject if needed
|
|
1499
|
+
progress(onProgress, logFn, 'wallet', `${account.address} | balance check skipped (${balErr.message})`);
|
|
1500
|
+
}
|
|
1501
|
+
|
|
1502
|
+
// 3. Check node status
|
|
1503
|
+
progress(onProgress, logFn, 'node-check', `Checking node ${opts.nodeAddress}...`);
|
|
1504
|
+
const nodeAgent = createNodeHttpsAgent(opts.nodeAddress, tlsTrust);
|
|
1505
|
+
const status = await nodeStatusV3(nodeInfo.remote_url, nodeAgent);
|
|
1506
|
+
progress(onProgress, logFn, 'node-check', `${status.moniker} (${status.type}) - ${status.location.city}, ${status.location.country}`);
|
|
1507
|
+
|
|
1508
|
+
// Pre-verify: node's address must match what we're paying for.
|
|
1509
|
+
// Prevents wasting tokens when remote URL serves a different node.
|
|
1510
|
+
if (status.address && status.address !== opts.nodeAddress) {
|
|
1511
|
+
throw new NodeError(ErrorCodes.NODE_NOT_FOUND, `Node address mismatch: remote URL serves ${status.address}, not ${opts.nodeAddress}. Aborting before payment.`, { expected: opts.nodeAddress, actual: status.address });
|
|
1512
|
+
}
|
|
1513
|
+
|
|
1514
|
+
const extremeDrift = status.type === 'v2ray' && status.clockDriftSec !== null && Math.abs(status.clockDriftSec) > 120;
|
|
1515
|
+
if (extremeDrift) {
|
|
1516
|
+
logFn?.(`Warning: clock drift ${status.clockDriftSec}s — VMess will fail but VLess may work`);
|
|
1517
|
+
}
|
|
1518
|
+
|
|
1519
|
+
// 2b. PRE-VALIDATE tunnel requirements BEFORE paying
|
|
1520
|
+
progress(onProgress, logFn, 'validate', 'Checking tunnel requirements...');
|
|
1521
|
+
const resolvedV2rayPath = validateTunnelRequirements(status.type, opts.v2rayExePath);
|
|
1522
|
+
|
|
1523
|
+
// Note: node reachability is already proven by nodeStatusV3() above (line ~1502).
|
|
1524
|
+
// If the status probe succeeded, the node's HTTPS endpoint is live.
|
|
1525
|
+
// WG tunnel failures are transport-level (UDP), not reachability — TCP pre-checks can't predict them.
|
|
1526
|
+
|
|
1527
|
+
// ── DRY-RUN: return mock result without paying, handshaking, or tunneling ──
|
|
1528
|
+
if (opts.dryRun) {
|
|
1529
|
+
privKey.fill(0);
|
|
1530
|
+
progress(onProgress, logFn, 'dry-run', 'Dry-run complete — no TX broadcast, no tunnel created');
|
|
1531
|
+
events.emit('connected', { sessionId: BigInt(0), serviceType: status.type, nodeAddress: opts.nodeAddress, dryRun: true });
|
|
1532
|
+
return {
|
|
1533
|
+
dryRun: true,
|
|
1534
|
+
sessionId: BigInt(0),
|
|
1535
|
+
serviceType: status.type,
|
|
1536
|
+
nodeAddress: opts.nodeAddress,
|
|
1537
|
+
nodeMoniker: status.moniker,
|
|
1538
|
+
nodeLocation: status.location,
|
|
1539
|
+
walletAddress: account.address,
|
|
1540
|
+
rpcUsed: rpc,
|
|
1541
|
+
lcdUsed: lcd,
|
|
1542
|
+
cleanup: async () => {},
|
|
1543
|
+
};
|
|
1544
|
+
}
|
|
1545
|
+
|
|
1546
|
+
// 3. Payment (strategy-specific)
|
|
1547
|
+
checkAborted(signal);
|
|
1548
|
+
const payCtx = { client, account, nodeInfo, lcd, logFn, onProgress, signal, timeouts };
|
|
1549
|
+
const { sessionId: paidSessionId, subscriptionId } = await paymentStrategy(payCtx);
|
|
1550
|
+
let sessionId = paidSessionId;
|
|
1551
|
+
|
|
1552
|
+
// 4. Handshake & tunnel
|
|
1553
|
+
// Wait 5s after session TX for node to index the session on-chain.
|
|
1554
|
+
// Without this, the node may return 409 "already exists" because it's still
|
|
1555
|
+
// processing the previous block's state changes.
|
|
1556
|
+
progress(onProgress, logFn, 'handshake', 'Waiting for node to index session...');
|
|
1557
|
+
await sleep(5000);
|
|
1558
|
+
progress(onProgress, logFn, 'handshake', 'Starting handshake...');
|
|
1559
|
+
checkAborted(signal);
|
|
1560
|
+
const tunnelOpts = {
|
|
1561
|
+
serviceType: status.type,
|
|
1562
|
+
remoteUrl: nodeInfo.remote_url,
|
|
1563
|
+
serverHost: new URL(nodeInfo.remote_url).hostname,
|
|
1564
|
+
sessionId,
|
|
1565
|
+
privKey,
|
|
1566
|
+
v2rayExePath: resolvedV2rayPath,
|
|
1567
|
+
fullTunnel,
|
|
1568
|
+
splitIPs: opts.splitIPs,
|
|
1569
|
+
systemProxy,
|
|
1570
|
+
killSwitch,
|
|
1571
|
+
dns: opts.dns,
|
|
1572
|
+
onProgress,
|
|
1573
|
+
logFn,
|
|
1574
|
+
extremeDrift,
|
|
1575
|
+
clockDriftSec: status.clockDriftSec,
|
|
1576
|
+
nodeAddress: opts.nodeAddress,
|
|
1577
|
+
timeouts,
|
|
1578
|
+
signal,
|
|
1579
|
+
nodeAgent,
|
|
1580
|
+
state,
|
|
1581
|
+
};
|
|
1582
|
+
|
|
1583
|
+
// ─── Handshake with "already exists" (409) retry ───
|
|
1584
|
+
// After session TX confirms, the node may still be indexing. Handshake can
|
|
1585
|
+
// return 409 "already exists" if the node hasn't finished processing.
|
|
1586
|
+
// Retry schedule: wait 15s, then 20s. If still fails, fall back to
|
|
1587
|
+
// retryStrategy (pay for fresh session) or throw.
|
|
1588
|
+
const _isAlreadyExists = (err) => {
|
|
1589
|
+
const msg = String(err?.message || '');
|
|
1590
|
+
const status = err?.details?.status;
|
|
1591
|
+
return msg.includes('already exists') || status === 409;
|
|
1592
|
+
};
|
|
1593
|
+
|
|
1594
|
+
let handshakeResult = null;
|
|
1595
|
+
let handshakeErr = null;
|
|
1596
|
+
const alreadyExistsDelays = [15000, 20000]; // retry delays for 409 "already exists"
|
|
1597
|
+
let alreadyExistsAttempt = 0;
|
|
1598
|
+
|
|
1599
|
+
for (;;) {
|
|
1600
|
+
try {
|
|
1601
|
+
handshakeResult = await performHandshake(tunnelOpts);
|
|
1602
|
+
break; // success
|
|
1603
|
+
} catch (err) {
|
|
1604
|
+
if (_isAlreadyExists(err) && alreadyExistsAttempt < alreadyExistsDelays.length) {
|
|
1605
|
+
const delayMs = alreadyExistsDelays[alreadyExistsAttempt];
|
|
1606
|
+
progress(onProgress, logFn, 'handshake', `Session indexing race (409) — retrying in ${delayMs / 1000}s (attempt ${alreadyExistsAttempt + 1}/${alreadyExistsDelays.length})...`);
|
|
1607
|
+
await sleep(delayMs);
|
|
1608
|
+
checkAborted(signal);
|
|
1609
|
+
alreadyExistsAttempt++;
|
|
1610
|
+
continue;
|
|
1611
|
+
}
|
|
1612
|
+
handshakeErr = err;
|
|
1613
|
+
break;
|
|
1614
|
+
}
|
|
1615
|
+
}
|
|
1616
|
+
|
|
1617
|
+
try {
|
|
1618
|
+
if (handshakeResult) {
|
|
1619
|
+
markSessionActive(String(sessionId), opts.nodeAddress);
|
|
1620
|
+
if (subscriptionId) handshakeResult.subscriptionId = subscriptionId;
|
|
1621
|
+
_recordMetric(opts.nodeAddress, true, Date.now() - _connectStart); // v25: metrics
|
|
1622
|
+
events.emit('connected', { sessionId, serviceType: status.type, nodeAddress: opts.nodeAddress });
|
|
1623
|
+
return handshakeResult;
|
|
1624
|
+
}
|
|
1625
|
+
|
|
1626
|
+
// Handshake failed
|
|
1627
|
+
const hsErr = handshakeErr;
|
|
1628
|
+
_recordMetric(opts.nodeAddress, false, Date.now() - _connectStart); // v25: metrics
|
|
1629
|
+
markSessionPoisoned(String(sessionId), opts.nodeAddress, hsErr.message);
|
|
1630
|
+
|
|
1631
|
+
// v25: Attach partial connection state for recovery (#2)
|
|
1632
|
+
if (!hsErr.details) hsErr.details = {};
|
|
1633
|
+
hsErr.details.sessionId = String(sessionId);
|
|
1634
|
+
hsErr.details.nodeAddress = opts.nodeAddress;
|
|
1635
|
+
hsErr.details.failedAt = 'handshake';
|
|
1636
|
+
hsErr.details.serviceType = status.type;
|
|
1637
|
+
|
|
1638
|
+
// "already exists" final fallback: pay for fresh session and retry handshake
|
|
1639
|
+
if (retryStrategy && _isAlreadyExists(hsErr)) {
|
|
1640
|
+
progress(onProgress, logFn, 'session', `Session ${sessionId} stale on node — paying for fresh session...`);
|
|
1641
|
+
checkAborted(signal);
|
|
1642
|
+
const retry = await retryStrategy(payCtx, hsErr);
|
|
1643
|
+
sessionId = retry.sessionId;
|
|
1644
|
+
tunnelOpts.sessionId = sessionId;
|
|
1645
|
+
try {
|
|
1646
|
+
const retryResult = await performHandshake(tunnelOpts);
|
|
1647
|
+
markSessionActive(String(sessionId), opts.nodeAddress);
|
|
1648
|
+
events.emit('connected', { sessionId, serviceType: status.type, nodeAddress: opts.nodeAddress });
|
|
1649
|
+
return retryResult;
|
|
1650
|
+
} catch (retryErr) {
|
|
1651
|
+
// Clean up any partially-installed tunnel before re-throwing
|
|
1652
|
+
if (state.wgTunnel) {
|
|
1653
|
+
try { await disconnectWireGuard(); } catch {} // cleanup: best-effort
|
|
1654
|
+
state.wgTunnel = null;
|
|
1655
|
+
}
|
|
1656
|
+
if (state.v2rayProc) {
|
|
1657
|
+
try { killV2RayProc(state.v2rayProc); } catch {} // cleanup: best-effort
|
|
1658
|
+
state.v2rayProc = null;
|
|
1659
|
+
}
|
|
1660
|
+
markSessionPoisoned(String(sessionId), opts.nodeAddress, retryErr.message);
|
|
1661
|
+
if (!retryErr.details) retryErr.details = {};
|
|
1662
|
+
retryErr.details.sessionId = String(sessionId);
|
|
1663
|
+
retryErr.details.nodeAddress = opts.nodeAddress;
|
|
1664
|
+
retryErr.details.failedAt = 'handshake_retry';
|
|
1665
|
+
events.emit('error', retryErr);
|
|
1666
|
+
throw retryErr;
|
|
1667
|
+
}
|
|
1668
|
+
}
|
|
1669
|
+
events.emit('error', hsErr);
|
|
1670
|
+
throw hsErr;
|
|
1671
|
+
} finally {
|
|
1672
|
+
// Zero mnemonic-derived private key — guaranteed even if exceptions thrown
|
|
1673
|
+
privKey.fill(0);
|
|
1674
|
+
}
|
|
1675
|
+
}
|
|
1676
|
+
|
|
1677
|
+
// ─── Handshake & Tunnel Setup ────────────────────────────────────────────────
|
|
1678
|
+
|
|
1679
|
+
/**
 * Dispatch the post-session handshake to the service-specific tunnel setup.
 * WireGuard nodes are handled by setupWireGuard(); every other service type
 * falls through to setupV2Ray(). Forwards only the options each path needs.
 */
async function performHandshake(opts) {
  const { serviceType, remoteUrl, serverHost, sessionId, privKey, v2rayExePath, fullTunnel, splitIPs, systemProxy, killSwitch, dns, onProgress, logFn, extremeDrift, clockDriftSec, nodeAddress, timeouts, signal, nodeAgent, state } = opts;
  if (serviceType === 'wireguard') {
    return setupWireGuard({ remoteUrl, sessionId, privKey, fullTunnel, splitIPs, killSwitch, dns, onProgress, logFn, nodeAddress, timeouts, signal, nodeAgent, state });
  }
  return setupV2Ray({ remoteUrl, serverHost, sessionId, privKey, v2rayExePath, systemProxy, dns, onProgress, logFn, extremeDrift, clockDriftSec, nodeAddress, timeouts, signal, nodeAgent, state });
}
|
|
1686
|
+
|
|
1687
|
+
/**
 * Handshake with a WireGuard node, install the local tunnel, and verify that
 * traffic actually flows before committing to it.
 *
 * Flow: generate WG keypair → initHandshakeV3 → write config with "safe"
 * AllowedIPs → install with retry → verify connectivity → optionally switch
 * to full tunnel (0.0.0.0/0) → persist credentials/state → return a handle
 * with an async cleanup() callback.
 *
 * Fix: removed dead local `tunnelInstalled` (assigned but never read).
 *
 * @param {object} o destructured options
 * @param {string} o.remoteUrl node remote API URL used for the handshake
 * @param {*} o.sessionId on-chain session id (possibly BigInt; stringified before persistence)
 * @param {Buffer} o.privKey wallet-derived signing key (zeroed by the caller, not here)
 * @param {boolean} o.fullTunnel route ALL traffic (0.0.0.0/0) once verified
 * @param {string[]|null} o.splitIPs explicit AllowedIPs override
 * @param {boolean} o.killSwitch block non-tunnel traffic after connect
 * @param {*} o.dns DNS preference, resolved via resolveDnsServers()
 * @param {Function} o.onProgress progress callback
 * @param {Function} o.logFn optional logger
 * @param {string} o.nodeAddress node on-chain address (credential/metric key)
 * @param {object} o.timeouts timeout config — NOTE(review): not referenced anywhere in this function; confirm whether a timeout should apply here
 * @param {AbortSignal} o.signal cooperative cancellation
 * @param {*} o.nodeAgent HTTP agent used for node requests
 * @param {object} o.state shared mutable connection state (wgTunnel, connection, _mnemonic)
 * @returns {Promise<object>} { sessionId, serviceType, nodeAddress, confPath, cleanup }
 * @throws {TunnelError} WG_NO_CONNECTIVITY when the tunnel installs but routes no traffic
 */
async function setupWireGuard({ remoteUrl, sessionId, privKey, fullTunnel, splitIPs, killSwitch, dns, onProgress, logFn, nodeAddress, timeouts, signal, nodeAgent, state }) {
  // Generate WireGuard keys
  const wgKeys = generateWgKeyPair();

  // Handshake with node
  checkAborted(signal);
  progress(onProgress, logFn, 'handshake', 'WireGuard handshake...');
  const hs = await initHandshakeV3(remoteUrl, sessionId, privKey, wgKeys.publicKey, nodeAgent);

  // NOTE: Credentials are saved AFTER verified connectivity (not here).
  // Saving before verification causes stale credentials to persist on retry
  // when the tunnel fails — the node doesn't route traffic with old UUID/keys.

  // Resolve AllowedIPs based on fullTunnel flag:
  // - fullTunnel=true (default): 0.0.0.0/0 — routes ALL traffic, changes your IP
  // - fullTunnel=false: only speedtest IPs — safe for testing, IP unchanged
  // - splitIPs=[...]: explicit IPs override everything
  let resolvedSplitIPs = null;
  if (splitIPs && Array.isArray(splitIPs) && splitIPs.length > 0) {
    // Explicit split IPs provided — use them as-is
    resolvedSplitIPs = splitIPs;
    progress(onProgress, logFn, 'tunnel', `Split tunnel: routing ${resolvedSplitIPs.length} explicit IPs`);
  } else if (fullTunnel) {
    // Full tunnel: pass null to writeWgConfig → generates 0.0.0.0/0, ::/0
    resolvedSplitIPs = null;
    progress(onProgress, logFn, 'tunnel', 'Full tunnel mode (0.0.0.0/0) — all traffic through VPN');
  } else {
    // Safe split tunnel: only route speedtest IPs
    try {
      resolvedSplitIPs = await resolveSpeedtestIPs();
      progress(onProgress, logFn, 'tunnel', `Split tunnel: routing ${resolvedSplitIPs.length} speedtest IPs`);
    } catch {
      // Can't resolve speedtest IPs, fall back to full tunnel
      resolvedSplitIPs = null;
      progress(onProgress, logFn, 'tunnel', 'Warning: could not resolve speedtest IPs, falling back to full tunnel');
    }
  }

  // v28: VERIFY-BEFORE-CAPTURE — install with safe split IPs first, verify tunnel works,
  // THEN switch to full tunnel (0.0.0.0/0). This prevents killing the user's internet
  // if the node is broken. Previously, fullTunnel captured ALL traffic before verification,
  // causing up to ~78s of dead internet on failure.
  const VERIFY_IPS = ['1.1.1.1/32', '1.0.0.1/32'];
  const VERIFY_TARGETS = ['https://1.1.1.1', 'https://1.0.0.1'];
  const needsFullTunnelSwitch = fullTunnel && (!resolvedSplitIPs || resolvedSplitIPs.length === 0);

  const initialSplitIPs = needsFullTunnelSwitch ? VERIFY_IPS : resolvedSplitIPs;
  const confPath = writeWgConfig(
    wgKeys.privateKey,
    hs.assignedAddrs,
    hs.serverPubKey,
    hs.serverEndpoint,
    initialSplitIPs,
    { dns: resolveDnsServers(dns) },
  );

  // DON'T zero private key yet — may need to rewrite config for full tunnel switch
  // wgKeys.privateKey.fill(0); // deferred to after potential second config write

  // Wait for node to register peer then install + verify tunnel.
  // v20: Fixed 5s sleep. v21: Exponential retry — try install at 1.5s, then 3s, 5s.
  // Most nodes register the peer within 1-2s. Saves ~3s on average.
  progress(onProgress, logFn, 'tunnel', 'Waiting for node to register peer...');
  const installDelays = [1500, 1500, 2000]; // total budget: 5s (same as before but tries earlier)
  for (let i = 0; i < installDelays.length; i++) {
    await sleep(installDelays[i]);
    checkAborted(signal);
    try {
      progress(onProgress, logFn, 'tunnel', `Installing WireGuard tunnel (attempt ${i + 1}/${installDelays.length})...`);
      await installWgTunnel(confPath);
      state.wgTunnel = 'wgsent0';
      break;
    } catch (installErr) {
      if (i === installDelays.length - 1) {
        wgKeys.privateKey.fill(0);
        throw installErr; // last attempt — propagate
      }
      progress(onProgress, logFn, 'tunnel', `Tunnel install attempt ${i + 1} failed, retrying...`);
    }
  }

  // Verify actual connectivity through the tunnel.
  // A RUNNING service doesn't guarantee packets flow — the peer might reject us,
  // the endpoint might be firewalled, or the handshake may have been for a stale session.
  // v28: When fullTunnel, we're still on safe split IPs — user's internet is unaffected.
  progress(onProgress, logFn, 'verify', 'Verifying tunnel connectivity...');
  // v29: 1 attempt x 2 targets x 5s = ~10s max exposure. Tear down immediately on failure.
  const verifyTargets = needsFullTunnelSwitch ? VERIFY_TARGETS : null;
  const tunnelWorks = await verifyWgConnectivity(1, verifyTargets);
  if (!tunnelWorks) {
    wgKeys.privateKey.fill(0);
    clearCredentials(nodeAddress); // Clear stale handshake credentials so retry gets fresh ones
    progress(onProgress, logFn, 'verify', 'WireGuard tunnel installed but no traffic flows. Tearing down immediately...');
    try { await disconnectWireGuard(); } catch (e) { logFn?.(`[cleanup] WG disconnect warning: ${e.message}`); }
    state.wgTunnel = null;
    throw new TunnelError(ErrorCodes.WG_NO_CONNECTIVITY, 'WireGuard tunnel installed (service RUNNING) but connectivity check failed — no traffic flows through the tunnel. The node may have rejected the peer or the session may be stale.', { nodeAddress, sessionId: String(sessionId) });
  }

  // Capture private key base64 BEFORE zeroing — needed for credential save after verification.
  const wgPrivKeyB64 = wgKeys.privateKey.toString('base64');

  // v28: Tunnel verified! If fullTunnel, switch from safe split IPs to 0.0.0.0/0
  // Don't manually disconnect — installWgTunnel() handles its own force-remove + 1s wait.
  // Double-uninstall races with Windows Service Manager and causes "failed to start" errors.
  if (needsFullTunnelSwitch) {
    progress(onProgress, logFn, 'tunnel', 'Verified! Switching to full tunnel (0.0.0.0/0)...');
    const fullConfPath = writeWgConfig(
      wgKeys.privateKey,
      hs.assignedAddrs,
      hs.serverPubKey,
      hs.serverEndpoint,
      null, // null = 0.0.0.0/0, ::/0
      { dns: resolveDnsServers(dns) },
    );
    wgKeys.privateKey.fill(0); // Zero AFTER final config write
    state.wgTunnel = null;
    await installWgTunnel(fullConfPath);
    state.wgTunnel = 'wgsent0';
  } else {
    wgKeys.privateKey.fill(0); // Zero for non-fullTunnel path
  }

  progress(onProgress, logFn, 'verify', 'WireGuard connected and verified!');

  // Save credentials AFTER verified connectivity — prevents stale credentials
  // from persisting when handshake succeeds but tunnel fails to route traffic.
  saveCredentials(nodeAddress, String(sessionId), {
    serviceType: 'wireguard',
    wgPrivateKey: wgPrivKeyB64,
    wgServerPubKey: hs.serverPubKey,
    wgAssignedAddrs: hs.assignedAddrs,
    wgServerEndpoint: hs.serverEndpoint,
  });

  // Enable kill switch if opts.killSwitch is true
  if (killSwitch) {
    try {
      enableKillSwitch(hs.serverEndpoint);
      logFn?.('[kill-switch] Enabled — all non-tunnel traffic blocked');
    } catch (e) { logFn?.(`[kill-switch] Warning: ${e.message}`); }
  }

  // NOTE(review): confPath here is the initial (verify/split) config path; after a
  // full-tunnel switch the active config was written to fullConfPath — confirm that
  // persisting/returning the initial path is intended (or that both paths coincide).
  saveState({ sessionId: String(sessionId), serviceType: 'wireguard', wgTunnelName: 'wgsent0', confPath, systemProxySet: false });
  const sessionIdStr = String(sessionId); // String, not BigInt — safe for JSON.stringify
  state.connection = { sessionId: sessionIdStr, serviceType: 'wireguard', nodeAddress, connectedAt: Date.now() };
  return {
    sessionId: sessionIdStr,
    serviceType: 'wireguard',
    nodeAddress,
    confPath,
    cleanup: async () => {
      if (_killSwitchEnabled) disableKillSwitch();
      try { await disconnectWireGuard(); } catch {} // tunnel may already be down
      // End session on chain (fire-and-forget)
      if (sessionIdStr && state._mnemonic) {
        _endSessionOnChain(sessionIdStr, state._mnemonic).then(r => events.emit('sessionEnded', { txHash: r?.transactionHash })).catch(e => events.emit('sessionEndFailed', { error: e.message }));
      }
      state.wgTunnel = null;
      state.connection = null;
      state._mnemonic = null;
      clearState();
    },
  };
}
|
|
1853
|
+
|
|
1854
|
+
/**
 * Probe connectivity through the freshly-installed WireGuard tunnel.
 * Issues plain GET requests to a short list of reliable endpoints; any HTTP
 * response at all (any status code) counts as "traffic flows". For full
 * tunnel (0.0.0.0/0) all traffic goes through it; for split tunnel the
 * routed probe IPs are used.
 *
 * v29 sizing: 1 attempt x 2 targets x 5s timeout ≈ 10s worst case, down from
 * the old 3x3x8s (~78s of dead internet when a broken node held fullTunnel).
 *
 * @param {number} maxAttempts passes over the target list (2s pause between passes)
 * @param {string[]|null} customTargets override the default probe URLs
 * @returns {Promise<boolean>} true once any target responds, false if all fail
 */
async function verifyWgConnectivity(maxAttempts = 1, customTargets = null) {
  const probeUrls = customTargets || ['https://1.1.1.1', 'https://www.cloudflare.com'];
  let pass = 0;
  while (pass < maxAttempts) {
    if (pass > 0) await sleep(2000); // brief pause between passes
    for (const url of probeUrls) {
      try {
        await axios.get(url, { timeout: 5000, maxRedirects: 2, validateStatus: () => true });
        return true; // any response proves the tunnel routes traffic
      } catch {
        // target unreachable through the tunnel — try the next one
      }
    }
    pass += 1;
  }
  return false;
}
|
|
1875
|
+
|
|
1876
|
+
/**
 * Derive the success-rate bucket key for a V2Ray outbound's transport, used
 * for dynamic rate recording. Returns null when the outbound has no network.
 */
function _transportRateKey(ob) {
  const stream = ob.streamSettings;
  const net = stream?.network;
  if (!net) return null;
  // TLS-wrapped transports are tracked separately from their plaintext form.
  if ((stream?.security || 'none') === 'tls') return `${net}/tls`;
  // Plaintext gRPC keeps an explicit "/none" suffix; other plaintext
  // transports use the bare network name as their key.
  return net === 'grpc' ? 'grpc/none' : net;
}
|
|
1885
|
+
|
|
1886
|
+
/**
 * Handshake with a V2Ray node, spawn a local v2ray process, and find a
 * working transport/protocol outbound by testing each candidate individually.
 *
 * Flow: generate UUID → initHandshakeV3V2Ray → wait for UUID registration →
 * viability checks (clock drift vs VMess AEAD) → pick a free SOCKS port →
 * build client config → for each outbound: TCP-probe, spawn v2ray with a
 * single-outbound config, test connectivity through SOCKS5 → on success,
 * persist credentials/state and return a handle with an async cleanup().
 *
 * @param {object} o destructured options
 * @param {string} o.remoteUrl node remote API URL used for the handshake
 * @param {string} o.serverHost hostname used for outbound probes and client config
 * @param {*} o.sessionId on-chain session id (possibly BigInt; stringified before persistence)
 * @param {Buffer} o.privKey wallet-derived signing key (zeroed by the caller, not here)
 * @param {string} o.v2rayExePath path to the v2ray executable (required)
 * @param {boolean} o.systemProxy set the OS-level SOCKS proxy after connect
 * @param {*} o.dns DNS preference, resolved via resolveDnsServers()
 * @param {Function} o.onProgress progress callback
 * @param {Function} o.logFn optional logger
 * @param {boolean} o.extremeDrift clock drift beyond VMess AEAD tolerance (±120s)
 * @param {number} o.clockDriftSec measured node clock drift in seconds
 * @param {string} o.nodeAddress node on-chain address (credential/metric key)
 * @param {object} o.timeouts timeout config (v2rayReady used for SOCKS port wait)
 * @param {AbortSignal} o.signal cooperative cancellation
 * @param {*} o.nodeAgent HTTP agent used for node requests
 * @param {object} o.state shared mutable connection state (v2rayProc, connection, _mnemonic, systemProxy)
 * @returns {Promise<object>} { sessionId, serviceType, nodeAddress, socksPort, socksAuth, outbound, cleanup }
 * @throws {TunnelError} V2RAY_NOT_FOUND when v2rayExePath is missing; V2RAY_ALL_FAILED when no outbound works
 * @throws {NodeError} NODE_CLOCK_DRIFT for VMess-only nodes under extreme drift
 */
async function setupV2Ray({ remoteUrl, serverHost, sessionId, privKey, v2rayExePath, systemProxy, dns, onProgress, logFn, extremeDrift, clockDriftSec, nodeAddress, timeouts, signal, nodeAgent, state }) {
  if (!v2rayExePath) throw new TunnelError(ErrorCodes.V2RAY_NOT_FOUND, 'v2rayExePath required for V2Ray nodes');

  // Generate UUID for V2Ray session
  const uuid = generateV2RayUUID();

  // Handshake with node
  checkAborted(signal);
  progress(onProgress, logFn, 'handshake', 'V2Ray handshake...');
  const hs = await initHandshakeV3V2Ray(remoteUrl, sessionId, privKey, uuid, nodeAgent);

  // NOTE: Credentials are saved AFTER verified connectivity (not here).
  // Saving before verification causes stale credentials to persist on retry
  // when the tunnel fails — the node doesn't route traffic with old UUID/keys.

  // Wait for node to register UUID.
  // v20: Fixed 5s sleep. v21: Reduced to 2s — V2Ray outbound loop has its own
  // readiness checks (waitForPort + SOCKS5 connectivity test). ~8% of V2Ray
  // nodes need 5-10s to register UUID internally (node-tester-learnings-2026-03-20).
  // NOTE(review): the sleep below is 5000ms while the v21 note says 2s — confirm which is current.
  progress(onProgress, logFn, 'tunnel', 'Waiting for node to register UUID...');
  await sleep(5000);

  // Post-handshake viability checks (before spending time on outbound tests)
  // hs.config is a JSON string; its metadata array describes the node's outbounds.
  const allMeta = JSON.parse(hs.config).metadata || [];

  // VMess-only nodes with extreme clock drift → guaranteed AEAD failure.
  // VLess (proxy_protocol=1) is immune to clock drift; only VMess (proxy_protocol=2) fails.
  if (extremeDrift) {
    const hasVless = allMeta.some(m => m.proxy_protocol === 1);
    if (!hasVless) {
      throw new NodeError(ErrorCodes.NODE_CLOCK_DRIFT, `VMess-only node with clock drift ${clockDriftSec}s (AEAD tolerance ±120s, no VLess available)`, { clockDriftSec, nodeAddress });
    }
    logFn?.('VLess available — testing despite clock drift (VLess ignores clock drift)');
  }

  // Build config — rotating port to avoid Windows TIME_WAIT conflicts
  // Sequential increment from random start avoids repeated collisions
  // with TIME_WAIT ports that pure random retries can hit.
  const startPort = 10800 + Math.floor(Math.random() * 1000);
  let socksPort = startPort;
  for (let i = 0; i < 5; i++) {
    socksPort = startPort + i;
    if (await checkPortFree(socksPort)) break;
  }
  const config = buildV2RayClientConfig(serverHost, hs.config, uuid, socksPort, { dns: resolveDnsServers(dns), systemProxy, clockDriftSec: clockDriftSec || 0 });

  // When clock drift is extreme (>120s), prefer VLess outbounds over VMess.
  // VLess doesn't use AEAD timestamps so it's immune to clock drift.
  // VMess AEAD rejects packets with >120s drift — guaranteed failure.
  if (extremeDrift && config.outbounds.length > 1) {
    // Stable-partition VLess outbounds to the front (0 sorts before 1).
    config.outbounds.sort((a, b) => {
      const aIsVless = a.protocol === 'vless' ? 0 : 1;
      const bIsVless = b.protocol === 'vless' ? 0 : 1;
      return aIsVless - bIsVless;
    });
    // Update routing to point to the first (now VLess) outbound
    const proxyRule = config.routing.rules.find(r => r.inboundTag?.includes('proxy'));
    if (proxyRule) proxyRule.outboundTag = config.outbounds[0].tag;
    logFn?.(`Clock drift ${clockDriftSec}s: reordered outbounds — VLess first (immune to drift)`);
  }

  // Write config and start V2Ray, testing each outbound individually
  // (NEVER use balancer — causes session poisoning, see known-issues.md)
  const tmpDir = path.join(os.tmpdir(), 'sentinel-v2ray');
  mkdirSync(tmpDir, { recursive: true, mode: 0o700 }); // 0700: config holds UUID credentials
  const cfgPath = path.join(tmpDir, 'config.json');

  let workingOutbound = null;
  try {
    for (const ob of config.outbounds) {
      checkAborted(signal);
      // Pre-connection TCP probe for TCP-based transports — skip dead ports in 3s
      // instead of wasting 30s on a full V2Ray start+test cycle
      const obNet = ob.streamSettings?.network;
      if (['tcp', 'websocket', 'grpc', 'gun', 'http'].includes(obNet)) {
        const obPort = ob.settings?.vnext?.[0]?.port;
        const obHost = ob.settings?.vnext?.[0]?.address || serverHost;
        if (obPort) {
          const portOpen = await waitForPort(obPort, 3000, obHost);
          if (!portOpen) {
            const rk = _transportRateKey(ob);
            if (rk) recordTransportResult(rk, false);
            progress(onProgress, logFn, 'tunnel', ` ${ob.tag}: port ${obPort} not reachable, skipping`);
            continue;
          }
        }
      }

      // Kill previous v2ray process by PID (NOT taskkill /IM which kills ALL v2ray.exe system-wide)
      if (state.v2rayProc) {
        state.v2rayProc.kill();
        state.v2rayProc = null;
        await sleep(2000); // let the OS release the SOCKS port before respawning
      }

      // Config with single outbound (no balancer) — only include the outbound being tested
      const attempt = {
        ...config,
        outbounds: [ob],
        routing: {
          domainStrategy: 'IPIfNonMatch',
          rules: [
            { inboundTag: ['api'], outboundTag: 'api', type: 'field' },
            { inboundTag: ['proxy'], outboundTag: ob.tag, type: 'field' },
          ],
        },
      };

      writeFileSync(cfgPath, JSON.stringify(attempt, null, 2), { mode: 0o600 });
      // Restrict ACL on Windows (temp dir is user-scoped but readable by same-user processes)
      if (process.platform === 'win32') {
        try { execFileSync('icacls', [cfgPath, '/inheritance:r', '/grant:r', `${process.env.USERNAME || 'BUILTIN\\Users'}:F`, '/grant:r', 'SYSTEM:F'], { stdio: 'pipe', timeout: 3000 }); } catch {}
      }
      const proc = spawn(v2rayExePath, ['run', '-config', cfgPath], { stdio: 'pipe' });
      // Capture V2Ray stderr for diagnostics — filter out known noise lines
      // "proxy/socks: insufficient header" appears on every port probe (100% of runs), not a real error.
      if (proc.stderr) {
        proc.stderr.on('data', (chunk) => {
          const lines = chunk.toString().split('\n');
          for (const line of lines) {
            const trimmed = line.trim();
            if (!trimmed) continue;
            if (trimmed.includes('insufficient header')) continue; // port probe noise
            logFn?.(`[v2ray stderr] ${trimmed}`);
          }
        });
      }
      // Delete config after V2Ray reads it (contains UUID credentials)
      setTimeout(() => { try { unlinkSync(cfgPath); } catch {} }, 2000);

      // Wait for SOCKS5 port to accept connections instead of fixed sleep.
      // V2Ray binding is async — fixed 6s sleep causes false failures on slow starts.
      const ready = await waitForPort(socksPort, timeouts.v2rayReady);
      if (!ready || proc.exitCode !== null) {
        progress(onProgress, logFn, 'tunnel', ` ${ob.tag}: v2ray ${proc.exitCode !== null ? `exited (code ${proc.exitCode})` : 'SOCKS5 port not ready'}, skipping`);
        proc.kill();
        continue;
      }

      // Test connectivity through SOCKS5 — use reliable targets, not httpbin.org
      const TARGETS = ['https://www.google.com', 'https://www.cloudflare.com'];
      let connected = false;
      try {
        const { SocksProxyAgent } = await import('socks-proxy-agent');
        for (const target of TARGETS) {
          const auth = config._socksAuth;
          const proxyUrl = (auth?.user && auth?.pass)
            ? `socks5://${auth.user}:${auth.pass}@127.0.0.1:${socksPort}`
            : `socks5://127.0.0.1:${socksPort}`;
          const agent = new SocksProxyAgent(proxyUrl);
          try {
            // Any HTTP response (any status) counts as connectivity.
            await axios.get(target, { httpAgent: agent, httpsAgent: agent, timeout: 10000, maxRedirects: 2, validateStatus: () => true });
            connected = true;
            break;
          } catch {} finally { agent.destroy(); }
        }
        if (connected) {
          const rk = _transportRateKey(ob);
          if (rk) recordTransportResult(rk, true);
          progress(onProgress, logFn, 'verify', `${ob.tag}: connected!`);
          workingOutbound = ob;
          state.v2rayProc = proc; // keep the winning process; loop exits below
          break;
        }
      } catch {}
      if (!connected) {
        const rk = _transportRateKey(ob);
        if (rk) recordTransportResult(rk, false);
        progress(onProgress, logFn, 'tunnel', ` ${ob.tag}: failed (no connectivity)`);
        proc.kill();
      }
    }
  } catch (err) {
    // Kill any lingering V2Ray process on loop exit (abort, unexpected throw, etc.)
    if (state.v2rayProc) {
      try { killV2RayProc(state.v2rayProc); } catch {} // cleanup: best-effort
      state.v2rayProc = null;
    }
    throw err;
  }

  if (!workingOutbound) {
    clearCredentials(nodeAddress); // Clear stale handshake credentials so retry gets fresh ones
    throw new TunnelError(ErrorCodes.V2RAY_ALL_FAILED, 'All V2Ray transport/protocol combinations failed', { nodeAddress, sessionId: String(sessionId) });
  }

  // Save credentials AFTER verified connectivity — prevents stale credentials
  // from persisting when handshake succeeds but tunnel fails to route traffic.
  saveCredentials(nodeAddress, String(sessionId), {
    serviceType: 'v2ray',
    v2rayUuid: uuid,
    v2rayConfig: hs.config,
  });

  // Auto-set Windows system proxy so browser traffic goes through the SOCKS5 tunnel.
  // Without this, V2Ray creates a local proxy but nothing uses it — the user's IP doesn't change.
  if (systemProxy && socksPort) {
    progress(onProgress, logFn, 'proxy', `Setting system SOCKS proxy → 127.0.0.1:${socksPort}`);
    setSystemProxy(socksPort);
  }

  const sessionIdStr = String(sessionId); // String, not BigInt — safe for JSON.stringify
  // Expose SOCKS5 auth credentials so external apps can use the proxy for split tunneling.
  // Default is noauth (no credentials needed), but if socksAuth=true was passed, return creds.
  const socksAuth = config._socksAuth?.user
    ? { user: config._socksAuth.user, pass: config._socksAuth.pass }
    : null;
  saveState({ sessionId: sessionIdStr, serviceType: 'v2ray', v2rayPid: state.v2rayProc?.pid, socksPort, systemProxySet: state.systemProxy, nodeAddress });
  state.connection = { sessionId: sessionIdStr, serviceType: 'v2ray', nodeAddress, socksPort, connectedAt: Date.now() };
  return {
    sessionId: sessionIdStr,
    serviceType: 'v2ray',
    nodeAddress,
    socksPort,
    socksAuth,
    outbound: workingOutbound.tag,
    cleanup: async () => {
      // NOTE(review): this path uses proc.kill() while the loop's error path uses
      // killV2RayProc() — confirm whether the helper's extra behavior is needed here too.
      if (state.v2rayProc) { state.v2rayProc.kill(); state.v2rayProc = null; await sleep(500); }
      if (state.systemProxy) clearSystemProxy();
      // End session on chain (fire-and-forget)
      if (sessionIdStr && state._mnemonic) {
        _endSessionOnChain(sessionIdStr, state._mnemonic).then(r => events.emit('sessionEnded', { txHash: r?.transactionHash })).catch(e => events.emit('sessionEndFailed', { error: e.message }));
      }
      state.connection = null;
      state._mnemonic = null;
      clearState();
    },
  };
}
|
|
2115
|
+
|
|
2116
|
+
// ─── Connection Status (VPN UX: user must always know if they're connected) ─
|
|
2117
|
+
|
|
2118
|
+
/**
 * Check whether a VPN tunnel is currently active — drives the
 * connected/disconnected indicator in app UIs (like a VPN icon).
 * Returns the module default state's `isConnected` flag as-is.
 */
export function isConnected() {
  const { isConnected: connected } = _defaultState;
  return connected;
}
|
|
2125
|
+
|
|
2126
|
+
/**
|
|
2127
|
+
* Get current connection status. Returns null if disconnected.
|
|
2128
|
+
* Apps should poll this (e.g. every 5s) to update UI — like NordVPN's status bar.
|
|
2129
|
+
* v25: Includes healthChecks for tunnel/proxy liveness.
|
|
2130
|
+
*/
|
|
2131
|
+
export function getStatus() {
|
|
2132
|
+
if (!_defaultState.connection) return null;
|
|
2133
|
+
|
|
2134
|
+
// v29: Cross-check tunnel liveness FIRST — if connection object exists but neither
|
|
2135
|
+
// tunnel handle is truthy, the state is phantom (tunnel torn down, connection stale).
|
|
2136
|
+
// This prevents IP leak where user thinks they're connected but traffic goes direct.
|
|
2137
|
+
if (!_defaultState.wgTunnel && !_defaultState.v2rayProc) {
|
|
2138
|
+
const stale = _defaultState.connection;
|
|
2139
|
+
_defaultState.connection = null;
|
|
2140
|
+
// End session on chain (fire-and-forget) to prevent stale session leaks
|
|
2141
|
+
if (stale?.sessionId && _defaultState._mnemonic) {
|
|
2142
|
+
_endSessionOnChain(stale.sessionId, _defaultState._mnemonic).then(r => events.emit('sessionEnded', { txHash: r?.transactionHash })).catch(e => events.emit('sessionEndFailed', { error: e.message }));
|
|
2143
|
+
}
|
|
2144
|
+
clearState();
|
|
2145
|
+
events.emit('disconnected', { nodeAddress: stale.nodeAddress, serviceType: stale.serviceType, reason: 'phantom_state' });
|
|
2146
|
+
return null;
|
|
2147
|
+
}
|
|
2148
|
+
|
|
2149
|
+
const conn = _defaultState.connection;
|
|
2150
|
+
const uptimeMs = Date.now() - conn.connectedAt;
|
|
2151
|
+
|
|
2152
|
+
// v25: Health checks — distinguish tunnel states
|
|
2153
|
+
const healthChecks = {
|
|
2154
|
+
tunnelActive: false,
|
|
2155
|
+
proxyListening: false,
|
|
2156
|
+
systemProxyValid: _defaultState.systemProxy,
|
|
2157
|
+
};
|
|
2158
|
+
|
|
2159
|
+
if (_defaultState.wgTunnel) {
|
|
2160
|
+
// WireGuard: check if adapter exists
|
|
2161
|
+
if (process.platform === 'win32') {
|
|
2162
|
+
try {
|
|
2163
|
+
const out = execFileSync('netsh', ['interface', 'show', 'interface', 'name=wgsent0'], { encoding: 'utf8', stdio: 'pipe', timeout: 3000 });
|
|
2164
|
+
healthChecks.tunnelActive = out.includes('Connected');
|
|
2165
|
+
} catch {
|
|
2166
|
+
// Adapter gone — tunnel is dead
|
|
2167
|
+
healthChecks.tunnelActive = false;
|
|
2168
|
+
}
|
|
2169
|
+
} else {
|
|
2170
|
+
// Non-Windows: trust state (no easy check)
|
|
2171
|
+
healthChecks.tunnelActive = true;
|
|
2172
|
+
}
|
|
2173
|
+
}
|
|
2174
|
+
|
|
2175
|
+
if (_defaultState.v2rayProc) {
|
|
2176
|
+
// V2Ray: check if process is alive
|
|
2177
|
+
healthChecks.tunnelActive = !_defaultState.v2rayProc.killed && _defaultState.v2rayProc.exitCode === null;
|
|
2178
|
+
// Proxy listening = process alive (async port check removed — was broken, fired after return)
|
|
2179
|
+
healthChecks.proxyListening = healthChecks.tunnelActive;
|
|
2180
|
+
if (conn.socksPort) {
|
|
2181
|
+
}
|
|
2182
|
+
}
|
|
2183
|
+
|
|
2184
|
+
// v28: Auto-clear phantom state — if connection exists but tunnel is dead,
|
|
2185
|
+
// clean up stale state. Prevents ghost "connected" status after tunnel dies.
|
|
2186
|
+
if (!healthChecks.tunnelActive && !_defaultState.v2rayProc && !_defaultState.wgTunnel) {
|
|
2187
|
+
// Both tunnel handles are null — connection state is stale
|
|
2188
|
+
if (conn?.sessionId && _defaultState._mnemonic) {
|
|
2189
|
+
_endSessionOnChain(conn.sessionId, _defaultState._mnemonic).then(r => events.emit('sessionEnded', { txHash: r?.transactionHash })).catch(e => events.emit('sessionEndFailed', { error: e.message }));
|
|
2190
|
+
}
|
|
2191
|
+
_defaultState.connection = null;
|
|
2192
|
+
clearState();
|
|
2193
|
+
return null;
|
|
2194
|
+
}
|
|
2195
|
+
if (_defaultState.wgTunnel && !healthChecks.tunnelActive) {
|
|
2196
|
+
// WireGuard state says connected but tunnel is dead — auto-cleanup
|
|
2197
|
+
if (conn?.sessionId && _defaultState._mnemonic) {
|
|
2198
|
+
_endSessionOnChain(conn.sessionId, _defaultState._mnemonic).then(r => events.emit('sessionEnded', { txHash: r?.transactionHash })).catch(e => events.emit('sessionEndFailed', { error: e.message }));
|
|
2199
|
+
}
|
|
2200
|
+
_defaultState.wgTunnel = null;
|
|
2201
|
+
_defaultState.connection = null;
|
|
2202
|
+
clearState();
|
|
2203
|
+
events.emit('disconnected', { nodeAddress: conn.nodeAddress, serviceType: conn.serviceType, reason: 'tunnel_died' });
|
|
2204
|
+
return null;
|
|
2205
|
+
}
|
|
2206
|
+
if (_defaultState.v2rayProc && !healthChecks.tunnelActive) {
|
|
2207
|
+
// V2Ray process died — auto-cleanup
|
|
2208
|
+
if (conn?.sessionId && _defaultState._mnemonic) {
|
|
2209
|
+
_endSessionOnChain(conn.sessionId, _defaultState._mnemonic).then(r => events.emit('sessionEnded', { txHash: r?.transactionHash })).catch(e => events.emit('sessionEndFailed', { error: e.message }));
|
|
2210
|
+
}
|
|
2211
|
+
_defaultState.v2rayProc = null;
|
|
2212
|
+
_defaultState.connection = null;
|
|
2213
|
+
clearState();
|
|
2214
|
+
events.emit('disconnected', { nodeAddress: conn.nodeAddress, serviceType: conn.serviceType, reason: 'tunnel_died' });
|
|
2215
|
+
return null;
|
|
2216
|
+
}
|
|
2217
|
+
|
|
2218
|
+
return {
|
|
2219
|
+
connected: _defaultState.isConnected,
|
|
2220
|
+
...conn,
|
|
2221
|
+
uptimeMs,
|
|
2222
|
+
uptimeFormatted: formatUptime(uptimeMs),
|
|
2223
|
+
healthChecks,
|
|
2224
|
+
};
|
|
2225
|
+
}
|
|
2226
|
+
|
|
2227
|
+
/**
 * Render a millisecond duration as a compact human-readable string.
 * Examples: "2h 5m", "3m 12s", "45s".
 * @param {number} ms - Duration in milliseconds.
 * @returns {string} Compact uptime string.
 */
function formatUptime(ms) {
  const totalSeconds = Math.floor(ms / 1000);
  const hours = Math.floor(totalSeconds / 3600);
  const minutes = Math.floor((totalSeconds % 3600) / 60);
  const seconds = totalSeconds % 60;
  if (hours > 0) {
    return `${hours}h ${minutes}m`;
  }
  // Below one minute, totalSeconds < 60, so it equals the seconds remainder.
  return minutes > 0 ? `${minutes}m ${seconds}s` : `${totalSeconds}s`;
}
|
|
2235
|
+
|
|
2236
|
+
// ─── Disconnect ──────────────────────────────────────────────────────────────
|
|
2237
|
+
|
|
2238
|
+
/**
 * Clean up all active tunnels and system proxy.
 * ALWAYS call this on exit — a stale WireGuard tunnel will kill your internet.
 */
/**
 * Disconnect a specific state instance (internal).
 *
 * Teardown order: kill switch → system proxy → V2Ray process → WireGuard
 * tunnel → on-chain session end (fire-and-forget). Each step is individually
 * best-effort so one failure never blocks the rest of the teardown.
 *
 * @param {object} state - Per-connection state object (e.g. _defaultState);
 *   fields read/written here: connection, systemProxy, v2rayProc, wgTunnel, _mnemonic.
 * @returns {Promise<void>}
 */
export async function disconnectState(state) {
  // v30: Signal any running connectAuto() retry loop to abort, and release the
  // connection lock so the user can reconnect after disconnect completes.
  _abortConnect = true;
  _connectLock = false;

  const prev = state.connection;
  // v29: try/finally ensures state.connection is ALWAYS cleared, even if
  // disableKillSwitch() or clearSystemProxy() throw. Previously, an exception
  // here left state.connection set → phantom "connected" status (IP leak).
  try {
    if (_killSwitchEnabled) {
      // Firewall teardown failure is non-fatal — keep tearing down tunnels.
      try { disableKillSwitch(); } catch (e) { console.warn('[sentinel-sdk] Kill switch disable warning:', e.message); }
    }
    if (state.systemProxy) {
      try { clearSystemProxy(); } catch (e) { console.warn('[sentinel-sdk] System proxy clear warning:', e.message); }
    }
    if (state.v2rayProc) {
      // SIGTERM now, SIGKILL escalation after 2s (see killV2RayProc).
      killV2RayProc(state.v2rayProc);
      state.v2rayProc = null;
    }
    if (state.wgTunnel) {
      try { await disconnectWireGuard(); } catch (e) { console.warn('[sentinel-sdk] WireGuard disconnect warning:', e.message); }
      state.wgTunnel = null;
    }

    // End session on chain (best-effort, fire-and-forget — never blocks disconnect)
    if (prev?.sessionId && state._mnemonic) {
      _endSessionOnChain(prev.sessionId, state._mnemonic).catch(e => {
        console.warn(`[sentinel-sdk] Failed to end session ${prev.sessionId} on chain: ${e.message}`);
      });
    }
  } finally {
    // ALWAYS clear connection state — even if teardown threw
    state._mnemonic = null;
    state.connection = null;
    clearState();
    clearWalletCache(); // v34: Clear cached wallet objects (private keys) from memory
    flushSpeedTestDnsCache(); // v25: Clear stale DNS cache between connections (#14)
    if (prev) events.emit('disconnected', { nodeAddress: prev.nodeAddress, serviceType: prev.serviceType, reason: 'user' });
  }
}
|
|
2285
|
+
|
|
2286
|
+
export async function disconnect() {
|
|
2287
|
+
return disconnectState(_defaultState);
|
|
2288
|
+
}
|
|
2289
|
+
|
|
2290
|
+
// ─── Session End (on-chain cleanup) ──────────────────────────────────────────
|
|
2291
|
+
|
|
2292
|
+
/**
 * End a session on-chain. Best-effort, fire-and-forget.
 * Prevents stale session accumulation on nodes.
 *
 * Signs a MsgEndSession with the wallet derived from `mnemonic` and
 * broadcasts it through the first reachable RPC endpoint. Failure is
 * logged, never thrown to the caller's main flow (callers .catch()).
 *
 * @param {string|bigint} sessionId - Session ID to end
 * @param {string} mnemonic - BIP39 mnemonic for signing the TX
 * @private
 */
async function _endSessionOnChain(sessionId, mnemonic) {
  const { wallet, account } = await cachedCreateWallet(mnemonic);
  // Idiom fix: await the fallback result directly instead of mixing
  // `await` with a trailing `.then(r => r.result)` on the same expression.
  const { result: client } = await tryWithFallback(
    RPC_ENDPOINTS,
    async (url) => createClient(url, wallet),
    'RPC connect (session end)',
  );
  const msg = buildEndSessionMsg(account.address, sessionId);
  // Fixed fee — end-session TXs are small and predictable.
  const fee = { amount: [{ denom: 'udvpn', amount: '20000' }], gas: '200000' };
  const result = await client.signAndBroadcast(account.address, [msg], fee);
  if (result.code !== 0) {
    console.warn(`[sentinel-sdk] End session TX failed (code ${result.code}): ${result.rawLog}`);
  } else {
    console.log(`[sentinel-sdk] Session ${sessionId} ended on chain (TX ${result.transactionHash})`);
  }
}
|
|
2315
|
+
|
|
2316
|
+
// ─── Session Recovery (v25) ──────────────────────────────────────────────────
|
|
2317
|
+
|
|
2318
|
+
/**
 * Retry handshake on an already-paid session. Use when connect fails AFTER payment.
 * The error.details from a failed connect contains { sessionId, nodeAddress } — pass those here.
 *
 * @param {object} opts - Same as connectDirect (mnemonic, v2rayExePath, etc.)
 * @param {string|bigint} opts.sessionId - Session ID from the failed connection error
 * @param {string} opts.nodeAddress - Node address from the failed connection error
 * @returns {Promise<ConnectResult>}
 * @throws {ValidationError} When sessionId, nodeAddress, or mnemonic is missing.
 */
export async function recoverSession(opts) {
  // Fail fast on the three inputs we cannot proceed without.
  if (!opts?.sessionId) throw new ValidationError(ErrorCodes.INVALID_OPTIONS, 'recoverSession requires opts.sessionId');
  if (!opts?.nodeAddress) throw new ValidationError(ErrorCodes.INVALID_OPTIONS, 'recoverSession requires opts.nodeAddress');
  if (!opts?.mnemonic) throw new ValidationError(ErrorCodes.INVALID_MNEMONIC, 'recoverSession requires opts.mnemonic');

  const logFn = opts.log || console.log;
  const onProgress = opts.onProgress || null;
  const sessionId = BigInt(opts.sessionId);
  const timeouts = { ...DEFAULT_TIMEOUTS, ...opts.timeouts };
  const tlsTrust = opts.tlsTrust || 'tofu';
  const state = opts._state || _defaultState;

  // Fetch node info
  progress(onProgress, logFn, 'recover', `Recovering session ${sessionId} on ${opts.nodeAddress}...`);
  const nodeAgent = createNodeHttpsAgent(opts.nodeAddress, tlsTrust);

  // Get node status (we need serviceType and remote URL)
  const lcdUrl = opts.lcdUrl || DEFAULT_LCD;
  const nodeInfo = await queryNode(opts.nodeAddress, { lcdUrl });

  const status = await nodeStatusV3(nodeInfo.remote_url, nodeAgent);
  // Validate binaries/permissions BEFORE the handshake attempt; also resolves
  // the actual V2Ray binary path when the provided one is missing.
  const resolvedV2rayPath = validateTunnelRequirements(status.type, opts.v2rayExePath);

  const privKey = await privKeyFromMnemonic(opts.mnemonic);
  // NOTE(review): drift > 120s on v2ray nodes is flagged and forwarded to
  // performHandshake — presumably it compensates downstream; confirm there.
  const extremeDrift = status.type === 'v2ray' && status.clockDriftSec !== null && Math.abs(status.clockDriftSec) > 120;

  try {
    const result = await performHandshake({
      serviceType: status.type,
      remoteUrl: nodeInfo.remote_url,
      serverHost: new URL(nodeInfo.remote_url).hostname,
      sessionId,
      privKey,
      v2rayExePath: resolvedV2rayPath,
      fullTunnel: opts.fullTunnel !== false,
      splitIPs: opts.splitIPs,
      systemProxy: opts.systemProxy === true,
      dns: opts.dns,
      onProgress,
      logFn,
      extremeDrift,
      clockDriftSec: status.clockDriftSec,
      nodeAddress: opts.nodeAddress,
      timeouts,
      signal: opts.signal,
      nodeAgent,
      state,
    });
    markSessionActive(String(sessionId), opts.nodeAddress);
    events.emit('connected', { sessionId, serviceType: status.type, nodeAddress: opts.nodeAddress });
    return result;
  } finally {
    // Zero the private key buffer regardless of handshake outcome.
    privKey.fill(0);
  }
}
|
|
2382
|
+
|
|
2383
|
+
/**
 * Register exit handlers to clean up tunnels on crash/exit.
 * Call this once at app startup.
 *
 * Idempotent — a second call is a no-op (prevents duplicate handler stacking).
 * On first registration it also recovers from a PREVIOUS crash: state-tracked
 * orphans, stale tunnels, and orphaned v2ray processes are cleaned up.
 */
export function registerCleanupHandlers() {
  if (_cleanupRegistered) return; // prevent duplicate handler stacking
  _cleanupRegistered = true;
  const orphans = recoverOrphans(); // recover state-tracked orphans from crash
  if (orphans?.cleaned?.length) console.log('[sentinel-sdk] Recovered orphans:', orphans.cleaned.join(', '));
  emergencyCleanupSync(); // kill stale tunnels from previous crash
  killOrphanV2Ray(); // kill orphaned v2ray from previous crash
  // Teardown below must be synchronous — the process is exiting.
  process.on('exit', () => { if (_killSwitchEnabled) disableKillSwitch(); clearSystemProxy(); killOrphanV2Ray(); emergencyCleanupSync(); });
  // Exit codes follow the 128+signal convention: 130 = SIGINT(2), 143 = SIGTERM(15).
  process.on('SIGINT', () => { if (_killSwitchEnabled) disableKillSwitch(); clearSystemProxy(); killOrphanV2Ray(); emergencyCleanupSync(); process.exit(130); });
  process.on('SIGTERM', () => { if (_killSwitchEnabled) disableKillSwitch(); clearSystemProxy(); killOrphanV2Ray(); emergencyCleanupSync(); process.exit(143); });
  process.on('uncaughtException', (err) => {
    console.error('Uncaught exception:', err);
    if (_killSwitchEnabled) disableKillSwitch();
    clearSystemProxy();
    killOrphanV2Ray();
    emergencyCleanupSync();
    process.exit(1);
  });
}
|
|
2406
|
+
|
|
2407
|
+
// ─── Quick Connect (v26c) ────────────────────────────────────────────────────
|
|
2408
|
+
|
|
2409
|
+
/**
 * One-call VPN connection. Handles everything: dependency check, cleanup registration,
 * node selection, connection, and IP verification. The simplest way to use the SDK.
 *
 * @param {object} opts
 * @param {string} opts.mnemonic - BIP39 wallet mnemonic (12 or 24 words)
 * @param {string[]} [opts.countries] - Preferred countries (e.g. ['DE', 'NL'])
 * @param {string} [opts.serviceType] - 'wireguard' | 'v2ray' | null (both)
 * @param {number} [opts.maxAttempts=3] - Max nodes to try
 * @param {function} [opts.onProgress] - Progress callback
 * @param {function} [opts.log] - Logger function
 * @returns {Promise<ConnectResult & { vpnIp?: string }>}
 * @throws {ValidationError} When opts.mnemonic is missing.
 */
export async function quickConnect(opts) {
  if (!opts?.mnemonic) {
    throw new ValidationError(ErrorCodes.INVALID_MNEMONIC, 'quickConnect() requires opts.mnemonic');
  }

  // Auto-register cleanup (idempotent)
  registerCleanupHandlers();

  // Check dependencies — missing binaries are warnings, not fatal, because the
  // selected node type may not need them (e.g. V2Ray works without WireGuard).
  const deps = verifyDependencies({ v2rayExePath: opts.v2rayExePath });
  if (!deps.ok) {
    const logFn = opts.log || console.warn;
    for (const err of deps.errors) logFn(`[quickConnect] Warning: ${err}`);
  }

  // Connect with auto-fallback
  const connectOpts = {
    ...opts,
    fullTunnel: opts.fullTunnel !== false, // default true
    systemProxy: opts.systemProxy !== false, // default true for V2Ray
    killSwitch: opts.killSwitch === true,
  };

  const result = await connectAuto(connectOpts);

  // Verify IP changed
  try {
    const { vpnIp } = await verifyConnection({ timeoutMs: 6000 });
    result.vpnIp = vpnIp;
  } catch { /* IP check is best-effort */ }

  return result;
}
|
|
2455
|
+
|
|
2456
|
+
// ─── Auto-Reconnect (v26c) ───────────────────────────────────────────────────
|
|
2457
|
+
|
|
2458
|
+
/**
 * Monitor connection and auto-reconnect on failure.
 * Returns an object with .stop() to cancel monitoring.
 *
 * Fixes over the original:
 *  1. onGaveUp fired on EVERY poll once maxRetries was exhausted; it now
 *     fires once per outage (re-armed when a connection is observed again).
 *  2. The poll callback awaits a backoff sleep that can exceed the poll
 *     interval, so setInterval could stack overlapping checks and race the
 *     retry counter; an in-flight guard now serializes checks.
 *
 * @param {object} opts - Same as connectAuto() options, plus:
 * @param {number} [opts.pollIntervalMs=5000] - Health check interval
 * @param {number} [opts.maxRetries=5] - Max consecutive reconnect attempts
 * @param {number[]} [opts.backoffMs=[1000,2000,5000,10000,30000]] - Backoff delays
 * @param {function} [opts.onReconnecting] - (attempt: number) => void
 * @param {function} [opts.onReconnected] - (result: ConnectResult) => void
 * @param {function} [opts.onGaveUp] - (errors: Error[]) => void
 * @returns {{ stop: () => void }}
 */
export function autoReconnect(opts) {
  const pollMs = opts.pollIntervalMs || 5000;
  const maxRetries = opts.maxRetries || 5;
  const backoff = opts.backoffMs || [1000, 2000, 5000, 10000, 30000];
  let wasConnected = false;
  let retries = 0;
  let timer = null;
  let stopped = false;
  let gaveUp = false;   // fire onGaveUp once per outage, not on every poll
  let inFlight = false; // prevent overlapping checks (backoff sleep spans poll ticks)

  const check = async () => {
    if (stopped || inFlight) return;
    inFlight = true;
    try {
      const status = getStatus();
      const connected = !!status; // v28 fix: getStatus() returns null when disconnected, not { connected: false }

      if (connected) {
        wasConnected = true;
        retries = 0;
        gaveUp = false; // connection restored — re-arm give-up notification
        return;
      }

      if (!wasConnected) return; // never connected yet, don't auto-reconnect

      // Lost connection — attempt reconnect
      if (retries >= maxRetries) {
        if (!gaveUp) {
          gaveUp = true;
          if (opts.onGaveUp) try { opts.onGaveUp([]); } catch {}
        }
        return;
      }

      retries++;
      if (opts.onReconnecting) try { opts.onReconnecting(retries); } catch {}

      const delay = backoff[Math.min(retries - 1, backoff.length - 1)];
      await sleep(delay);

      if (stopped) return;
      try {
        const result = await connectAuto(opts);
        retries = 0;
        wasConnected = true;
        if (opts.onReconnected) try { opts.onReconnected(result); } catch {}
      } catch (err) {
        // Don't count lock contention or aborts as real failures
        if (err?.code === 'ALREADY_CONNECTED' || err?.code === 'ABORTED') {
          retries--; // undo the increment — not a real connection failure
          return;
        }
        events.emit('error', err);
      }
    } finally {
      inFlight = false;
    }
  };

  timer = setInterval(check, pollMs);
  if (timer.unref) timer.unref(); // don't keep the process alive just to monitor

  return {
    stop: () => { stopped = true; if (timer) { clearInterval(timer); timer = null; } },
  };
}
|
|
2528
|
+
|
|
2529
|
+
// ─── Verify Connection (v26c) ────────────────────────────────────────────────
|
|
2530
|
+
|
|
2531
|
+
/**
 * Verify VPN is working by fetching the current public IP via ipify.org.
 *
 * NOTE(review): despite the old doc wording, this does NOT compare against a
 * pre-VPN baseline — it only confirms that an IP lookup succeeds through the
 * currently-active route. Callers that need proof the IP actually CHANGED
 * must capture the IP before connecting and compare it themselves.
 *
 * @param {object} [opts]
 * @param {number} [opts.timeoutMs=8000] - HTTP timeout for the IP lookup.
 * @returns {Promise<{ working: boolean, vpnIp: string|null, error?: string }>}
 */
export async function verifyConnection(opts = {}) {
  const timeout = opts.timeoutMs || 8000;
  try {
    const res = await axios.get('https://api.ipify.org?format=json', { timeout });
    const vpnIp = res.data?.ip || null;
    return { working: !!vpnIp, vpnIp };
  } catch (err) {
    // A failure here usually means the tunnel is down (or ipify is unreachable);
    // report it as data rather than throwing — callers treat this as best-effort.
    return { working: false, vpnIp: null, error: err.message };
  }
}
|
|
2549
|
+
|
|
2550
|
+
// ─── ConnectOptions Builder (v25) ────────────────────────────────────────────
|
|
2551
|
+
|
|
2552
|
+
/**
 * Create a reusable base config. Override per-call with .with().
 * The returned object is frozen; .with() produces a fresh plain object
 * (base values + overrides) and never carries the helper along in spreads.
 * @param {object} baseOpts - Default ConnectOptions (mnemonic, rpcUrl, etc.)
 * @returns {{ ...baseOpts, with(overrides): object }}
 */
export function createConnectConfig(baseOpts) {
  const config = { ...baseOpts };
  // Define .with as non-enumerable from the start so object spreads of the
  // config (including those produced by .with itself) never include it.
  Object.defineProperty(config, 'with', {
    value: (overrides) => ({ ...config, ...overrides }),
    enumerable: false,
  });
  return Object.freeze(config);
}
|
|
2564
|
+
|
|
2565
|
+
/**
 * Pre-flight check: verify all required binaries and permissions.
 *
 * Call this at app startup to surface clear, human-readable errors
 * instead of cryptic ENOENT crashes mid-connection.
 *
 * Never throws — every problem is collected into result.errors so callers
 * can decide whether a missing dependency matters for their node type.
 *
 * @param {object} [opts]
 * @param {string} [opts.v2rayExePath] - Explicit V2Ray binary path
 * @returns {{ ok: boolean, v2ray: { available: boolean, path: string|null, version: string|null, error: string|null }, wireguard: { available: boolean, path: string|null, isAdmin: boolean, error: string|null }, platform: string, arch: string, nodeVersion: string, errors: string[] }}
 */
export function verifyDependencies(opts = {}) {
  const errors = [];
  const result = {
    ok: true,
    v2ray: { available: false, path: null, version: null, error: null },
    wireguard: { available: false, path: null, isAdmin: IS_ADMIN, error: null },
    platform: process.platform,
    arch: process.arch,
    nodeVersion: process.version,
    errors,
  };

  // V2Ray check
  const v2path = findV2RayExe(opts.v2rayExePath);
  if (v2path) {
    result.v2ray.available = true;
    result.v2ray.path = v2path;
    try {
      // Best-effort version probe — a hung/odd binary must not block startup (5s cap).
      const ver = execFileSync(v2path, ['version'], { encoding: 'utf8', timeout: 5000, stdio: 'pipe' });
      const match = ver.match(/V2Ray\s+([\d.]+)/i) || ver.match(/([\d]+\.[\d]+\.[\d]+)/);
      result.v2ray.version = match ? match[1] : ver.trim().split('\n')[0];
    } catch {
      result.v2ray.version = 'unknown (binary exists but version check failed)';
    }
  } else {
    // Per-platform install hints for the error message.
    result.v2ray.error = process.platform === 'win32'
      ? 'V2Ray not found. Place v2ray.exe + geoip.dat + geosite.dat in a bin/ folder next to your app, or set the V2RAY_PATH environment variable.'
      : process.platform === 'darwin'
      ? 'V2Ray not found. Install via: brew install v2ray, or place the v2ray binary in ./bin/ or /usr/local/bin/'
      : 'V2Ray not found. Install via your package manager (apt install v2ray), or place the v2ray binary in ./bin/ or /usr/local/bin/';
    errors.push(result.v2ray.error);
  }

  // WireGuard check
  if (WG_AVAILABLE) {
    result.wireguard.available = true;
    // Windows: probe known install dirs; elsewhere: ask the shell where wg-quick lives.
    result.wireguard.path = process.platform === 'win32'
      ? ['C:\\Program Files\\WireGuard\\wireguard.exe', 'C:\\Program Files (x86)\\WireGuard\\wireguard.exe'].find(p => existsSync(p)) || 'in PATH'
      : (() => { try { return execSync('which wg-quick', { encoding: 'utf8', stdio: 'pipe' }).trim(); } catch { return 'in PATH'; } })();
    if (!IS_ADMIN) {
      // Installed but unusable without elevation — report as an error.
      result.wireguard.error = process.platform === 'win32'
        ? 'WireGuard requires Administrator privileges. Run your app as Admin, or use V2Ray nodes (no admin needed).'
        : 'WireGuard requires root/sudo. Run with sudo, or use V2Ray nodes (no root needed).';
      errors.push(result.wireguard.error);
    }
  } else {
    result.wireguard.error = process.platform === 'win32'
      ? 'WireGuard not installed. Download from https://download.wireguard.com/windows-client/wireguard-installer.exe — V2Ray nodes still work without it.'
      : process.platform === 'darwin'
      ? 'WireGuard not installed. Install via: brew install wireguard-tools — V2Ray nodes still work without it.'
      : 'WireGuard not installed. Install via: sudo apt install wireguard (or equivalent) — V2Ray nodes still work without it.';
    errors.push(result.wireguard.error);
  }

  result.ok = errors.length === 0;
  return result;
}
|
|
2632
|
+
|
|
2633
|
+
/**
 * Kill a V2Ray process with SIGTERM, falling back to SIGKILL if it doesn't exit.
 * Safe to call with a null/undefined process handle.
 * @param {import('node:child_process').ChildProcess} proc - Tracked V2Ray child process.
 */
function killV2RayProc(proc) {
  if (!proc) {
    return;
  }
  try {
    proc.kill('SIGTERM');
  } catch (e) {
    console.warn('[sentinel-sdk] V2Ray SIGTERM warning:', e.message);
  }
  // Give 2s for graceful shutdown, then force kill.
  const escalate = () => {
    try {
      if (!proc.killed) proc.kill('SIGKILL');
    } catch {} // SIGKILL can't be caught, truly final
  };
  setTimeout(escalate, 2000).unref(); // unref: don't keep the process alive for this
}
|
|
2644
|
+
|
|
2645
|
+
/**
 * Kill orphaned v2ray process if one exists from a previous crash.
 * Only kills the process tracked by this module (by PID), NOT all v2ray.exe.
 */
function killOrphanV2Ray() {
  for (const state of _activeStates) {
    const proc = state.v2rayProc;
    if (!proc) continue;
    killV2RayProc(proc);
    state.v2rayProc = null; // drop the handle so repeat calls are no-ops
  }
}
|
|
2657
|
+
|
|
2658
|
+
/**
 * Check if a port is available. Use this at startup to detect port conflicts
 * from zombie processes (e.g., old server still running on the same port).
 * Probes by briefly binding a throwaway server to 127.0.0.1.
 * @param {number} port - Port to check
 * @returns {Promise<boolean>} true if port is free
 */
export function checkPortFree(port) {
  return new Promise((resolve) => {
    const probe = createServer();
    probe.once('error', () => resolve(false)); // EADDRINUSE etc. → taken
    probe.once('listening', () => {
      // Bound successfully — release it before reporting free.
      probe.close(() => resolve(true));
    });
    probe.listen(port, '127.0.0.1');
  });
}
|
|
2672
|
+
|
|
2673
|
+
// ─── V2Ray binary detection ──────────────────────────────────────────────────
|
|
2674
|
+
// Search common locations so apps can find an existing v2ray.exe instead of
|
|
2675
|
+
// requiring every project to bundle its own copy.
|
|
2676
|
+
|
|
2677
|
+
/**
 * Locate a V2Ray binary on this system.
 * Search order: explicit hint → V2RAY_PATH env var → common install locations
 * (CWD bin/, SDK dir, Electron resources, per-OS paths) → system PATH lookup.
 * @param {string} [hint] - Explicit binary path to try first.
 * @returns {string|null} Path to the binary, or null if nothing was found.
 */
function findV2RayExe(hint) {
  const binary = process.platform === 'win32' ? 'v2ray.exe' : 'v2ray';

  // 1. Explicit path (if provided and exists)
  if (hint && existsSync(hint)) return hint;

  // 2. Environment variable
  if (process.env.V2RAY_PATH && existsSync(process.env.V2RAY_PATH)) {
    return process.env.V2RAY_PATH;
  }

  // 3. Search common locations (cross-platform)
  const home = os.homedir();
  const searchPaths = [
    // ─── Relative to CWD (works for any project layout) ───
    path.join(process.cwd(), 'bin', binary),
    path.join(process.cwd(), 'resources', 'bin', binary),

    // ─── Relative to SDK code dir (npm install or git clone) ───
    // The regex strips the leading slash from Windows file:// pathnames ("/C:/...").
    path.join(path.dirname(new URL(import.meta.url).pathname.replace(/^\/([A-Z]:)/, '$1')), 'bin', binary),

    // ─── Electron / bundled app paths ───
    // process.resourcesPath is set by Electron for packaged apps
    ...(typeof process.resourcesPath === 'string' ? [
      path.join(process.resourcesPath, 'bin', binary),
      path.join(process.resourcesPath, 'app.asar.unpacked', 'bin', binary),
      path.join(process.resourcesPath, 'extraResources', binary),
    ] : []),

    // ─── Windows ───
    ...(process.platform === 'win32' ? [
      'C:\\Program Files\\V2Ray\\v2ray.exe',
      'C:\\Program Files (x86)\\V2Ray\\v2ray.exe',
      path.join(home, 'AppData', 'Local', 'v2ray', 'v2ray.exe'),
      path.join(home, 'AppData', 'Local', 'Programs', 'v2ray', 'v2ray.exe'),
      path.join(home, 'scoop', 'apps', 'v2ray', 'current', 'v2ray.exe'),
    ] : []),

    // ─── macOS ───
    ...(process.platform === 'darwin' ? [
      '/usr/local/bin/v2ray',
      '/opt/homebrew/bin/v2ray',
      path.join(home, 'Library', 'Application Support', 'v2ray', 'v2ray'),
      '/Applications/V2Ray.app/Contents/MacOS/v2ray',
    ] : []),

    // ─── Linux ───
    ...(process.platform === 'linux' ? [
      '/usr/local/bin/v2ray',
      '/usr/bin/v2ray',
      path.join(home, '.local', 'bin', 'v2ray'),
      '/snap/v2ray/current/bin/v2ray',
      path.join(home, '.config', 'v2ray', 'v2ray'),
    ] : []),
  ];

  for (const p of searchPaths) {
    try { if (existsSync(p)) return p; } catch {} // catch invalid paths on non-matching platforms
  }

  // 4. Check system PATH
  try {
    const cmd = process.platform === 'win32' ? 'where' : 'which';
    const arg = process.platform === 'win32' ? 'v2ray.exe' : 'v2ray';
    const result = execFileSync(cmd, [arg], { encoding: 'utf8', stdio: 'pipe' }).trim();
    // `where` can print multiple matches — take the first.
    if (result) return result.split('\n')[0].trim();
  } catch {}

  return null;
}
|
|
2747
|
+
|
|
2748
|
+
// ─── Pre-validation (MUST run before paying for session) ─────────────────────
|
|
2749
|
+
|
|
2750
|
+
/**
 * Validate that tunnel requirements are met BEFORE paying for a session.
 * Prevents burning P2P on sessions that can never produce a working tunnel.
 *
 * For V2Ray: searches system for an existing v2ray.exe if the provided path
 * doesn't exist. Returns the resolved path so callers can use it.
 *
 * Throws with a clear error message if requirements are not met.
 *
 * @param {string} serviceType - 'v2ray' | 'wireguard' (other values pass through)
 * @param {string} [v2rayExePath] - Caller-provided V2Ray binary path hint
 * @returns {string|undefined} Resolved V2Ray binary path for v2ray nodes;
 *   for other service types, the input path is returned unchanged.
 * @throws {TunnelError} V2RAY_NOT_FOUND, WG_NOT_AVAILABLE, or TUNNEL_SETUP_FAILED.
 */
function validateTunnelRequirements(serviceType, v2rayExePath) {
  if (serviceType === 'v2ray') {
    const resolved = findV2RayExe(v2rayExePath);
    if (!resolved) {
      const searched = v2rayExePath ? `Checked: ${v2rayExePath} (not found). ` : '';
      throw new TunnelError(ErrorCodes.V2RAY_NOT_FOUND, `${searched}V2Ray binary not found anywhere on this system. Either: (a) set v2rayExePath to the correct path, (b) set V2RAY_PATH env var, (c) add v2ray.exe to PATH, or (d) download v2ray-core v5.x from https://github.com/v2fly/v2ray-core/releases and place v2ray.exe + geoip.dat + geosite.dat in a bin/ directory.`, { checked: v2rayExePath });
    }
    if (resolved !== v2rayExePath) {
      console.log(`V2Ray binary found at: ${resolved} (auto-detected)`);
    }
    // V2Ray version check — 5.44.1+ has observatory bugs that break multi-outbound configs
    try {
      const verOut = execFileSync(resolved, ['version'], { encoding: 'utf8', timeout: 5000, stdio: 'pipe' });
      const verMatch = verOut.match(/V2Ray\s+(\d+\.\d+\.\d+)/i) || verOut.match(/(\d+\.\d+\.\d+)/);
      if (verMatch) {
        const [major, minor] = verMatch[1].split('.').map(Number);
        // Bug fix: the old check (major >= 5 && minor >= 44) silently missed any
        // release with major > 5 and minor < 44 (e.g. 6.0.0), even though those
        // are also "5.44.1 or later". Proper lexicographic major/minor compare:
        if (major > 5 || (major === 5 && minor >= 44)) {
          console.warn(`[sentinel-sdk] WARNING: V2Ray ${verMatch[1]} detected — v5.44.1+ has observatory bugs. Recommended: v5.2.1 exactly.`);
        }
      }
    } catch { /* version check is best-effort */ }
    return resolved;
  } else if (serviceType === 'wireguard') {
    if (!WG_AVAILABLE) {
      throw new TunnelError(ErrorCodes.WG_NOT_AVAILABLE, 'WireGuard node selected but WireGuard is not installed. Download from https://download.wireguard.com/windows-client/wireguard-installer.exe');
    }
    if (!IS_ADMIN) {
      const hint = process.platform === 'win32'
        ? 'Restart your application as Administrator.'
        : 'Run with sudo, or use V2Ray nodes instead (no root needed).';
      throw new TunnelError(ErrorCodes.TUNNEL_SETUP_FAILED, `WireGuard requires ${process.platform === 'win32' ? 'administrator' : 'root'} privileges. ${hint}`);
    }
  }
  return v2rayExePath;
}
|
|
2794
|
+
|
|
2795
|
+
// fetchNodeFromLcd removed — use queryNode() from cosmjs-setup.js instead
|
|
2796
|
+
|
|
2797
|
+
// ─── Kill Switch (Firewall / Packet Filter) ────────────────────────────────
|
|
2798
|
+
|
|
2799
|
+
let _killSwitchEnabled = false;
|
|
2800
|
+
|
|
2801
|
+
/**
 * Enable kill switch — blocks all non-tunnel traffic so nothing leaks outside
 * the VPN if the tunnel drops.
 * Windows: netsh advfirewall, macOS: pfctl, Linux: iptables.
 * Call after the WireGuard tunnel is installed.
 * @param {string} serverEndpoint - WireGuard server "IP:PORT" ("[addr]:PORT" for IPv6)
 * @param {string} [tunnelName='wgsent0'] - WireGuard interface name
 * @throws {TunnelError} KILL_SWITCH_FAILED when the endpoint is malformed or the
 *   firewall rules cannot be applied (Windows and Linux roll back partial state first).
 */
export function enableKillSwitch(serverEndpoint, tunnelName = 'wgsent0') {
  // Split on the LAST colon so IPv6 endpoints like "[2001:db8::1]:51820" keep
  // the full address — a plain split(':') truncates at the first colon.
  // Brackets are stripped because the firewall tools expect the bare address.
  const sep = serverEndpoint.lastIndexOf(':');
  const serverIp = (sep > 0 ? serverEndpoint.slice(0, sep) : '').replace(/^\[|\]$/g, '');
  const serverPort = sep > 0 ? serverEndpoint.slice(sep + 1) : '';
  if (!serverIp || !/^\d+$/.test(serverPort)) {
    throw new TunnelError('KILL_SWITCH_FAILED', `Invalid WireGuard endpoint "${serverEndpoint}" — expected "IP:PORT"`);
  }

  if (process.platform === 'win32') {
    // Windows: default-deny outbound first, then explicit allows. If any allow
    // rule fails after the block-all, restore allowoutbound so the user is
    // never left without internet from a partial firewall state.
    execFileSync('netsh', ['advfirewall', 'set', 'allprofiles', 'firewallpolicy', 'blockinbound,blockoutbound'], { stdio: 'pipe' });
    try {
      // Allow tunnel interface
      execFileSync('netsh', ['advfirewall', 'firewall', 'add', 'rule', 'name=SentinelVPN-Allow-Tunnel', 'dir=out', `interface=${tunnelName}`, 'action=allow'], { stdio: 'pipe' });
      // Allow WireGuard endpoint (UDP to server)
      execFileSync('netsh', ['advfirewall', 'firewall', 'add', 'rule', 'name=SentinelVPN-Allow-WG-Endpoint', 'dir=out', 'action=allow', 'protocol=udp', `remoteip=${serverIp}`, `remoteport=${serverPort}`], { stdio: 'pipe' });
      // Allow loopback
      execFileSync('netsh', ['advfirewall', 'firewall', 'add', 'rule', 'name=SentinelVPN-Allow-Loopback', 'dir=out', 'action=allow', 'remoteip=127.0.0.1'], { stdio: 'pipe' });
      // Allow DHCP
      execFileSync('netsh', ['advfirewall', 'firewall', 'add', 'rule', 'name=SentinelVPN-Allow-DHCP', 'dir=out', 'action=allow', 'protocol=udp', 'localport=68', 'remoteport=67'], { stdio: 'pipe' });
      // Allow DNS only through tunnel
      execFileSync('netsh', ['advfirewall', 'firewall', 'add', 'rule', 'name=SentinelVPN-Allow-DNS-Tunnel', 'dir=out', 'action=allow', 'protocol=udp', 'remoteip=10.8.0.1', 'remoteport=53'], { stdio: 'pipe' });
      // Block IPv6 (prevent leaks)
      execFileSync('netsh', ['advfirewall', 'firewall', 'add', 'rule', 'name=SentinelVPN-Block-IPv6', 'dir=out', 'action=block', 'protocol=any', 'remoteip=::/0'], { stdio: 'pipe' });
    } catch (err) {
      // Emergency restore — unblock outbound so the user isn't locked out
      try { execFileSync('netsh', ['advfirewall', 'set', 'allprofiles', 'firewallpolicy', 'blockinbound,allowoutbound'], { stdio: 'pipe' }); } catch { /* last resort */ }
      _killSwitchEnabled = false;
      throw new TunnelError('KILL_SWITCH_FAILED', `Kill switch failed: ${err.message}`);
    }

  } else if (process.platform === 'darwin') {
    // macOS: pfctl (packet filter). One ruleset file, loaded atomically by pfctl -f.
    const pfRules = [
      '# Sentinel VPN Kill Switch',
      'block out all',
      `pass out on ${tunnelName} all`,
      `pass out proto udp from any to ${serverIp} port ${serverPort}`,
      'pass out on lo0 all',
      'pass out proto udp from any port 68 to any port 67',
      'pass out proto udp from any to 10.8.0.1 port 53',
      'block out inet6 all',
    ].join('\n') + '\n';

    const pfPath = '/tmp/sentinel-killswitch.conf';
    writeFileSync(pfPath, pfRules, { mode: 0o600 });

    // Save current pf state for restore (best effort)
    try { execFileSync('pfctl', ['-sr'], { encoding: 'utf8', stdio: 'pipe' }); } catch { /* may not have existing rules */ }

    // Load rules and enable pf
    execFileSync('pfctl', ['-f', pfPath], { stdio: 'pipe' });
    execFileSync('pfctl', ['-e'], { stdio: 'pipe' });

  } else {
    // Linux: iptables. Flush a stale terminal DROP from a previous run first so
    // repeated enables never stack duplicate DROP rules.
    try { execFileSync('iptables', ['-D', 'OUTPUT', '-m', 'comment', '--comment', 'sentinel-vpn', '-j', 'DROP'], { stdio: 'pipe' }); } catch { /* rule may not exist */ }

    // Record each applied rule spec so a mid-sequence failure can be rolled
    // back — mirrors the Windows emergency-restore behaviour so a partial
    // ruleset never blocks traffic permanently.
    const applied = [];
    const addRule = (spec, why) => { // eslint-disable-line no-unused-vars -- `why` documents intent at call sites
      execFileSync('iptables', ['-A', 'OUTPUT', ...spec, '-m', 'comment', '--comment', 'sentinel-vpn'], { stdio: 'pipe' });
      applied.push(spec);
    };
    try {
      addRule(['-o', 'lo', '-j', 'ACCEPT'], 'loopback');
      addRule(['-o', tunnelName, '-j', 'ACCEPT'], 'tunnel interface');
      addRule(['-d', serverIp, '-p', 'udp', '--dport', serverPort, '-j', 'ACCEPT'], 'WireGuard server endpoint');
      addRule(['-p', 'udp', '--sport', '68', '--dport', '67', '-j', 'ACCEPT'], 'DHCP');
      addRule(['-d', '10.8.0.1', '-p', 'udp', '--dport', '53', '-j', 'ACCEPT'], 'DNS only through tunnel');
      addRule(['-j', 'DROP'], 'block everything else');
    } catch (err) {
      // Roll back whatever was applied so the user is not left behind a
      // half-built firewall.
      for (const spec of applied.reverse()) {
        try { execFileSync('iptables', ['-D', 'OUTPUT', ...spec, '-m', 'comment', '--comment', 'sentinel-vpn'], { stdio: 'pipe' }); } catch { /* best effort */ }
      }
      _killSwitchEnabled = false;
      throw new TunnelError('KILL_SWITCH_FAILED', `Kill switch failed: ${err.message}`);
    }

    // Block IPv6 (prevent leaks) — best effort, ip6tables may be absent
    try { execFileSync('ip6tables', ['-A', 'OUTPUT', '-j', 'DROP', '-m', 'comment', '--comment', 'sentinel-vpn'], { stdio: 'pipe' }); } catch { /* ip6tables may not be available */ }
  }

  _killSwitchEnabled = true;
  // Persist kill switch state — survives crash so recoverOrphans() can restore internet
  try {
    const conn = _defaultState.connection || {};
    saveState({ sessionId: conn.sessionId, serviceType: conn.serviceType, nodeAddress: conn.nodeAddress, killSwitchEnabled: true });
  } catch { /* best-effort */ }
}
|
|
2900
|
+
|
|
2901
|
+
/**
 * Disable kill switch — restore normal routing.
 * Windows: removes netsh rules and restores the default outbound policy,
 * macOS: disables pfctl and removes the temp ruleset,
 * Linux: removes every iptables/ip6tables rule tagged "sentinel-vpn".
 * No-op if the kill switch is not currently enabled.
 */
export function disableKillSwitch() {
  if (!_killSwitchEnabled) return;

  if (process.platform === 'win32') {
    // Windows: remove our named firewall rules (each delete is best-effort —
    // a rule may already be gone after a crash/partial enable).
    const rules = [
      'SentinelVPN-Allow-Tunnel',
      'SentinelVPN-Allow-WG-Endpoint',
      'SentinelVPN-Allow-Loopback',
      'SentinelVPN-Allow-DHCP',
      'SentinelVPN-Allow-DNS-Tunnel',
      'SentinelVPN-Block-IPv6',
    ];
    for (const rule of rules) {
      try { execFileSync('netsh', ['advfirewall', 'firewall', 'delete', 'rule', `name=${rule}`], { stdio: 'pipe' }); } catch { /* rule may not exist */ }
    }

    // Restore default outbound policy
    try { execFileSync('netsh', ['advfirewall', 'set', 'allprofiles', 'firewallpolicy', 'blockinbound,allowoutbound'], { stdio: 'pipe' }); } catch { /* best effort */ }

  } else if (process.platform === 'darwin') {
    // macOS: disable pf and remove temp rules.
    // NOTE(review): `pfctl -d` disables pf globally, including any non-Sentinel
    // rules that were active before — confirm this is acceptable for callers.
    try { execFileSync('pfctl', ['-d'], { stdio: 'pipe' }); } catch { /* pf may already be disabled */ }
    try { unlinkSync('/tmp/sentinel-killswitch.conf'); } catch { /* file may not exist */ }

  } else {
    // Linux: delete every rule tagged "sentinel-vpn" using its EXACT spec.
    // `iptables -D` matches a full rule specification, so the previous generic
    // delete (`-m comment --comment sentinel-vpn -j ACCEPT`) could never match
    // rules that also carry `-o lo`, `-o <tun>` or `-d <ip> -p udp --dport …`
    // and left them installed. `iptables -S OUTPUT` echoes each rule as the
    // arguments it was added with; flipping `-A` to `-D` removes it verbatim.
    let listing = '';
    try { listing = execFileSync('iptables', ['-S', 'OUTPUT'], { encoding: 'utf8', stdio: 'pipe' }); } catch { /* iptables may be unavailable */ }
    for (const line of listing.split('\n')) {
      if (!line.includes('--comment sentinel-vpn')) continue;
      const args = line.trim().split(/\s+/);
      if (args[0] !== '-A') continue;
      args[0] = '-D';
      try { execFileSync('iptables', args, { stdio: 'pipe' }); } catch { /* best effort */ }
    }
    // IPv6 DROP was added with no extra matchers, so a direct spec delete matches.
    try { execFileSync('ip6tables', ['-D', 'OUTPUT', '-m', 'comment', '--comment', 'sentinel-vpn', '-j', 'DROP'], { stdio: 'pipe' }); } catch { /* rule may not exist */ }
  }

  _killSwitchEnabled = false;
  // Persist cleared kill switch state
  try {
    const conn = _defaultState.connection || {};
    saveState({ sessionId: conn.sessionId, serviceType: conn.serviceType, nodeAddress: conn.nodeAddress, killSwitchEnabled: false });
  } catch { /* best-effort */ }
}
|
|
2951
|
+
|
|
2952
|
+
/**
 * Report whether the kill switch firewall rules are currently active.
 * @returns {boolean} true while enableKillSwitch() has applied rules that
 *   disableKillSwitch() has not yet removed.
 */
export function isKillSwitchEnabled() {
  return _killSwitchEnabled;
}
|
|
2954
|
+
|
|
2955
|
+
// ─── DNS Leak Prevention ────────────────────────────────────────────────────
|
|
2956
|
+
|
|
2957
|
+
/**
 * Enable DNS leak prevention by forcing all DNS through the VPN tunnel.
 * Windows: netsh interface ipv4 set dnsservers + firewall rules
 * macOS: networksetup -setdnsservers on every network service
 * Linux: back up and overwrite /etc/resolv.conf
 * @param {string} [dnsServer='10.8.0.1'] - DNS server inside the tunnel (IPv4 dotted quad)
 * @param {string} [tunnelInterface='wgsent0'] - WireGuard tunnel interface name
 */
export function enableDnsLeakPrevention(dnsServer = '10.8.0.1', tunnelInterface = 'wgsent0') {
  const platform = process.platform;
  if (platform === 'win32') {
    // Set DNS on the tunnel interface to the tunnel resolver
    execFileSync('netsh', ['interface', 'ipv4', 'set', 'dnsservers', tunnelInterface, 'static', dnsServer, 'primary'], { stdio: 'pipe' });

    // Windows Firewall gives BLOCK rules precedence over ALLOW rules, so a
    // blanket port-53 block would defeat the tunnel allow rule and kill DNS
    // entirely. Instead, scope the block to every destination EXCEPT the
    // tunnel DNS server (netsh remoteip accepts comma-separated ranges).
    const ipToInt = (ip) => ip.split('.').reduce((acc, octet) => acc * 256 + Number(octet), 0);
    const intToIp = (n) => [n >>> 24, (n >>> 16) & 255, (n >>> 8) & 255, n & 255].join('.');
    const dns = ipToInt(dnsServer);
    const blockedRanges = [];
    if (dns > 0) blockedRanges.push(`0.0.0.0-${intToIp(dns - 1)}`);
    if (dns < 0xffffffff) blockedRanges.push(`${intToIp(dns + 1)}-255.255.255.255`);

    execFileSync('netsh', ['advfirewall', 'firewall', 'add', 'rule',
      'name=SentinelDNSBlock', 'dir=out', 'protocol=udp', 'remoteport=53',
      `remoteip=${blockedRanges.join(',')}`, 'action=block'], { stdio: 'pipe' });
    // Explicit allow for DNS on the tunnel interface (kept for symmetry with
    // disableDnsLeakPrevention(), which deletes both named rules).
    execFileSync('netsh', ['advfirewall', 'firewall', 'add', 'rule',
      'name=SentinelDNSAllow', 'dir=out', 'protocol=udp', 'remoteport=53',
      'interface=' + tunnelInterface, 'action=allow'], { stdio: 'pipe' });
  } else if (platform === 'darwin') {
    // macOS: set DNS via networksetup for all services.
    // slice(1): the first output line is an explanatory header ("An asterisk
    // (*) denotes …"), not a service name; lines starting with '*' are
    // disabled services and are skipped too.
    const services = execFileSync('networksetup', ['-listallnetworkservices'], { encoding: 'utf8' })
      .split('\n')
      .slice(1)
      .filter((s) => s.trim() && !s.startsWith('*'));
    for (const svc of services) {
      try { execFileSync('networksetup', ['-setdnsservers', svc.trim(), dnsServer], { stdio: 'pipe' }); } catch { /* best effort */ }
    }
  } else {
    // Linux: back up and overwrite resolv.conf. `cp -n` (no-clobber) keeps the
    // FIRST backup intact if enable is called twice, so disable always restores
    // the true pre-VPN resolver config instead of a tunneled one.
    try { execFileSync('cp', ['-n', '/etc/resolv.conf', '/etc/resolv.conf.sentinel.bak'], { stdio: 'pipe' }); } catch { /* backup may fail if file missing */ }
    writeFileSync('/etc/resolv.conf', `nameserver ${dnsServer}\n`);
  }
}
|
|
2990
|
+
|
|
2991
|
+
/**
 * Disable DNS leak prevention and restore normal DNS resolution.
 * Windows: removes the Sentinel DNS firewall rules and resets DNS to DHCP
 * macOS: clears per-service DNS overrides
 * Linux: restores /etc/resolv.conf from backup and removes the backup
 */
export function disableDnsLeakPrevention() {
  const platform = process.platform;
  if (platform === 'win32') {
    try { execFileSync('netsh', ['advfirewall', 'firewall', 'delete', 'rule', 'name=SentinelDNSBlock'], { stdio: 'pipe' }); } catch { /* rule may not exist */ }
    try { execFileSync('netsh', ['advfirewall', 'firewall', 'delete', 'rule', 'name=SentinelDNSAllow'], { stdio: 'pipe' }); } catch { /* rule may not exist */ }
    // Reset DNS to DHCP.
    // NOTE(review): only the default English interface names are covered;
    // localized or renamed adapters ("WLAN", "Ethernet 2") keep the static
    // tunnel DNS until manually reset — confirm whether enumeration is needed.
    try { execFileSync('netsh', ['interface', 'ipv4', 'set', 'dnsservers', 'Wi-Fi', 'dhcp'], { stdio: 'pipe' }); } catch { /* interface may not exist */ }
    try { execFileSync('netsh', ['interface', 'ipv4', 'set', 'dnsservers', 'Ethernet', 'dhcp'], { stdio: 'pipe' }); } catch { /* interface may not exist */ }
  } else if (platform === 'darwin') {
    // slice(1): first output line is an explanatory header, not a service
    // name; '*'-prefixed lines are disabled services. 'empty' clears the
    // override so the service falls back to DHCP-provided DNS.
    const services = execFileSync('networksetup', ['-listallnetworkservices'], { encoding: 'utf8' })
      .split('\n')
      .slice(1)
      .filter((s) => s.trim() && !s.startsWith('*'));
    for (const svc of services) {
      try { execFileSync('networksetup', ['-setdnsservers', svc.trim(), 'empty'], { stdio: 'pipe' }); } catch { /* best effort */ }
    }
  } else {
    // Restore the pre-VPN resolver config, then drop the backup so the next
    // enable cycle snapshots fresh state instead of reusing a stale file.
    try {
      execFileSync('cp', ['/etc/resolv.conf.sentinel.bak', '/etc/resolv.conf'], { stdio: 'pipe' });
      unlinkSync('/etc/resolv.conf.sentinel.bak');
    } catch { /* backup may not exist */ }
  }
}
|
|
3015
|
+
|