@hiveio/dhive 1.3.2 → 1.3.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/client.d.ts CHANGED
@@ -40,6 +40,7 @@ import { HivemindAPI } from './helpers/hivemind';
40
40
  import { AccountByKeyAPI } from './helpers/key';
41
41
  import { RCAPI } from './helpers/rc';
42
42
  import { TransactionStatusAPI } from './helpers/transaction';
43
+ import { NodeHealthTracker, HealthTrackerOptions } from './health-tracker';
43
44
  /**
44
45
  * Library version.
45
46
  */
@@ -102,6 +103,11 @@ export interface ClientOptions {
102
103
  * Deprecated - don't use
103
104
  */
104
105
  rebrandedApi?: boolean;
106
+ /**
107
+ * Options for the node health tracker.
108
+ * Controls cooldown periods, stale block thresholds, etc.
109
+ */
110
+ healthTrackerOptions?: HealthTrackerOptions;
105
111
  }
106
112
  /**
107
113
  * RPC Client
@@ -146,6 +152,11 @@ export declare class Client {
146
152
  * Transaction status API helper.
147
153
  */
148
154
  readonly transaction: TransactionStatusAPI;
155
+ /**
156
+ * Node health tracker for smart failover.
157
+ * Tracks per-node, per-API health and head block freshness.
158
+ */
159
+ readonly healthTracker: NodeHealthTracker;
149
160
  /**
150
161
  * Chain ID for current network.
151
162
  */
package/lib/client.js CHANGED
@@ -53,6 +53,7 @@ const hivemind_1 = require("./helpers/hivemind");
53
53
  const key_1 = require("./helpers/key");
54
54
  const rc_1 = require("./helpers/rc");
55
55
  const transaction_1 = require("./helpers/transaction");
56
+ const health_tracker_1 = require("./health-tracker");
56
57
  const utils_1 = require("./utils");
57
58
  /**
58
59
  * Library version.
@@ -94,6 +95,7 @@ class Client {
94
95
  this.backoff = options.backoff || defaultBackoff;
95
96
  this.failoverThreshold = options.failoverThreshold || 3;
96
97
  this.consoleOnFailover = options.consoleOnFailover || false;
98
+ this.healthTracker = new health_tracker_1.NodeHealthTracker(options.healthTrackerOptions);
97
99
  this.database = new database_1.DatabaseAPI(this);
98
100
  this.broadcast = new broadcast_1.BroadcastAPI(this);
99
101
  this.blockchain = new blockchain_1.Blockchain(this);
@@ -125,6 +127,8 @@ class Client {
125
127
  */
126
128
  call(api, method, params = []) {
127
129
  return __awaiter(this, void 0, void 0, function* () {
130
+ const isBroadcast = api === 'network_broadcast_api' ||
131
+ method.startsWith('broadcast_transaction');
128
132
  const request = {
129
133
  id: 0,
130
134
  jsonrpc: '2.0',
@@ -159,18 +163,32 @@ class Client {
159
163
  opts.agent = this.options.agent;
160
164
  }
161
165
  let fetchTimeout;
162
- if (api !== 'network_broadcast_api' &&
163
- !method.startsWith('broadcast_transaction')) {
166
+ if (!isBroadcast) {
164
167
  // bit of a hack to work around some nodes high error rates
165
168
  // only effective in node.js (until timeout spec lands in browsers)
166
169
  fetchTimeout = (tries) => (tries + 1) * 500;
167
170
  }
168
- const { response, currentAddress } = yield utils_1.retryingFetch(this.currentAddress, this.address, opts, this.timeout, this.failoverThreshold, this.consoleOnFailover, this.backoff, fetchTimeout);
171
+ const { response, currentAddress } = yield utils_1.retryingFetch(this.currentAddress, this.address, opts, this.timeout, this.failoverThreshold, this.consoleOnFailover, this.backoff, fetchTimeout, {
172
+ healthTracker: this.healthTracker,
173
+ api,
174
+ isBroadcast,
175
+ consoleOnFailover: this.consoleOnFailover,
176
+ });
169
177
  // After failover, change the currently active address
170
178
  if (currentAddress !== this.currentAddress) {
171
179
  this.currentAddress = currentAddress;
172
180
  }
173
- // resolve FC error messages into something more readable
181
+ // Passively track head block from get_dynamic_global_properties responses.
182
+ // This costs nothing — we just inspect data we already fetched.
183
+ if (response.result &&
184
+ method === 'get_dynamic_global_properties' &&
185
+ response.result.head_block_number) {
186
+ this.healthTracker.updateHeadBlock(currentAddress, response.result.head_block_number);
187
+ }
188
+ // Handle RPC-level errors.
189
+ // Unlike network errors, these mean the node responded but returned an error.
190
+ // We record it as an API-specific failure so the health tracker can
191
+ // deprioritize this node for this API in future calls.
174
192
  if (response.error) {
175
193
  const formatValue = (value) => {
176
194
  switch (typeof value) {
@@ -200,6 +218,17 @@ class Client {
200
218
  message += ' ' + unformattedData.join(' ');
201
219
  }
202
220
  }
221
+ // Track RPC errors that indicate node/plugin issues (not user errors).
222
+ // JSON-RPC error codes (response.error.code):
223
+ // -32601 = Method not found (plugin not enabled on this node)
224
+ // -32603 = Internal error (node issue)
225
+ // -32003 = Hive assertion error (user error — bad params, invalid account)
226
+ // Only API/plugin errors should be tracked, and only as API-specific failures
227
+ // (not global node failures) since other APIs on this node may work fine.
228
+ const rpcCode = response.error.code;
229
+ if (rpcCode === -32601 || rpcCode === -32603) {
230
+ this.healthTracker.recordApiFailure(currentAddress, api);
231
+ }
203
232
  throw new verror_1.VError({ info: data, name: 'RPCError' }, message);
204
233
  }
205
234
  assert.equal(response.id, request.id, 'got invalid response id');
@@ -0,0 +1,100 @@
1
+ /**
2
+ * @file Node health tracking for smart failover.
3
+ * @license BSD-3-Clause-No-Military-License
4
+ *
5
+ * Tracks per-node, per-API health to enable intelligent failover decisions.
6
+ * Nodes that fail for specific APIs are deprioritized for those APIs while
7
+ * remaining available for others. Stale nodes (behind on head block) are
8
+ * also deprioritized.
9
+ */
10
+ export interface HealthTrackerOptions {
11
+ /**
12
+ * How long (ms) to deprioritize a node after consecutive failures.
13
+ * Default: 30 seconds.
14
+ */
15
+ nodeCooldownMs?: number;
16
+ /**
17
+ * How long (ms) to deprioritize a node for a specific API after failures.
18
+ * Default: 60 seconds.
19
+ */
20
+ apiCooldownMs?: number;
21
+ /**
22
+ * Number of consecutive failures before a node enters cooldown.
23
+ * Default: 3.
24
+ */
25
+ maxFailuresBeforeCooldown?: number;
26
+ /**
27
+ * Number of API-specific failures before deprioritizing for that API.
28
+ * Default: 2.
29
+ */
30
+ maxApiFailuresBeforeCooldown?: number;
31
+ /**
32
+ * How many blocks behind the best known head block a node can be
33
+ * before being considered stale. Default: 30.
34
+ */
35
+ staleBlockThreshold?: number;
36
+ /**
37
+ * How long (ms) head block data remains valid for staleness checks.
38
+ * Default: 2 minutes.
39
+ */
40
+ headBlockTtlMs?: number;
41
+ }
42
+ export declare class NodeHealthTracker {
43
+ private health;
44
+ private bestKnownHeadBlock;
45
+ private bestKnownHeadBlockTime;
46
+ private readonly nodeCooldownMs;
47
+ private readonly apiCooldownMs;
48
+ private readonly maxFailuresBeforeCooldown;
49
+ private readonly maxApiFailuresBeforeCooldown;
50
+ private readonly staleBlockThreshold;
51
+ private readonly headBlockTtlMs;
52
+ constructor(options?: HealthTrackerOptions);
53
+ private getOrCreate;
54
+ /**
55
+ * Record a successful call to a node for a specific API.
56
+ * Clears consecutive failure counter and API-specific failures for this API.
57
+ */
58
+ recordSuccess(node: string, api: string): void;
59
+ /**
60
+ * Record a network-level failure (timeout, connection refused, HTTP error).
61
+ * Increments both the global consecutive failure counter and the API-specific counter.
62
+ */
63
+ recordFailure(node: string, api: string): void;
64
+ /**
65
+ * Record an API/plugin-specific failure (e.g. "method not found", "plugin not enabled").
66
+ * Only increments the per-API counter, NOT the global consecutive failure counter.
67
+ * This prevents a node with a disabled plugin from being penalized for all APIs.
68
+ */
69
+ recordApiFailure(node: string, api: string): void;
70
+ private incrementApiFailure;
71
+ /**
72
+ * Update head block number for a node.
73
+ * Called passively when get_dynamic_global_properties responses are observed.
74
+ */
75
+ updateHeadBlock(node: string, headBlock: number): void;
76
+ /**
77
+ * Check if a node is considered healthy for a given API.
78
+ */
79
+ isNodeHealthy(node: string, api?: string): boolean;
80
+ /**
81
+ * Return nodes ordered by health for a specific API call.
82
+ * Healthy nodes come first (preserving original order), then unhealthy nodes as fallback.
83
+ */
84
+ getOrderedNodes(allNodes: string[], api?: string): string[];
85
+ /**
86
+ * Reset all health tracking data.
87
+ */
88
+ reset(): void;
89
+ /**
90
+ * Get a snapshot of current health state for diagnostics.
91
+ */
92
+ getHealthSnapshot(): Map<string, {
93
+ consecutiveFailures: number;
94
+ headBlock: number;
95
+ apiFailures: Record<string, {
96
+ count: number;
97
+ }>;
98
+ healthy: boolean;
99
+ }>;
100
+ }
@@ -0,0 +1,167 @@
1
+ "use strict";
2
+ /**
3
+ * @file Node health tracking for smart failover.
4
+ * @license BSD-3-Clause-No-Military-License
5
+ *
6
+ * Tracks per-node, per-API health to enable intelligent failover decisions.
7
+ * Nodes that fail for specific APIs are deprioritized for those APIs while
8
+ * remaining available for others. Stale nodes (behind on head block) are
9
+ * also deprioritized.
10
+ */
11
+ Object.defineProperty(exports, "__esModule", { value: true });
12
+ class NodeHealthTracker {
13
+ constructor(options = {}) {
14
+ var _a, _b, _c, _d, _e, _f;
15
+ this.health = new Map();
16
+ this.bestKnownHeadBlock = 0;
17
+ this.bestKnownHeadBlockTime = 0;
18
+ this.nodeCooldownMs = (_a = options.nodeCooldownMs) !== null && _a !== void 0 ? _a : 30000;
19
+ this.apiCooldownMs = (_b = options.apiCooldownMs) !== null && _b !== void 0 ? _b : 60000;
20
+ this.maxFailuresBeforeCooldown = (_c = options.maxFailuresBeforeCooldown) !== null && _c !== void 0 ? _c : 3;
21
+ this.maxApiFailuresBeforeCooldown = (_d = options.maxApiFailuresBeforeCooldown) !== null && _d !== void 0 ? _d : 2;
22
+ this.staleBlockThreshold = (_e = options.staleBlockThreshold) !== null && _e !== void 0 ? _e : 30;
23
+ this.headBlockTtlMs = (_f = options.headBlockTtlMs) !== null && _f !== void 0 ? _f : 120000;
24
+ }
25
+ getOrCreate(node) {
26
+ let state = this.health.get(node);
27
+ if (!state) {
28
+ state = {
29
+ apiFailures: new Map(),
30
+ consecutiveFailures: 0,
31
+ lastFailure: 0,
32
+ headBlock: 0,
33
+ headBlockUpdatedAt: 0,
34
+ };
35
+ this.health.set(node, state);
36
+ }
37
+ return state;
38
+ }
39
+ /**
40
+ * Record a successful call to a node for a specific API.
41
+ * Clears consecutive failure counter and API-specific failures for this API.
42
+ */
43
+ recordSuccess(node, api) {
44
+ const state = this.getOrCreate(node);
45
+ state.consecutiveFailures = 0;
46
+ state.apiFailures.delete(api);
47
+ }
48
+ /**
49
+ * Record a network-level failure (timeout, connection refused, HTTP error).
50
+ * Increments both the global consecutive failure counter and the API-specific counter.
51
+ */
52
+ recordFailure(node, api) {
53
+ const state = this.getOrCreate(node);
54
+ state.consecutiveFailures++;
55
+ state.lastFailure = Date.now();
56
+ this.incrementApiFailure(state, api);
57
+ }
58
+ /**
59
+ * Record an API/plugin-specific failure (e.g. "method not found", "plugin not enabled").
60
+ * Only increments the per-API counter, NOT the global consecutive failure counter.
61
+ * This prevents a node with a disabled plugin from being penalized for all APIs.
62
+ */
63
+ recordApiFailure(node, api) {
64
+ const state = this.getOrCreate(node);
65
+ this.incrementApiFailure(state, api);
66
+ }
67
+ incrementApiFailure(state, api) {
68
+ const apiState = state.apiFailures.get(api) || { count: 0, lastFailure: 0 };
69
+ apiState.count++;
70
+ apiState.lastFailure = Date.now();
71
+ state.apiFailures.set(api, apiState);
72
+ }
73
+ /**
74
+ * Update head block number for a node.
75
+ * Called passively when get_dynamic_global_properties responses are observed.
76
+ */
77
+ updateHeadBlock(node, headBlock) {
78
+ if (!headBlock || headBlock <= 0)
79
+ return;
80
+ const state = this.getOrCreate(node);
81
+ state.headBlock = headBlock;
82
+ state.headBlockUpdatedAt = Date.now();
83
+ if (headBlock > this.bestKnownHeadBlock) {
84
+ this.bestKnownHeadBlock = headBlock;
85
+ this.bestKnownHeadBlockTime = Date.now();
86
+ }
87
+ }
88
+ /**
89
+ * Check if a node is considered healthy for a given API.
90
+ */
91
+ isNodeHealthy(node, api) {
92
+ const state = this.health.get(node);
93
+ if (!state)
94
+ return true; // Unknown nodes are assumed healthy
95
+ const now = Date.now();
96
+ // Check overall node health (consecutive failures)
97
+ if (state.consecutiveFailures >= this.maxFailuresBeforeCooldown) {
98
+ if (now - state.lastFailure < this.nodeCooldownMs) {
99
+ return false;
100
+ }
101
+ }
102
+ // Check API-specific health
103
+ if (api) {
104
+ const apiState = state.apiFailures.get(api);
105
+ if (apiState && apiState.count >= this.maxApiFailuresBeforeCooldown) {
106
+ if (now - apiState.lastFailure < this.apiCooldownMs) {
107
+ return false;
108
+ }
109
+ }
110
+ }
111
+ // Check head block staleness
112
+ if (state.headBlock > 0 &&
113
+ this.bestKnownHeadBlock > 0 &&
114
+ now - state.headBlockUpdatedAt < this.headBlockTtlMs &&
115
+ now - this.bestKnownHeadBlockTime < this.headBlockTtlMs) {
116
+ if (this.bestKnownHeadBlock - state.headBlock > this.staleBlockThreshold) {
117
+ return false;
118
+ }
119
+ }
120
+ return true;
121
+ }
122
+ /**
123
+ * Return nodes ordered by health for a specific API call.
124
+ * Healthy nodes come first (preserving original order), then unhealthy nodes as fallback.
125
+ */
126
+ getOrderedNodes(allNodes, api) {
127
+ const healthy = [];
128
+ const unhealthy = [];
129
+ for (const node of allNodes) {
130
+ if (this.isNodeHealthy(node, api)) {
131
+ healthy.push(node);
132
+ }
133
+ else {
134
+ unhealthy.push(node);
135
+ }
136
+ }
137
+ return [...healthy, ...unhealthy];
138
+ }
139
+ /**
140
+ * Reset all health tracking data.
141
+ */
142
+ reset() {
143
+ this.health.clear();
144
+ this.bestKnownHeadBlock = 0;
145
+ this.bestKnownHeadBlockTime = 0;
146
+ }
147
+ /**
148
+ * Get a snapshot of current health state for diagnostics.
149
+ */
150
+ getHealthSnapshot() {
151
+ const snapshot = new Map();
152
+ for (const [node, state] of this.health) {
153
+ const apiFailures = {};
154
+ for (const [api, failure] of state.apiFailures) {
155
+ apiFailures[api] = { count: failure.count };
156
+ }
157
+ snapshot.set(node, {
158
+ consecutiveFailures: state.consecutiveFailures,
159
+ headBlock: state.headBlock,
160
+ apiFailures,
161
+ healthy: this.isNodeHealthy(node),
162
+ });
163
+ }
164
+ return snapshot;
165
+ }
166
+ }
167
+ exports.NodeHealthTracker = NodeHealthTracker;
package/lib/index.d.ts CHANGED
@@ -34,6 +34,7 @@
34
34
  */
35
35
  import * as utils from './utils';
36
36
  export { utils };
37
+ export { NodeHealthTracker, HealthTrackerOptions } from './health-tracker';
37
38
  export * from './helpers/blockchain';
38
39
  export * from './helpers/database';
39
40
  export * from './helpers/rc';
package/lib/index.js CHANGED
@@ -39,6 +39,8 @@ function __export(m) {
39
39
  Object.defineProperty(exports, "__esModule", { value: true });
40
40
  const utils = require("./utils");
41
41
  exports.utils = utils;
42
+ var health_tracker_1 = require("./health-tracker");
43
+ exports.NodeHealthTracker = health_tracker_1.NodeHealthTracker;
42
44
  __export(require("./helpers/blockchain"));
43
45
  __export(require("./helpers/database"));
44
46
  __export(require("./helpers/rc"));
package/lib/utils.d.ts CHANGED
@@ -34,6 +34,20 @@
34
34
  */
35
35
  /// <reference types="node" />
36
36
  import { EventEmitter } from 'events';
37
+ import { NodeHealthTracker } from './health-tracker';
38
+ /**
39
+ * Context for smart retry/failover decisions.
40
+ */
41
+ export interface RetryContext {
42
+ /** Health tracker instance for per-node, per-API tracking */
43
+ healthTracker?: NodeHealthTracker;
44
+ /** The API being called (e.g. "bridge", "condenser_api", "database_api") */
45
+ api?: string;
46
+ /** Whether this is a broadcast operation — never retry after request may have been received */
47
+ isBroadcast?: boolean;
48
+ /** Whether to log failover events to console */
49
+ consoleOnFailover?: boolean;
50
+ }
37
51
  /**
38
52
  * Return a promise that will resolve when a specific event is emitted.
39
53
  */
@@ -51,9 +65,19 @@ export declare function iteratorStream<T>(iterator: AsyncIterableIterator<T>): N
51
65
  */
52
66
  export declare function copy<T>(object: T): T;
53
67
  /**
54
- * Fetch API wrapper that retries until timeout is reached.
68
+ * Smart fetch with immediate failover and per-node health tracking.
69
+ *
70
+ * For read operations:
71
+ * - On failure, immediately try the next healthy node (no backoff within a round)
72
+ * - After trying all nodes once (one round), apply backoff before the next round
73
+ * - Stop after failoverThreshold rounds
74
+ *
75
+ * For broadcast operations:
76
+ * - Only retry on pre-connection errors (ECONNREFUSED, ENOTFOUND, etc.)
77
+ * where we know the request never reached the server
78
+ * - NEVER retry after timeout or response errors to prevent double-broadcasting
55
79
  */
56
- export declare function retryingFetch(currentAddress: string, allAddresses: string | string[], opts: any, timeout: number, failoverThreshold: number, consoleOnFailover: boolean, backoff: (tries: number) => number, fetchTimeout?: (tries: number) => number): Promise<{
80
+ export declare function retryingFetch(currentAddress: string, allAddresses: string | string[], opts: any, timeout: number, failoverThreshold: number, consoleOnFailover: boolean, backoff: (tries: number) => number, fetchTimeout?: (tries: number) => number, retryContext?: RetryContext): Promise<{
57
81
  response: any;
58
82
  currentAddress: string;
59
83
  }>;
package/lib/utils.js CHANGED
@@ -52,8 +52,10 @@ var __asyncValues = (this && this.__asyncValues) || function (o) {
52
52
  Object.defineProperty(exports, "__esModule", { value: true });
53
53
  const cross_fetch_1 = require("cross-fetch");
54
54
  const stream_1 = require("stream");
55
- // TODO: Add more errors that should trigger a failover
56
- const timeoutErrors = ['timeout', 'ENOTFOUND', 'ECONNREFUSED', 'database lock', 'CERT_HAS_EXPIRED', 'EHOSTUNREACH', 'ECONNRESET', 'ERR_TLS_CERT_ALTNAME_INVALID', 'EAI_AGAIN'];
55
+ // Errors that indicate the request never reached the server — safe to retry even for broadcasts
56
+ const PRE_CONNECTION_ERRORS = ['ECONNREFUSED', 'ENOTFOUND', 'EHOSTUNREACH', 'EAI_AGAIN'];
57
+ // All errors that should trigger failover for read operations
58
+ const FAILOVER_ERRORS = [...PRE_CONNECTION_ERRORS, 'timeout', 'database lock', 'CERT_HAS_EXPIRED', 'ECONNRESET', 'ERR_TLS_CERT_ALTNAME_INVALID', 'ETIMEDOUT', 'EPIPE', 'EPROTO'];
57
59
  /**
58
60
  * Return a promise that will resolve when a specific event is emitted.
59
61
  */
@@ -114,70 +116,175 @@ function copy(object) {
114
116
  }
115
117
  exports.copy = copy;
116
118
  /**
117
- * Fetch API wrapper that retries until timeout is reached.
119
+ * Check if an error code indicates the request never reached the server.
118
120
  */
119
- function retryingFetch(currentAddress, allAddresses, opts, timeout, failoverThreshold, consoleOnFailover, backoff, fetchTimeout) {
121
+ function isPreConnectionError(error) {
122
+ if (!error || !error.code)
123
+ return false;
124
+ return PRE_CONNECTION_ERRORS.some((code) => error.code.includes(code));
125
+ }
126
+ /**
127
+ * Check if an error should trigger failover for read operations.
128
+ * Matches any known network/timeout error, or errors with no code (HTTP errors).
129
+ */
130
+ function shouldFailover(error) {
131
+ if (!error)
132
+ return true;
133
+ // HTTP errors (from !response.ok) have no .code — they should trigger failover
134
+ if (!error.code)
135
+ return true;
136
+ return FAILOVER_ERRORS.some((code) => error.code.includes(code));
137
+ }
138
+ /**
139
+ * Get the next node in the ordered list (wraps around).
140
+ */
141
+ function nextNode(nodes, currentIndex) {
142
+ return (currentIndex + 1) % nodes.length;
143
+ }
144
+ /**
145
+ * Smart fetch with immediate failover and per-node health tracking.
146
+ *
147
+ * For read operations:
148
+ * - On failure, immediately try the next healthy node (no backoff within a round)
149
+ * - After trying all nodes once (one round), apply backoff before the next round
150
+ * - Stop after failoverThreshold rounds
151
+ *
152
+ * For broadcast operations:
153
+ * - Only retry on pre-connection errors (ECONNREFUSED, ENOTFOUND, etc.)
154
+ * where we know the request never reached the server
155
+ * - NEVER retry after timeout or response errors to prevent double-broadcasting
156
+ */
157
+ function retryingFetch(currentAddress, allAddresses, opts, timeout, failoverThreshold, consoleOnFailover, backoff, fetchTimeout, retryContext) {
158
+ var _a;
120
159
  return __awaiter(this, void 0, void 0, function* () {
121
- let start = Date.now();
122
- let tries = 0;
160
+ const { healthTracker, api, isBroadcast } = retryContext || {};
161
+ const logFailover = (_a = retryContext === null || retryContext === void 0 ? void 0 : retryContext.consoleOnFailover) !== null && _a !== void 0 ? _a : consoleOnFailover;
162
+ // Build ordered node list: healthy nodes first, then unhealthy as fallback
163
+ let orderedNodes;
164
+ if (Array.isArray(allAddresses) && allAddresses.length > 1) {
165
+ orderedNodes = healthTracker
166
+ ? healthTracker.getOrderedNodes(allAddresses, api)
167
+ : [...allAddresses];
168
+ }
169
+ else {
170
+ orderedNodes = Array.isArray(allAddresses) ? allAddresses : [allAddresses];
171
+ }
172
+ // Always start from the healthiest node (index 0 of the ordered list).
173
+ // The health tracker already sorted nodes with healthy ones first,
174
+ // so starting from 0 ensures we use the best available node.
175
+ let nodeIndex = 0;
176
+ const totalNodes = orderedNodes.length;
177
+ const startTime = Date.now();
178
+ let nodesTriedInRound = 0;
123
179
  let round = 0;
124
- do {
180
+ let lastError;
181
+ // tslint:disable-next-line: no-constant-condition
182
+ while (true) {
183
+ const node = orderedNodes[nodeIndex];
125
184
  try {
126
185
  if (fetchTimeout) {
127
- opts.timeout = fetchTimeout(tries);
186
+ opts.timeout = fetchTimeout(nodesTriedInRound);
128
187
  }
129
- const response = yield cross_fetch_1.default(currentAddress, opts);
188
+ const response = yield cross_fetch_1.default(node, opts);
130
189
  if (!response.ok) {
190
+ // Support for Drone: HTTP 500 with valid JSON-RPC response
191
+ if (response.status === 500) {
192
+ try {
193
+ const resJson = yield response.json();
194
+ if (resJson.jsonrpc === '2.0') {
195
+ if (healthTracker && api)
196
+ healthTracker.recordSuccess(node, api);
197
+ return { response: resJson, currentAddress: node };
198
+ }
199
+ }
200
+ catch (_b) {
201
+ // JSON parse failed, fall through to error handling
202
+ }
203
+ }
131
204
  throw new Error(`HTTP ${response.status}: ${response.statusText}`);
132
205
  }
133
- return { response: yield response.json(), currentAddress };
206
+ const responseJson = yield response.json();
207
+ // Record success in health tracker
208
+ if (healthTracker && api) {
209
+ healthTracker.recordSuccess(node, api);
210
+ }
211
+ return { response: responseJson, currentAddress: node };
134
212
  }
135
213
  catch (error) {
136
- if (timeout !== 0 && Date.now() - start > timeout) {
137
- if ((!error || !error.code) && Array.isArray(allAddresses)) {
138
- // If error is empty or not code is present, it means rpc is down => switch
139
- currentAddress = failover(currentAddress, allAddresses, currentAddress, consoleOnFailover);
214
+ lastError = error;
215
+ // Record failure in health tracker
216
+ if (healthTracker && api) {
217
+ healthTracker.recordFailure(node, api);
218
+ }
219
+ // === BROADCAST SAFETY ===
220
+ // For broadcasts, only retry if the request definitely never reached the server.
221
+ // If there's any chance the server received it, throw immediately to prevent
222
+ // double-broadcasting (e.g. double transfers, double votes).
223
+ if (isBroadcast) {
224
+ if (isPreConnectionError(error) && totalNodes > 1) {
225
+ // Safe to try another node — request never left the client
226
+ nodeIndex = nextNode(orderedNodes, nodeIndex);
227
+ nodesTriedInRound++;
228
+ if (nodesTriedInRound >= totalNodes) {
229
+ // Tried all nodes, give up for broadcasts
230
+ throw error;
231
+ }
232
+ if (logFailover) {
233
+ // tslint:disable-next-line: no-console
234
+ console.log(`Broadcast failover to: ${orderedNodes[nodeIndex]} (${error.code}, request never sent)`);
235
+ }
236
+ continue;
140
237
  }
141
- else {
142
- const isFailoverError = timeoutErrors.filter((fe) => error && error.code && error.code.includes(fe)).length > 0;
143
- if (isFailoverError &&
144
- Array.isArray(allAddresses) &&
145
- allAddresses.length > 1) {
146
- if (round < failoverThreshold) {
147
- start = Date.now();
148
- tries = -1;
149
- if (failoverThreshold > 0) {
150
- round++;
151
- }
152
- currentAddress = failover(currentAddress, allAddresses, currentAddress, consoleOnFailover);
153
- }
154
- else {
155
- error.message = `[${error.code}] tried ${failoverThreshold} times with ${allAddresses.join(',')}`;
238
+ // Timeout, HTTP error, or unknown error — request may have been received.
239
+ // Do NOT retry. Throw immediately.
240
+ throw error;
241
+ }
242
+ // === READ OPERATION FAILOVER ===
243
+ if (!shouldFailover(error)) {
244
+ // Unrecognized error type — don't failover, throw immediately
245
+ throw error;
246
+ }
247
+ // Try next node immediately (no backoff within a round)
248
+ if (totalNodes > 1) {
249
+ nodeIndex = nextNode(orderedNodes, nodeIndex);
250
+ nodesTriedInRound++;
251
+ if (nodesTriedInRound >= totalNodes) {
252
+ // Completed a full round through all nodes
253
+ nodesTriedInRound = 0;
254
+ // failoverThreshold=0 means retry forever (only timeout can stop it)
255
+ if (failoverThreshold > 0) {
256
+ round++;
257
+ if (round >= failoverThreshold) {
258
+ error.message = `All ${totalNodes} nodes failed after ${failoverThreshold} rounds. ` +
259
+ `Last error: [${error.code || 'HTTP'}] ${error.message}. ` +
260
+ `Nodes: ${orderedNodes.join(', ')}`;
156
261
  throw error;
157
262
  }
158
263
  }
159
- else {
160
- // tslint:disable-next-line: no-console
161
- console.error(`Didn't failover for error ${error.code ? 'code' : 'message'}: [${error.code || error.message}]`);
264
+ // Check total timeout before starting next round
265
+ if (timeout !== 0 && Date.now() - startTime > timeout) {
162
266
  throw error;
163
267
  }
268
+ // Backoff between rounds (not between individual node attempts)
269
+ yield sleep(backoff(round));
270
+ }
271
+ if (logFailover) {
272
+ // tslint:disable-next-line: no-console
273
+ console.log(`Switched Hive RPC: ${orderedNodes[nodeIndex]} (previous: ${node}, error: ${error.code || error.message})`);
274
+ }
275
+ }
276
+ else {
277
+ // Single node: use backoff and retry same node (legacy behavior)
278
+ if (timeout !== 0 && Date.now() - startTime > timeout) {
279
+ throw error;
164
280
  }
281
+ yield sleep(backoff(nodesTriedInRound++));
165
282
  }
166
- yield sleep(backoff(tries++));
167
283
  }
168
- } while (true);
284
+ }
169
285
  });
170
286
  }
171
287
  exports.retryingFetch = retryingFetch;
172
- const failover = (url, urls, currentAddress, consoleOnFailover) => {
173
- const index = urls.indexOf(url);
174
- const targetUrl = urls.length === index + 1 ? urls[0] : urls[index + 1];
175
- if (consoleOnFailover) {
176
- // tslint:disable-next-line: no-console
177
- console.log(`Switched Hive RPC: ${targetUrl} (previous: ${currentAddress})`);
178
- }
179
- return targetUrl;
180
- };
181
288
  // Hack to be able to generate a valid witness_set_properties op
182
289
  // Can hopefully be removed when hived's JSON representation is fixed
183
290
  const ByteBuffer = require("@ecency/bytebuffer");