meeseeker 0.0.7 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +118 -89
- data/Rakefile +131 -49
- data/lib/meeseeker.rb +178 -6
- data/lib/meeseeker/block_follower_job.rb +111 -65
- data/lib/meeseeker/hive_engine.rb +20 -0
- data/lib/meeseeker/steem_engine/agent.rb +33 -19
- data/lib/meeseeker/steem_engine/follower_job.rb +88 -18
- data/lib/meeseeker/version.rb +1 -1
- data/lib/meeseeker/witness_schedule_job.rb +6 -2
- data/meeseeker.gemspec +4 -1
- data/test/meeseeker/meeseeker_test.rb +253 -16
- data/test/test_helper.rb +11 -0
- metadata +84 -24
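
The headline change in 2.0.0 is that meeseeker targets Hive by default (Steem remains selectable), and node, chain, and key-retention settings are driven by environment variables that appear in the diff of `lib/meeseeker.rb` below. A minimal sketch of pointing the daemon at a chain before loading the library; the variable names are taken from the diff, while the specific values are illustrative assumptions:

```ruby
# Illustrative values only; variable names come from the lib/meeseeker.rb diff below.
ENV['MEESEEKER_CHAIN_KEY_PREFIX'] = 'hive'                   # or 'steem'
ENV['MEESEEKER_NODE_URL']         = 'https://api.hive.blog'  # or 'shuffle' to rotate through failover nodes
ENV['MEESEEKER_REDIS_URL']        = 'redis://127.0.0.1:6379/0'
ENV['MEESEEKER_EXPIRE_KEYS']      = (28800 * 3).to_s         # seconds; 28800 * 3 matches the library default (about one day of blocks)
ENV['MEESEEKER_MAX_KEYS']         = '-1'                     # -1 disables the new key-count throttle

require 'meeseeker'

puts Meeseeker.node_url # resolved from the chain key prefix at require time
```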
data/lib/meeseeker.rb
CHANGED
@@ -1,28 +1,200 @@
 require 'redis'
 require 'steem'
+require 'hive'

 require 'meeseeker/version'
 require 'meeseeker/block_follower_job'
 require 'meeseeker/witness_schedule_job'
 require 'meeseeker/steem_engine/agent'
 require 'meeseeker/steem_engine/follower_job'
+require 'meeseeker/hive_engine'

 module Meeseeker
-
-
+  STEEM_CHAIN_ID = '0000000000000000000000000000000000000000000000000000000000000000'
+  HIVE_LEGACY_CHAIN_ID = '0000000000000000000000000000000000000000000000000000000000000000'
+  HIVE_CHAIN_ID = 'beeab0de00000000000000000000000000000000000000000000000000000000'
+  STEEM_CHAIN_KEY_PREFIX = 'steem'
+  HIVE_CHAIN_KEY_PREFIX = 'hive'
+  STEEM_ENGINE_CHAIN_KEY_PREFIX = 'steem_engine'
+  HIVE_ENGINE_CHAIN_KEY_PREFIX = 'hive_engine'
+  LAST_BLOCK_NUM_KEY_SUFFIX = ':meeseeker:last_block_num'
+  LAST_STEEM_ENGINE_BLOCK_NUM_KEY_SUFFIX = ':meeseeker:last_block_num'
   BLOCKS_PER_DAY = 28800
   VIRTUAL_TRX_ID = '0000000000000000000000000000000000000000'
+  BLOCK_INTERVAL = 3
+  SHUFFLE_URL = 'shuffle'
+  DEFAULT_STEEM_URL = 'https://api.steemit.com'
+  DEFAULT_STEEM_FAILOVER_URLS = [
+    DEFAULT_STEEM_URL,
+    # 'https://steemd.minnowsupportproject.org',
+    # 'https://anyx.io',
+    # 'http://anyx.io',
+    # 'https://steemd.privex.io',
+    # 'https://api.steem.house'
+  ]
+  DEFAULT_HIVE_URL = 'https://api.openhive.network'
+  DEFAULT_HIVE_FAILOVER_URLS = [
+    DEFAULT_HIVE_URL,
+    'https://api.hivekings.com',
+    'https://anyx.io',
+    'http://anyx.io',
+    'https://techcoderx.com',
+    'https://rpc.esteem.app',
+    'https://hived.privex.io',
+    'https://api.pharesim.me',
+    'https://api.hive.blog',
+    'https://rpc.ausbit.dev'
+  ]
+
+  def default_chain_key_prefix
+    ENV.fetch('MEESEEKER_CHAIN_KEY_PREFIX', chain_key_prefix)
+  end
+
+  def self.chain_key_prefix
+    @chain_key_prefix ||= {}
+    url = default_url(HIVE_CHAIN_KEY_PREFIX)
+
+    return @chain_key_prefix[url] if !!@chain_key_prefix[url]
+
+    # Just use the Hive API for either chain, until we know which one we're
+    # using.
+    api = Hive::DatabaseApi.new(url: url)
+
+    api.get_config do |config|
+      @chain_key_prefix[node_url] = if !!config.HIVE_CHAIN_ID && config.HIVE_CHAIN_ID == HIVE_CHAIN_ID
+        HIVE_CHAIN_KEY_PREFIX
+      elsif !!config.HIVE_CHAIN_ID && config.HIVE_CHAIN_ID == HIVE_LEGACY_CHAIN_ID
+        HIVE_CHAIN_KEY_PREFIX
+      elsif !!config.STEEM_CHAIN_ID && config.STEEM_CHAIN_ID == STEEM_CHAIN_ID
+        STEEM_CHAIN_KEY_PREFIX
+      else
+        config.keys.find{|k| k.end_with? '_CHAIN_ID'}.split('_').first.downcase.tap do |guess|
+          warn "Guessing chain_key_prefix = '#{guess}' for unknown chain on: #{node_url}"
+        end
+      end
+    end
+  end
+
+  def self.default_url(chain = default_chain_key_prefix)
+    ENV.fetch('MEESEEKER_NODE_URL') do
+      case chain.to_s
+      when STEEM_CHAIN_KEY_PREFIX then DEFAULT_STEEM_URL
+      when HIVE_CHAIN_KEY_PREFIX then DEFAULT_HIVE_URL
+      else
+        raise "Unknown chain: #{chain}"
+      end
+    end
+  end
+
+  @problem_node_urls = []
+
   @redis = Redis.new(url: ENV.fetch('MEESEEKER_REDIS_URL', 'redis://127.0.0.1:6379/0'))
-  @node_url = ENV.fetch('
+  @node_url = default_url(ENV.fetch('MEESEEKER_CHAIN_KEY_PREFIX', HIVE_CHAIN_KEY_PREFIX))
   @steem_engine_node_url = ENV.fetch('MEESEEKER_STEEM_ENGINE_NODE_URL', 'https://api.steem-engine.com/rpc')
+  @hive_engine_node_url = ENV.fetch('MEESEEKER_HIVE_ENGINE_NODE_URL', 'https://api.hive-engine.com/rpc')
   @stream_mode = ENV.fetch('MEESEEKER_STREAM_MODE', 'head').downcase.to_sym
   @include_virtual = ENV.fetch('MEESEEKER_INCLUDE_VIRTUAL', 'true').downcase == 'true'
   @include_block_header = ENV.fetch('MEESEEKER_INCLUDE_BLOCK_HEADER', 'true').downcase == 'true'
   @publish_op_custom_id = ENV.fetch('MEESEEKER_PUBLISH_OP_CUSTOM_ID', 'false').downcase == 'true'
-  @expire_keys = ENV.fetch('MEESEEKER_EXPIRE_KEYS', BLOCKS_PER_DAY *
+  @expire_keys = ENV.fetch('MEESEEKER_EXPIRE_KEYS', BLOCKS_PER_DAY * BLOCK_INTERVAL).to_i
+  @max_keys = ENV.fetch('MEESEEKER_MAX_KEYS', '-1').to_i

   extend self

-  attr_accessor :redis, :node_url, :steem_engine_node_url,
-    :
+  attr_accessor :redis, :node_url, :steem_engine_node_url,
+    :hive_engine_node_url, :expire_keys, :max_keys, :stream_mode,
+    :include_virtual, :include_block_header, :publish_op_custom_id
+
+  def self.shuffle_node_url(chain = ENV.fetch('MEESEEKER_CHAIN_KEY_PREFIX', HIVE_CHAIN_KEY_PREFIX))
+    chain = chain.to_s
+    node_url = ENV.fetch('MEESEEKER_NODE_URL', default_url(ENV.fetch('MEESEEKER_CHAIN_KEY_PREFIX', chain)))
+    return node_url unless node_url == SHUFFLE_URL
+
+    @problem_node_urls = [] if rand(1..1000) == 13
+    shuffle_node_url!(chain)
+  end
+
+  def self.api_class(chain = default_chain_key_prefix)
+    case chain.to_s
+    when STEEM_CHAIN_KEY_PREFIX then Steem::Api
+    when HIVE_CHAIN_KEY_PREFIX then Hive::Api
+    else
+      raise "Unknown chain: #{chain}"
+    end
+  end
+
+  def self.condenser_api_class(chain = default_chain_key_prefix)
+    case chain.to_s
+    when STEEM_CHAIN_KEY_PREFIX then Steem::CondenserApi
+    when HIVE_CHAIN_KEY_PREFIX then Hive::CondenserApi
+    else
+      raise "Unknown chain: #{chain}"
+    end
+  end
+
+  def self.block_api_class(chain = default_chain_key_prefix)
+    case chain.to_s
+    when STEEM_CHAIN_KEY_PREFIX then Steem::BlockApi
+    when HIVE_CHAIN_KEY_PREFIX then Hive::BlockApi
+    else
+      raise "Unknown chain: #{chain}"
+    end
+  end
+
+  def self.database_api_class(chain = default_chain_key_prefix)
+    case chain.to_s
+    when STEEM_CHAIN_KEY_PREFIX then Steem::DatabaseApi
+    when HIVE_CHAIN_KEY_PREFIX then Hive::DatabaseApi
+    else
+      raise "Unknown chain: #{chain}"
+    end
+  end
+
+  def self.stream_class(chain = default_chain_key_prefix)
+    case chain.to_s
+    when STEEM_CHAIN_KEY_PREFIX then Steem::Stream
+    when HIVE_CHAIN_KEY_PREFIX then Hive::Stream
+    else
+      raise "Unknown chain: #{chain}"
+    end
+  end
+
+  def self.shuffle_node_url!(chain = ENV.fetch('MEESEEKER_CHAIN_KEY_PREFIX', HIVE_CHAIN_KEY_PREFIX))
+    chain = chain.to_s
+    failover_urls = case chain
+    when STEEM_CHAIN_KEY_PREFIX then DEFAULT_STEEM_FAILOVER_URLS - @problem_node_urls
+    when HIVE_CHAIN_KEY_PREFIX then DEFAULT_HIVE_FAILOVER_URLS - @problem_node_urls
+    else; []
+    end
+    url = failover_urls.sample
+    api = api_class(chain).new(url: url)
+
+    api.get_accounts(['fullnodeupdate']) do |accounts|
+      fullnodeupdate = accounts.first
+      metadata = (JSON[fullnodeupdate.json_metadata] rescue nil) || {}
+
+      nodes = metadata.fetch('report', []).map do |report|
+        next if chain == HIVE_CHAIN_KEY_PREFIX && !report[HIVE_CHAIN_KEY_PREFIX]
+        next if chain != HIVE_CHAIN_KEY_PREFIX && !!report[HIVE_CHAIN_KEY_PREFIX]
+
+        report['node']
+      end.compact.uniq
+
+      nodes -= @problem_node_urls
+
+      if nodes.any?
+        nodes.sample
+      else
+        @node_url = failover_urls.sample
+      end
+    end
+  rescue => e
+    puts "#{url}: #{e}"
+
+    @problem_node_urls << url
+    failover_urls -= @problem_node_urls
+    failover_urls.sample
+  end
+
+  shuffle_node_url! if @node_url == SHUFFLE_URL
 end
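
The new per-chain helpers above resolve node URLs and client classes from a chain key prefix instead of hard-coding `Steem::` constants. A small sketch of how they might be exercised from a console, assuming the `hive` and `steem` gems from the requires are installed and `MEESEEKER_NODE_URL` is unset; return values in comments are illustrative:

```ruby
require 'meeseeker'

# Resolve the default node for each supported chain key prefix.
Meeseeker.default_url(Meeseeker::HIVE_CHAIN_KEY_PREFIX)  # => "https://api.openhive.network"
Meeseeker.default_url(Meeseeker::STEEM_CHAIN_KEY_PREFIX) # => "https://api.steemit.com"

# Pick the right client classes for a chain without naming Steem:: or Hive:: directly.
chain = :hive
database_api = Meeseeker.database_api_class(chain).new(url: Meeseeker.default_url(chain))
Meeseeker.stream_class(chain) # => Hive::Stream

# An unknown prefix raises, matching the case statements above.
begin
  Meeseeker.api_class(:eos)
rescue => e
  puts e.message # => "Unknown chain: eos"
end
```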
data/lib/meeseeker/block_follower_job.rb
CHANGED
@@ -3,22 +3,26 @@ module Meeseeker
     MAX_VOP_RETRY = 3

     def perform(options = {})
-
+      chain = (options[:chain] || 'hive').to_sym
+      url = Meeseeker.default_url(chain)
+      block_api = Meeseeker.block_api_class(chain).new(url: url)
       redis = Meeseeker.redis
       last_key_prefix = nil
       trx_index = 0
       current_block_num = nil
       block_transactions = []
+      chain_key_prefix = chain.to_s if !!options[:chain]
+      chain_key_prefix ||= Meeseeker.default_chain_key_prefix

       stream_operations(options) do |op, trx_id, block_num|
         begin
-          current_key_prefix = "
+          current_key_prefix = "#{chain_key_prefix}:#{block_num}:#{trx_id}"

           if current_key_prefix == last_key_prefix
             trx_index += 1
           else
             if !!last_key_prefix
-
+              _, b, t = last_key_prefix.split(':')
               transaction_payload = {
                 block_num: b.to_i,
                 transaction_id: t,
@@ -26,17 +30,28 @@ module Meeseeker
               }

               block_transactions << trx_id unless trx_id == VIRTUAL_TRX_ID
-              redis.publish(
+              redis.publish("#{chain_key_prefix}:transaction", transaction_payload.to_json)
             end
-            last_key_prefix = "
+            last_key_prefix = "#{chain_key_prefix}:#{block_num}:#{trx_id}"
             trx_index = 0
           end

-          op_type = op.type.
+          op_type = if op.type.end_with? '_operation'
+            op.type.split('_')[0..-2].join('_')
+          else
+            op.type
+          end
+
           key = "#{current_key_prefix}:#{trx_index}:#{op_type}"
           puts key
         end

+        unless Meeseeker.max_keys == -1
+          while redis.keys("#{chain_key_prefix}:*").size > Meeseeker.max_keys
+            sleep Meeseeker::BLOCK_INTERVAL
+          end
+        end
+
         redis.set(key, op.to_json)
         redis.expire(key, Meeseeker.expire_keys) unless Meeseeker.expire_keys == -1

@@ -49,26 +64,30 @@ module Meeseeker
           if Meeseeker.include_block_header
             catch :block_header do
               block_api.get_block_header(block_num: block_num) do |result|
-
+                if result.nil? || result.header.nil?
+                  puts "Node returned empty result for block_header on block_num: #{block_num} (rate limiting?). Retrying ..."
+                  sleep Meeseeker::BLOCK_INTERVAL
+                  throw :block_header
+                end

                 block_payload.merge!(result.header.to_h)
               end
             end
           end

-          redis.set(
-          redis.publish(
+          redis.set(chain_key_prefix + LAST_BLOCK_NUM_KEY_SUFFIX, block_num)
+          redis.publish("#{chain_key_prefix}:block", block_payload.to_json)
           current_block_num = block_num
         end

-        redis.publish("
+        redis.publish("#{chain_key_prefix}:op:#{op_type}", {key: key}.to_json)

         if Meeseeker.publish_op_custom_id
           if %w(custom custom_binary custom_json).include? op_type
             id = (op["value"]["id"] rescue nil).to_s

             if id.size > 0
-              redis.publish("
+              redis.publish("#{chain_key_prefix}:op:#{op_type}:#{id}", {key: key}.to_json)
             end
           end
         end
@@ -76,7 +95,10 @@ module Meeseeker
     end
   private
     def stream_operations(options = {}, &block)
+      chain = (options[:chain] || 'hive').to_sym
       redis = Meeseeker.redis
+      chain_key_prefix = chain.to_s if !!options[:chain]
+      chain_key_prefix ||= Meeseeker.chain_key_prefix
       last_block_num = nil
       mode = options.delete(:mode) || Meeseeker.stream_mode
       options[:include_virtual] ||= Meeseeker.include_virtual
@@ -84,8 +106,9 @@ module Meeseeker
       if !!options[:at_block_num]
         last_block_num = options[:at_block_num].to_i
       else
-
-
+        url = Meeseeker.default_url(chain)
+        database_api = Meeseeker.database_api_class(chain).new(url: url)
+        last_block_num = redis.get(chain_key_prefix + LAST_BLOCK_NUM_KEY_SUFFIX).to_i + 1

         block_num = catch :dynamic_global_properties do
           database_api.get_dynamic_global_properties do |dgpo|
@@ -116,11 +139,12 @@ module Meeseeker
       end

       begin
-
+        url = Meeseeker.default_url(chain)
+        stream_options = {url: url, mode: mode}
         options = options.merge(at_block_num: last_block_num)
         condenser_api = nil

-
+        Meeseeker.stream_class.new(stream_options).tap do |stream|
           puts "Stream begin: #{stream_options.to_json}; #{options.to_json}"

           # Prior to v0.0.4, we only streamed operations with stream.operations.
@@ -129,67 +153,89 @@ module Meeseeker
           # to embed it into op values. This should also reduce streaming
           # overhead since we no longer stream block_headers inder the hood.

-
-
-
-
-
-                yield op, block.transaction_ids[index], block_num
-              end
-            end
-
-            next unless !!Meeseeker.include_virtual
-
-            retries = 0
-
-            # This is where it gets tricky. Virtual ops sometims don't show up
-            # right away, especially if we're streaming on head blocks. In that
-            # situation, we might only need to wait about 1 block. This loop
-            # will likely one execute one iteration, but we have fallback logic
-            # in case there are complications.
-            #
-            # See: https://developers.steem.io/tutorials-recipes/virtual-operations-when-streaming-blockchain-transactions
-
-            loop do
-              condenser_api ||= Steem::CondenserApi.new(url: Meeseeker.node_url)
-              condenser_api.get_ops_in_block(block_num, true) do |vops|
-                redo if vops.nil?
+          loop do
+            begin
+              stream.blocks(options) do |b, n|
+                redo if b.nil?

-
-
-
-                  # impact overall performance because steem-ruby will batch
-                  # when block streams fall behind.
-
-                  if retries < MAX_VOP_RETRY
-                    retries = retries + 1
-                    condenser_api = nil
-                    sleep 3 * retries
+                b.transactions.each_with_index do |transaction, index|
+                  transaction.operations.each do |op|
+                    op = op.merge(timestamp: b.timestamp)

-
+                    yield op, b.transaction_ids[index], n
                  end
-
-                  puts "Gave up retrying virtual ops lookup on block #{block_num}"
-
-                  break
                end

-
-
-
+                next unless !!Meeseeker.include_virtual
+
+                retries = 0
+
+                # This is where it gets tricky. Virtual ops sometims don't show up
+                # right away, especially if we're streaming on head blocks. In that
+                # situation, we might only need to wait about 1 block. This loop
+                # will likely one execute one iteration, but we have fallback logic
+                # in case there are complications.
+                #
+                # See: https://developers.steem.io/tutorials-recipes/virtual-operations-when-streaming-blockchain-transactions

-
-
-
-
-
-
+                loop do
+                  # TODO (HF23) Switch to account_history_api.enum_virtual_ops if supported.
+                  url = Meeseeker.default_url(chain)
+                  condenser_api ||= Meeseeker.condenser_api_class(chain).new(url: url)
+                  condenser_api.get_ops_in_block(n, true) do |vops|
+                    if vops.nil?
+                      puts "Node returned empty result for get_ops_in_block on block_num: #{n} (rate limiting?). Retrying ..."
+                      vops = []
+                    end
+
+                    if vops.empty? && mode != :head
+                      # Usually, we just need to slow down to allow virtual ops to
+                      # show up after a short delay. Adding this delay doesn't
+                      # impact overall performance because steem-ruby will batch
+                      # when block streams fall behind.
+
+                      if retries < MAX_VOP_RETRY
+                        retries = retries + 1
+                        condenser_api = nil
+                        sleep Meeseeker::BLOCK_INTERVAL * retries
+
+                        redo
+                      end
+
+                      puts "Gave up retrying virtual ops lookup on block #{n}"
+
+                      break
+                    end
+
+                    if retries > 0
+                      puts "Found virtual ops for block #{n} aftere #{retries} retrie(s)"
+                    end
+
+                    vops.each do |vop|
+                      normalized_op = Hashie::Mash.new(
+                        type: vop.op[0],
+                        value: vop.op[1],
+                        timestamp: vop.timestamp
+                      )
+
+                      yield normalized_op, vop.trx_id, vop.block
+                    end
+                  end

-
+                  break
                end
              end

              break
+            rescue => e
+              raise e unless e.to_s.include? 'Request Entity Too Large'
+
+              # We need to tell steem-ruby to avoid json-rpc-batch on this
+              # node.
+
+              Meeseeker.block_api_class(chain).const_set 'MAX_RANGE_SIZE', 1
+              sleep Meeseeker::BLOCK_INTERVAL
+              redo
            end
          end
        end
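
Because the follower job now publishes to chain-prefixed channels (per the `redis.publish` calls above, with `hive` as the default prefix), a consumer only needs a plain Redis subscriber. A minimal sketch: the channel names are derived from the publish calls and the `hive` prefix, the payload field names (`block_num`, `key`) follow the payloads built above, and the second connection is used because a subscribed redis-rb client cannot also issue GET:

```ruby
require 'redis'
require 'json'

redis_url  = ENV.fetch('MEESEEKER_REDIS_URL', 'redis://127.0.0.1:6379/0')
subscriber = Redis.new(url: redis_url)
reader     = Redis.new(url: redis_url) # separate connection for GET while subscribed

subscriber.subscribe('hive:block', 'hive:op:custom_json') do |on|
  on.message do |channel, message|
    payload = JSON.parse(message)

    if channel == 'hive:block'
      puts "block #{payload['block_num']}"
    else
      # Op channels carry only the Redis key; the op body is stored under that key.
      raw = reader.get(payload['key'])
      op  = raw ? JSON.parse(raw) : {}
      puts "#{channel}: #{op['type']} (#{payload['key']})"
    end
  end
end
```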