meeseeker 0.0.3pre1 → 0.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +41 -1
- data/Rakefile +181 -13
- data/bin/meeseeker +4 -2
- data/lib/meeseeker.rb +1 -0
- data/lib/meeseeker/block_follower_job.rb +29 -21
- data/lib/meeseeker/version.rb +1 -1
- data/lib/meeseeker/witness_schedule_job.rb +68 -0
- data/test/meeseeker/meeseeker_test.rb +9 -6
- metadata +5 -4
checksums.yaml
CHANGED
```diff
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 8b01bc12292352c934627c0f6ed373b6eec30dd05fc91ddcfbe83a5aa3da1e95
+  data.tar.gz: '07983e2f5ab16f93e178160140e208636fb5e073e10aac7b5a0212eb50e49f68'
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 04d532184efc2c4d086e78e25e8e96106bbd0bf15121b2b55e25ef872874ea44da99ed994fe4e96ee6653dd002be7f82d3c1dbe5fa751703a6167487f4989631
+  data.tar.gz: 195cd89d7cdad7f430f6b2b91816c50bca5bafc52551edc8cb8ff253620980ceb872f7ad360a0a25824aefa038a4dbf978a25b843a9c509f62467d717974d684
```
data/README.md
CHANGED
````diff
@@ -107,7 +107,9 @@ When `meeseeker sync` starts for the first time, it initializes from the last ir
 
 For `redis-cli`, please see: https://redis.io/topics/pubsub
 
-
+##### Sync
+
+When running `meeseeker sync`, the following channels are available:
 
 * `steem:block`
 * `steem:transaction`
@@ -193,6 +195,24 @@ end
 
 Many other clients are supported: https://redis.io/clients
 
+##### Witness Schedule
+
+When running `meeseeker witness:schedule`, the `steem:witness:schedule` channel is available. This is offered as a separate command because most applications don't need to worry about this level of blockchain logistics.
+
+For example, from `redis-cli`, if we wanted to subscribe to the witness schedule:
+
+```
+$ redis-cli
+127.0.0.1:6379> subscribe steem:witness:schedule
+Reading messages... (press Ctrl-C to quit)
+1) "subscribe"
+2) "steem:witness:schedule"
+3) (integer) 1
+1) "message"
+2) "steem:witness:schedule"
+3) "{\"id\":0,\"current_virtual_time\":\"415293532210075480213212125\",\"next_shuffle_block_num\":30035208,\"current_shuffled_witnesses\":[\"thecryptodrive\",\"timcliff\",\"utopian-io\",\"themarkymark\",\"aggroed\",\"smooth.witness\",\"someguy123\",\"gtg\",\"followbtcnews\",\"yabapmatt\",\"therealwolf\",\"ausbitbank\",\"curie\",\"clayop\",\"drakos\",\"blocktrades\",\"good-karma\",\"roelandp\",\"lukestokes.mhth\",\"liondani\",\"anyx\"],\"num_scheduled_witnesses\":21,\"elected_weight\":1,\"timeshare_weight\":5,\"miner_weight\":1,\"witness_pay_normalization_factor\":25,\"median_props\":{\"account_creation_fee\":{\"amount\":\"3000\",\"precision\":3,\"nai\":\"@@000000021\"},\"maximum_block_size\":65536,\"sbd_interest_rate\":0,\"account_subsidy_budget\":797,\"account_subsidy_decay\":347321},\"majority_version\":\"0.20.8\",\"max_voted_witnesses\":20,\"max_miner_witnesses\":0,\"max_runner_witnesses\":1,\"hardfork_required_witnesses\":17,\"account_subsidy_rd\":{\"resource_unit\":10000,\"budget_per_time_unit\":797,\"pool_eq\":157691079,\"max_pool_size\":157691079,\"decay_params\":{\"decay_per_time_unit\":347321,\"decay_per_time_unit_denom_shift\":36},\"min_decay\":0},\"account_subsidy_witness_rd\":{\"resource_unit\":10000,\"budget_per_time_unit\":996,\"pool_eq\":9384019,\"max_pool_size\":9384019,\"decay_params\":{\"decay_per_time_unit\":7293741,\"decay_per_time_unit_denom_shift\":36},\"min_decay\":257},\"min_witness_account_subsidy_decay\":0}"
+```
+
 #### Using `SCAN`
 
 From the redis manual:
@@ -276,6 +296,26 @@ redis-cli --scan --pattern 'steem:*:31ecb9c85e9eabd7ca2460fdb4f3ce4a7ca6ec32:*'
 
 See some of my previous Ruby How To posts in: [#radiator](https://steemit.com/created/radiator) [#ruby](https://steemit.com/created/ruby)
 
+### Docker
+
+This will launch meeseeker in a docker container, so you can immediately attach to it on port 6380.
+
+```bash
+docker run -d -p 6380:6379 inertia/meeseeker:latest
+redis-cli -p 6380
+```
+
+You can also pass any of the environment variables meeseeker accepts. For example, this will launch meeseeker with `custom_json.id` channels enabled, but only keeps ops around for 5 minutes:
+
+```bash
+docker run \
+  --env MEESEEKER_PUBLISH_OP_CUSTOM_ID=true \
+  --env MEESEEKER_EXPIRE_KEYS=300 \
+  -d -p 6380:6379 inertia/meeseeker:latest
+```
+
+Also see: https://hub.docker.com/r/inertia/meeseeker/
+
 ## Get in touch!
 
 If you're using Radiator, I'd love to hear from you. Drop me a line and tell me what you think! I'm @inertia on STEEM.
````
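The new `steem:witness:schedule` channel documented above can be consumed from Ruby just like the existing channels. A minimal sketch, assuming the `redis` and `json` gems and a Redis server on the default local port (this example is not part of the package):

```ruby
require 'redis'
require 'json'

redis = Redis.new(url: 'redis://127.0.0.1:6379/0')

# Blocks and prints each witness schedule as meeseeker publishes it.
redis.subscribe('steem:witness:schedule') do |on|
  on.message do |_channel, message|
    schedule = JSON.parse(message)

    # Field names taken from the payload shown in the README example above.
    puts "Next shuffle at block #{schedule['next_shuffle_block_num']}"
    puts "Round: #{schedule['current_shuffled_witnesses'].join(', ')}"
  end
end
```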
data/Rakefile
CHANGED
```diff
@@ -2,6 +2,8 @@ require "bundler/gem_tasks"
 require "rake/testtask"
 require 'meeseeker'
 
+defined? Thread.report_on_exception and Thread.report_on_exception = true
+
 Rake::TestTask.new(:test) do |t|
   t.libs << 'test'
   t.libs << 'lib'
@@ -44,6 +46,14 @@ task(:sync, [:at_block_num] => [:check_schema]) do |t, args|
   job.perform(at_block_num: args[:at_block_num])
 end
 
+namespace :witness do
+  desc 'Publish the witness schedule every minute or so (steem:witness:schedule).'
+  task :schedule do
+    job = Meeseeker::WitnessScheduleJob.new
+    job.perform
+  end
+end
+
 task(:find, [:what, :key] => [:check_schema]) do |t, args|
   redis = Meeseeker.redis
   match = case args[:what].downcase.to_sym
@@ -77,25 +87,24 @@ end
 namespace :verify do
   desc 'Verifies transactions land where they should.'
   task :block_org, [:max_blocks] do |t, args|
-    defined? Thread.report_on_exception and Thread.report_on_exception = true
-
     max_blocks = args[:max_blocks]
     node_url = ENV.fetch('MEESEEKER_NODE_URL', 'https://api.steemit.com')
     database_api = Steem::DatabaseApi.new(url: node_url)
-
+    mode = ENV.fetch('MEESEEKER_STREAM_MODE', 'head').to_sym
+    until_block_num = if !!max_blocks
+      database_api.get_dynamic_global_properties do |dgpo|
+        raise 'Got empty dynamic_global_properties result.' if dgpo.nil?
+
+        case mode
+        when :head then dgpo.head_block_number
+        when :irreversible then dgpo.last_irreversible_block_num
+        else; abort "Unknown block mode: #{mode}"
+        end
+      end + max_blocks.to_i
+    end
 
     Thread.new do
       job = Meeseeker::BlockFollowerJob.new
-      mode = ENV.fetch('MEESEEKER_STREAM_MODE', 'head').to_sym
-      until_block_num = if !!max_blocks
-        database_api.get_dynamic_global_properties do |dgpo|
-          case mode
-          when :head then dgpo.head_block_number
-          when :irreversible then dgpo.last_irreversible_block_num
-          else; abort "Unknown block mode: #{mode}"
-          end
-        end + max_blocks.to_i
-      end
 
       loop do
         begin
@@ -138,6 +147,9 @@ namespace :verify do
 
         if !!max_blocks
           if block_num >= until_block_num
+            # We're done trailing blocks.  Typically, this is used by unit
+            # tests so the test can halt.
+
             subscription.unsubscribe
             next
           end
@@ -152,6 +164,8 @@ namespace :verify do
         end
 
         database_api.get_dynamic_global_properties do |dgpo|
+          raise 'Got empty dynamic_global_properties result.' if dgpo.nil?
+
           (block_num - dgpo.last_irreversible_block_num).tap do |offset|
             # This will block all channel callbacks until the first known block
             # is irreversible.  After that, the offsets should mostly go
@@ -169,6 +183,8 @@ namespace :verify do
         expected_ids -= [Meeseeker::VIRTUAL_TRX_ID]
 
         actual_ids, actual_witness = block_api.get_block(block_num: block_num) do |result|
+          raise 'Got empty block result.' if result.nil? || result.block.nil?
+
           block = result.block
           [block.transaction_ids, block.witness]
         end
@@ -204,4 +220,156 @@ namespace :verify do
       end
     end
   end
+
+  namespace :witness do
+    desc 'Verifies witnessses in the schedule produced a block.'
+    task :schedule, [:max_blocks] do |t, args|
+      max_blocks = args[:max_blocks]
+      node_url = ENV.fetch('MEESEEKER_NODE_URL', 'https://api.steemit.com')
+      database_api = Steem::DatabaseApi.new(url: node_url)
+      mode = ENV.fetch('MEESEEKER_STREAM_MODE', 'head').to_sym
+      until_block_num = if !!max_blocks
+        database_api.get_dynamic_global_properties do |dgpo|
+          raise 'Got empty dynamic_global_properties result.' if dgpo.nil?
+
+          case mode
+          when :head then dgpo.head_block_number
+          when :irreversible then dgpo.last_irreversible_block_num
+          else; abort "Unknown block mode: #{mode}"
+          end
+        end + max_blocks.to_i
+      end
+
+      Thread.new do
+        job = Meeseeker::WitnessScheduleJob.new
+
+        loop do
+          begin
+            job.perform(mode: mode, until_block_num: until_block_num)
+          rescue => e
+            puts e.inspect
+            sleep 5
+          end
+
+          break # success
+        end
+
+        puts 'Background sync finished ...'
+      end
+
+      begin
+        block_api = Steem::BlockApi.new(url: node_url)
+        schedule_channel = 'steem:witness:schedule'
+        redis_url = ENV.fetch('MEESEEKER_REDIS_URL', 'redis://127.0.0.1:6379/0')
+        subscription = Redis.new(url: redis_url)
+        ctx = Redis.new(url: redis_url)
+        timeout = (max_blocks).to_i * 3
+
+        subscribe_mode, subscribe_args = if timeout > 0
+          [:subscribe_with_timeout, [timeout, [schedule_channel]]]
+        else
+          [:subscribe, [[schedule_channel]]]
+        end
+
+        # Check if the redis context is still available right before we
+        # subscribe.
+        break unless subscription.ping == 'PONG'
+
+        subscription.send(subscribe_mode, *subscribe_args) do |on|
+          on.subscribe do |channel, subscriptions|
+            puts "Subscribed to ##{channel} (subscriptions: #{subscriptions})"
+          end
+
+          on.message do |channel, message|
+            payload = JSON[message]
+            next_shuffle_block_num = payload['next_shuffle_block_num']
+            current_shuffled_witnesses = payload['current_shuffled_witnesses']
+            num_witnesses = current_shuffled_witnesses.size
+            from_block_num = next_shuffle_block_num - num_witnesses + 1
+            to_block_num = from_block_num + num_witnesses - 1
+            block_range = from_block_num..to_block_num # typically 21 blocks
+
+            if !!max_blocks
+              if block_range.include? until_block_num
+                # We're done trailing blocks.  Typically, this is used by unit
+                # tests so the test can halt.
+
+                subscription.unsubscribe
+              end
+            end
+
+            begin
+              # We write witnesses to this hash until all 21 produce blocks.
+              actual_witnesses = {}
+              tries = 0
+
+              while actual_witnesses.size != num_witnesses
+                # Allow the immediate node to catch up in case it's behind by a
+                # block.
+                sleep 3
+
+                # Typically, nodes will allow up to 50 block headers in one
+                # request, if backed by jussi.  We only need 21, so each
+                # request should only make a single response with the entire
+                # round.  Under normal circumstances, this call happens only
+                # once.  But if the there's additional p2p or cache latency,
+                # it might have missing headers.
+
+                block_api.get_block_headers(block_range: block_range) do |header, block_num|
+                  unless !!header
+                    # Can happen when there's excess p2p latency and/or jussi
+                    # cache is under load.
+                    puts "Waiting for block header: #{block_num}"
+
+                    next
+                  end
+
+                  actual_witnesses[header.witness] = block_num
+                end
+
+                break if (tries += 1) > 5
+              end
+
+              # If there are multiple tries due to high p2p latency, even though
+              # we got all 21 block headers, seeing this message could be an
+              # early-warning of other problems on the blockchain.
+
+              # If there's a missing block header, this will always show 5
+              # tries.
+
+              puts "Tries: #{tries}" if tries > 1
+
+              missing_witnesses = current_shuffled_witnesses - actual_witnesses.keys
+              extra_witnesses = actual_witnesses.keys - current_shuffled_witnesses
+
+              if missing_witnesses.any? || extra_witnesses.any?
+                puts "Expected only these witness to produce a block in #{block_range}."
+                puts "Missing witnesses: #{missing_witnesses.join(', ')}"
+                puts "Extra witnesses: #{extra_witnesses.join(', ')}"
+
+                puts "\nWitnesses and block numbers in range:"
+                actual_witnesses.sort_by{ |k, v| v }.each do |k, v|
+                  puts "#{v}: #{k}"
+                end
+                puts "Count: #{actual_witnesses.size}"
+
+                # Non-zero exit to notify the shell caller that there's a
+                # problem.
+
+                exit(-(missing_witnesses.size + extra_witnesses.size))
+              end
+            end
+
+            # Perfect round.
+
+            puts "Found all #{num_witnesses} expected witnesses in block range #{block_range}: √"
+          end
+
+          on.unsubscribe do |channel, subscriptions|
+            puts "Unsubscribed from ##{channel} (subscriptions: #{subscriptions})"
+          end
+        end
+      end
+    end
+  end
 end
```
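The `verify:witness:schedule` task above derives each round's block range from the published schedule: the shuffle block number marks the last block of the round, and the witness count (typically 21) fixes its length. A standalone sketch of that arithmetic, with illustrative values:

```ruby
# Illustrative values; a real payload carries 21 shuffled witnesses.
next_shuffle_block_num = 30_035_208
current_shuffled_witnesses = %w(gtg anyx blocktrades)

num_witnesses = current_shuffled_witnesses.size
from_block_num = next_shuffle_block_num - num_witnesses + 1
to_block_num = from_block_num + num_witnesses - 1
block_range = from_block_num..to_block_num

# Each witness in the round is expected to sign exactly one block in range.
puts "Expecting #{num_witnesses} blocks in #{block_range}"
```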
data/bin/meeseeker
CHANGED
```diff
@@ -18,12 +18,12 @@ filename = __FILE__.split('/').last
 
 case ARGV[0]
 when 'console' then Rake::Task['console'].invoke
-when 'sync'
+when 'sync', 'witness:schedule'
   backoff = 0.01
   max_backoff = 30
 
   loop do; begin
-    Rake::Task[
+    Rake::Task[ARGV[0]].invoke(ARGV[1])
   rescue => e
     puts "Error: #{e.inspect}"
     backoff = [backoff, max_backoff].min
@@ -36,6 +36,8 @@ when 'reset' then Rake::Task['reset'].invoke
 else
   puts "\nBegin/resume sync:"
   puts "\t#{filename} sync\n\n"
+  puts "Publish witness schedule:"
+  puts "\t#{filename} witness:schedule\n\n"
   puts "Start in the ruby console:"
   puts "\t#{filename} console\n\n"
   puts 'Find block or transaction:'
```
data/lib/meeseeker/block_follower_job.rb
CHANGED
```diff
@@ -45,8 +45,12 @@ module Meeseeker
         }
 
         if Meeseeker.include_block_header
-
-
+          catch :block_header do
+            block_api.get_block_header(block_num: block_num) do |result|
+              throw :block_header if result.nil? || result.header.nil?
+
+              block_payload.merge!(result.header.to_h)
+            end
           end
         end
 
@@ -81,28 +85,32 @@ module Meeseeker
       database_api = Steem::DatabaseApi.new(url: Meeseeker.node_url)
       last_block_num = redis.get(LAST_BLOCK_NUM_KEY).to_i + 1
 
-
-
-
-        when :irreversible then dgpo.last_irreversible_block_num
-        else; abort "Unknown stream mode: #{mode}"
-        end
-
-        if Meeseeker.expire_keys == -1
-          last_block_num = [last_block_num, block_num].max
-
-          puts "Sync from: #{last_block_num}"
-        elsif block_num - last_block_num > Meeseeker.expire_keys / 3
-          last_block_num = block_num
+      block_num = catch :dynamic_global_properties do
+        database_api.get_dynamic_global_properties do |dgpo|
+          throw :dynamic_global_properties if dgpo.nil?
 
-
-
-
-
-
-          puts "Resuming from #{behind_sec / 60} minutes ago ..."
+          case mode
+          when :head then dgpo.head_block_number
+          when :irreversible then dgpo.last_irreversible_block_num
+          else; abort "Unknown stream mode: #{mode}"
+          end
         end
       end
+
+      if Meeseeker.expire_keys == -1
+        last_block_num = [last_block_num, block_num].max
+
+        puts "Sync from: #{last_block_num}"
+      elsif block_num - last_block_num > Meeseeker.expire_keys / 3
+        last_block_num = block_num
+
+        puts 'Starting new sync.'
+      else
+        behind_sec = block_num - last_block_num
+        behind_sec *= 3.0
+
+        puts "Resuming from #{behind_sec / 60} minutes ago ..."
+      end
     end
 
     begin
```
data/lib/meeseeker/witness_schedule_job.rb
ADDED
```diff
@@ -0,0 +1,68 @@
+module Meeseeker
+  class WitnessScheduleJob
+    def perform(options = {})
+      database_api = Steem::DatabaseApi.new(url: Meeseeker.node_url)
+      redis = Meeseeker.redis
+      mode = options.delete(:mode) || Meeseeker.stream_mode
+      schedule = nil
+      last_shuffle_block_num = nil
+
+      loop do
+        # Using hammer assignment will ensure we only request a new schedule
+        # after we've published.
+
+        schedule ||= catch :witness_schedule do
+          database_api.get_witness_schedule do |result|
+            throw :witness_schedule if result.nil?
+
+            result
+          end
+        end
+
+        next_shuffle_block_num = schedule.next_shuffle_block_num
+        block_num = catch :dynamic_global_properties do
+          database_api.get_dynamic_global_properties do |dgpo|
+            throw :dynamic_global_properties if dgpo.nil?
+
+            case mode
+            when :head then dgpo.head_block_number
+            when :irreversible then dgpo.last_irreversible_block_num
+            else; abort "Unknown stream mode: #{mode}"
+            end
+          end
+        end
+
+        # Find out how far away we are from the next schedule.
+
+        remaining_blocks = [next_shuffle_block_num - block_num - 1.5, 0].max
+
+        # It's better for the schedule to publish a little late than to miss
+        # an entire schedule, so we subtract 1.5 blocks from the total.
+        # Sometimes we check a little early and sometimes we check a little
+        # late.  But it all averages out.
+
+        if remaining_blocks > 0
+          delay = [remaining_blocks * 3.0, 0.25].max
+          puts "Sleeping for #{delay} seconds (remaining blocks: #{remaining_blocks})."
+          sleep delay
+          next
+        end
+
+        # Now that we've reached the current schedule, check if we've published
+        # it already.  If not, publish and reset for the next schedule.
+
+        if next_shuffle_block_num != last_shuffle_block_num
+          puts "next_shuffle_block_num: #{next_shuffle_block_num}; current_shuffled_witnesses: #{schedule.current_shuffled_witnesses.join(', ')}"
+          redis.publish('steem:witness:schedule', schedule.to_json)
+          last_shuffle_block_num = next_shuffle_block_num
+        end
+
+        schedule = nil # re-enabled hammer assignment
+
+        if !!options[:until_block_num]
+          break if block_num >= options[:until_block_num].to_i
+        end
+      end
+    end
+  end
+end
```
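`WitnessScheduleJob` spends most of its time sleeping: it estimates how many blocks remain until the next shuffle, subtracts 1.5 blocks so a publish lands slightly late rather than missing a round, and converts the remainder to seconds at 3 seconds per block. A worked example with illustrative numbers:

```ruby
next_shuffle_block_num = 30_035_208
block_num              = 30_035_190 # from the chosen stream mode

remaining_blocks = [next_shuffle_block_num - block_num - 1.5, 0].max # => 16.5
delay = [remaining_blocks * 3.0, 0.25].max                           # => 49.5
puts "Sleeping for #{delay} seconds (remaining blocks: #{remaining_blocks})."
```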
data/test/meeseeker/meeseeker_test.rb
CHANGED
```diff
@@ -12,16 +12,19 @@ module Meeseeker
       Rake.application.init
       Rake.application.load_rakefile
       Dir.chdir(pwd)
-
-
-    def test_verify_block_org
-      max_blocks = 30 # must be at least 15 to get past irreversible
+
       if !!Meeseeker.redis.get(Meeseeker::LAST_BLOCK_NUM_KEY)
         fail "Found existing keys. Please use 'rake reset' to enable this test."
       end
-
+    end
+
+    def test_verify_all_jobs
+      max_blocks = 30 # must be at least 15 to get past irreversible
+
       assert Rake::Task['verify:block_org'].invoke(max_blocks)
-      assert Rake::Task['
+      assert Rake::Task['verify:witness:schedule'].invoke(max_blocks)
+
+      Rake::Task['reset'].invoke
     end
   end
 end
```
metadata
CHANGED
```diff
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: meeseeker
 version: !ruby/object:Gem::Version
-  version: 0.0.3pre1
+  version: 0.0.4
 platform: ruby
 authors:
 - Anthony Martin
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2019-02-
+date: 2019-02-18 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rake
@@ -208,6 +208,7 @@ files:
 - lib/meeseeker.rb
 - lib/meeseeker/block_follower_job.rb
 - lib/meeseeker/version.rb
+- lib/meeseeker/witness_schedule_job.rb
 - meeseeker.gemspec
 - test/meeseeker/meeseeker_test.rb
 - test/test_helper.rb
@@ -227,9 +228,9 @@ required_ruby_version: !ruby/object:Gem::Requirement
       version: '0'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - - "
+  - - ">="
     - !ruby/object:Gem::Version
-      version:
+      version: '0'
 requirements: []
 rubyforge_project:
 rubygems_version: 2.7.7
```