bitcoin-ruby 0.0.6 → 0.0.7
- checksums.yaml +4 -4
- data/.gitignore +0 -1
- data/.travis.yml +2 -7
- data/COPYING +1 -1
- data/Gemfile +2 -6
- data/Gemfile.lock +34 -0
- data/README.rdoc +16 -68
- data/Rakefile +3 -6
- data/bin/bitcoin_shell +0 -1
- data/{concept-examples/blockchain-pow.rb → examples/concept-blockchain-pow.rb} +0 -0
- data/lib/bitcoin.rb +350 -296
- data/lib/bitcoin/builder.rb +3 -1
- data/lib/bitcoin/connection.rb +2 -1
- data/lib/bitcoin/contracthash.rb +76 -0
- data/lib/bitcoin/dogecoin.rb +97 -0
- data/lib/bitcoin/ffi/bitcoinconsensus.rb +74 -0
- data/lib/bitcoin/ffi/openssl.rb +98 -2
- data/lib/bitcoin/ffi/secp256k1.rb +144 -0
- data/lib/bitcoin/key.rb +12 -2
- data/lib/bitcoin/logger.rb +3 -12
- data/lib/bitcoin/protocol/block.rb +3 -9
- data/lib/bitcoin/protocol/parser.rb +6 -2
- data/lib/bitcoin/protocol/tx.rb +44 -13
- data/lib/bitcoin/protocol/txin.rb +4 -2
- data/lib/bitcoin/protocol/txout.rb +2 -2
- data/lib/bitcoin/script.rb +212 -37
- data/lib/bitcoin/trezor/mnemonic.rb +130 -0
- data/lib/bitcoin/version.rb +1 -1
- data/spec/bitcoin/bitcoin_spec.rb +32 -3
- data/spec/bitcoin/builder_spec.rb +18 -0
- data/spec/bitcoin/contracthash_spec.rb +45 -0
- data/spec/bitcoin/dogecoin_spec.rb +176 -0
- data/spec/bitcoin/ffi_openssl.rb +45 -0
- data/spec/bitcoin/fixtures/156e6e1b84c5c3bd3a0927b25e4119fadce6e6d5186f363317511d1d680fae9a.json +24 -0
- data/spec/bitcoin/fixtures/8d0b238a06b5a70be75d543902d02d7a514d68d3252a949a513865ac3538874c.json +24 -0
- data/spec/bitcoin/fixtures/coinbase-toshi.json +33 -0
- data/spec/bitcoin/fixtures/coinbase.json +24 -0
- data/spec/bitcoin/fixtures/dogecoin-block-60323982f9c5ff1b5a954eac9dc1269352835f47c2c5222691d80f0d50dcf053.bin +0 -0
- data/spec/bitcoin/fixtures/rawtx-01-toshi.json +46 -0
- data/spec/bitcoin/fixtures/rawtx-02-toshi.json +46 -0
- data/spec/bitcoin/fixtures/rawtx-03-toshi.json +73 -0
- data/spec/bitcoin/fixtures/rawtx-testnet-04fdc38d6722ab4b12d79113fc4b2896bdcc5169710690ee4e78541b98e467b4.bin +0 -0
- data/spec/bitcoin/fixtures/rawtx-testnet-0b294c7d11dd21bcccb8393e6744fed7d4d1981a08c00e3e88838cc421f33c9f.bin +0 -0
- data/spec/bitcoin/fixtures/rawtx-testnet-3bc52ac063291ad92d95ddda5fd776a342083b95607ad32ed8bc6f8f7d30449e.bin +0 -0
- data/spec/bitcoin/fixtures/rawtx-testnet-6f0bbdd4e71a8af4305018d738184df32dbb6f27284fdebd5b56d16947f7c181.bin +0 -0
- data/spec/bitcoin/fixtures/rawtx-testnet-a7c9b06e275e8674cc19a5f7d3e557c72c6d93576e635b33212dbe08ab7cdb60.bin +0 -0
- data/spec/bitcoin/fixtures/rawtx-testnet-f80acbd2f594d04ddb0e1cacba662132104909157dff526935a3c88abe9201a5.bin +0 -0
- data/spec/bitcoin/protocol/block_spec.rb +0 -22
- data/spec/bitcoin/protocol/tx_spec.rb +145 -2
- data/spec/bitcoin/script/script_spec.rb +282 -0
- data/spec/bitcoin/secp256k1_spec.rb +48 -0
- data/spec/bitcoin/spec_helper.rb +0 -51
- data/spec/bitcoin/trezor/mnemonic_spec.rb +161 -0
- metadata +48 -98
- data/bin/bitcoin_dns_seed +0 -130
- data/bin/bitcoin_gui +0 -80
- data/bin/bitcoin_node +0 -153
- data/bin/bitcoin_node_cli +0 -81
- data/bin/bitcoin_wallet +0 -402
- data/doc/CONFIG.rdoc +0 -66
- data/doc/EXAMPLES.rdoc +0 -13
- data/doc/NAMECOIN.rdoc +0 -34
- data/doc/NODE.rdoc +0 -225
- data/doc/STORAGE.rdoc +0 -33
- data/doc/WALLET.rdoc +0 -102
- data/examples/balance.rb +0 -66
- data/examples/forwarder.rb +0 -73
- data/examples/index_nhash.rb +0 -24
- data/examples/reindex_p2sh_addrs.rb +0 -44
- data/examples/relay_tx.rb +0 -22
- data/examples/verify_tx.rb +0 -57
- data/lib/bitcoin/config.rb +0 -58
- data/lib/bitcoin/gui/addr_view.rb +0 -44
- data/lib/bitcoin/gui/bitcoin-ruby.png +0 -0
- data/lib/bitcoin/gui/bitcoin-ruby.svg +0 -80
- data/lib/bitcoin/gui/conn_view.rb +0 -38
- data/lib/bitcoin/gui/connection.rb +0 -70
- data/lib/bitcoin/gui/em_gtk.rb +0 -30
- data/lib/bitcoin/gui/gui.builder +0 -1643
- data/lib/bitcoin/gui/gui.rb +0 -292
- data/lib/bitcoin/gui/helpers.rb +0 -115
- data/lib/bitcoin/gui/tree_view.rb +0 -84
- data/lib/bitcoin/gui/tx_view.rb +0 -69
- data/lib/bitcoin/namecoin.rb +0 -280
- data/lib/bitcoin/network/command_client.rb +0 -104
- data/lib/bitcoin/network/command_handler.rb +0 -570
- data/lib/bitcoin/network/connection_handler.rb +0 -387
- data/lib/bitcoin/network/node.rb +0 -565
- data/lib/bitcoin/storage/dummy/dummy_store.rb +0 -179
- data/lib/bitcoin/storage/models.rb +0 -171
- data/lib/bitcoin/storage/sequel/migrations.rb +0 -99
- data/lib/bitcoin/storage/sequel/migrations/001_base_schema.rb +0 -52
- data/lib/bitcoin/storage/sequel/migrations/002_tx.rb +0 -45
- data/lib/bitcoin/storage/sequel/migrations/003_change_txin_script_sig_to_blob.rb +0 -18
- data/lib/bitcoin/storage/sequel/migrations/004_change_txin_prev_out_to_blob.rb +0 -18
- data/lib/bitcoin/storage/sequel/migrations/005_change_tx_hash_to_bytea.rb +0 -14
- data/lib/bitcoin/storage/sequel/migrations/006_add_tx_nhash.rb +0 -31
- data/lib/bitcoin/storage/sequel/migrations/007_add_prev_out_index_index.rb +0 -16
- data/lib/bitcoin/storage/sequel/migrations/008_add_txin_p2sh_type.rb +0 -31
- data/lib/bitcoin/storage/sequel/migrations/009_add_addrs_type.rb +0 -56
- data/lib/bitcoin/storage/sequel/sequel_store.rb +0 -551
- data/lib/bitcoin/storage/storage.rb +0 -517
- data/lib/bitcoin/storage/utxo/migrations/001_base_schema.rb +0 -52
- data/lib/bitcoin/storage/utxo/migrations/002_utxo.rb +0 -18
- data/lib/bitcoin/storage/utxo/migrations/003_update_indices.rb +0 -14
- data/lib/bitcoin/storage/utxo/migrations/004_add_addrs_type.rb +0 -14
- data/lib/bitcoin/storage/utxo/utxo_store.rb +0 -374
- data/lib/bitcoin/validation.rb +0 -400
- data/lib/bitcoin/wallet/coinselector.rb +0 -33
- data/lib/bitcoin/wallet/keygenerator.rb +0 -77
- data/lib/bitcoin/wallet/keystore.rb +0 -207
- data/lib/bitcoin/wallet/txdp.rb +0 -118
- data/lib/bitcoin/wallet/wallet.rb +0 -281
- data/spec/bitcoin/fixtures/freicoin-block-000000005d231b285e63af83edae2d8f5e50e70d396468643092b9239fd3be3c.bin +0 -0
- data/spec/bitcoin/fixtures/freicoin-block-000000005d231b285e63af83edae2d8f5e50e70d396468643092b9239fd3be3c.json +0 -43
- data/spec/bitcoin/fixtures/freicoin-genesis-block-000000005b1e3d23ecfd2dd4a6e1a35238aa0392c0a8528c40df52376d7efe2c.bin +0 -0
- data/spec/bitcoin/fixtures/freicoin-genesis-block-000000005b1e3d23ecfd2dd4a6e1a35238aa0392c0a8528c40df52376d7efe2c.json +0 -67
- data/spec/bitcoin/namecoin_spec.rb +0 -182
- data/spec/bitcoin/node/command_api_spec.rb +0 -663
- data/spec/bitcoin/storage/models_spec.rb +0 -104
- data/spec/bitcoin/storage/reorg_spec.rb +0 -236
- data/spec/bitcoin/storage/storage_spec.rb +0 -387
- data/spec/bitcoin/storage/validation_spec.rb +0 -300
- data/spec/bitcoin/wallet/coinselector_spec.rb +0 -38
- data/spec/bitcoin/wallet/keygenerator_spec.rb +0 -69
- data/spec/bitcoin/wallet/keystore_spec.rb +0 -190
- data/spec/bitcoin/wallet/txdp_spec.rb +0 -76
- data/spec/bitcoin/wallet/wallet_spec.rb +0 -238
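Most of the removed files belong to the node, wallet, and storage layers, which are no longer bundled with the gem in 0.0.7. For context, here is a minimal sketch of how the Sequel storage backend removed below was typically set up in 0.0.6; the `Bitcoin::Storage.sequel` constructor is taken from the removed doc/STORAGE.rdoc (an assumption on my part), while `get_depth` and `get_block_by_depth` are the accessors defined in the deleted sequel_store.rb shown further down.

  # Minimal sketch, assuming bitcoin-ruby 0.0.6 with the Sequel backend and an
  # SQL adapter (e.g. sqlite3) installed. The Bitcoin::Storage.sequel constructor
  # and its :db option follow the removed STORAGE.rdoc; the query methods are
  # those implemented in the deleted sequel_store.rb.
  require 'bitcoin'

  Bitcoin.network = :testnet3

  # connect the (now removed) Sequel storage backend to an SQLite database
  store = Bitcoin::Storage.sequel(db: "sqlite://bitcoin.db")

  # query the stored chain through the backend's accessor methods
  puts "current depth: #{store.get_depth}"
  if block = store.get_block_by_depth(0)
    puts "genesis: #{block.hash}"
  end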

data/lib/bitcoin/storage/sequel/migrations/001_base_schema.rb
@@ -1,52 +0,0 @@
-Sequel.migration do
-
-
-  up do
-
-    @log.info { "Running migration #{__FILE__}" }
-
-    binary = adapter_scheme == :postgres ? :bytea : :varchar
-
-    alter_table :schema_info do
-      add_column :magic, :varchar   # network magic-head
-      add_column :backend, :varchar # storage backend
-    end
-
-    next if tables.include?(:blk)
-
-    create_table :blk do
-      primary_key :id
-      column :hash, binary, :null => false, :unique => true, :index => true
-      column :depth, :int, :null => false, :index => true
-      column :version, :bigint, :null => false
-      column :prev_hash, binary, :null => false, :index => true
-      column :mrkl_root, binary, :null => false
-      column :time, :bigint, :null => false
-      column :bits, :bigint, :null => false
-      column :nonce, :bigint, :null => false
-      column :blk_size, :int, :null => false
-      column :chain, :int, :null => false
-      column :work, binary, :index => true
-      column :aux_pow, binary
-    end
-
-    create_table :addr do
-      primary_key :id
-      column :hash160, String, :null => false, :index => true
-    end
-
-    create_table :addr_txout do
-      column :addr_id, :int, :null => false, :index => true
-      column :txout_id, :int, :null => false, :index => true
-    end
-
-    create_table :names do
-      column :txout_id, :int, :null => false, :index => true
-      column :hash, binary, :index => true
-      column :name, binary, :index => true
-      column :value, binary
-    end
-
-  end
-
-end

data/lib/bitcoin/storage/sequel/migrations/002_tx.rb
@@ -1,45 +0,0 @@
-Sequel.migration do
-
-  up do
-
-    @log.info { "Running migration #{__FILE__}" }
-
-    next if tables.include?(:tx)
-
-    create_table :tx do
-      primary_key :id
-      column :hash, :varchar, :null => false, :unique => true, :index => true
-      column :version, :bigint, :null => false
-      column :lock_time, :bigint, :null => false
-      column :coinbase, :bool, :null => false
-      column :tx_size, :int, :null => false
-    end
-
-    create_table :blk_tx do
-      column :blk_id, :int, :null => false, :index => true
-      column :tx_id, :int, :null => false, :index => true
-      column :idx, :int, :null => false
-    end
-
-    create_table :txin do
-      primary_key :id
-      column :tx_id, :int, :null => false, :index => true
-      column :tx_idx, :int, :null => false
-      column :script_sig, :varchar, :null => false
-      column :prev_out, :varchar, :null => false, :index => true
-      column :prev_out_index, :bigint, :null => false
-      column :sequence, :bigint, :null => false
-    end
-
-    create_table :txout do
-      primary_key :id
-      column :tx_id, :int, :null => false, :index => true
-      column :tx_idx, :int, :null => false
-      column :pk_script, (@db.adapter_scheme == :postgres ? :bytea : :blob), :null => false
-      column :value, :bigint
-      column :type, :int, :null => false, :index => true
-    end
-
-  end
-
-end

data/lib/bitcoin/storage/sequel/migrations/003_change_txin_script_sig_to_blob.rb
@@ -1,18 +0,0 @@
-Sequel.migration do
-
-  up do
-
-    @log.info { "Running migration #{__FILE__}" }
-
-    if adapter_scheme == :postgres
-      add_column :txin, :tmp_script_sig, :bytea
-      self[:txin].where.update("tmp_script_sig = script_sig::bytea")
-      drop_column :txin, :script_sig
-      add_column :txin, :script_sig, :bytea
-      self[:txin].where.update("script_sig = tmp_script_sig")
-      drop_column :txin, :tmp_script_sig
-    end
-
-  end
-
-end

data/lib/bitcoin/storage/sequel/migrations/004_change_txin_prev_out_to_blob.rb
@@ -1,18 +0,0 @@
-Sequel.migration do
-
-  up do
-
-    @log.info { "Running migration #{__FILE__}" }
-
-    if adapter_scheme == :postgres
-      add_column :txin, :tmp_prev_out, :bytea
-      self[:txin].where.update("tmp_prev_out = prev_out::bytea")
-      drop_column :txin, :prev_out
-      add_column :txin, :prev_out, :bytea, index: true
-      self[:txin].where.update("prev_out = tmp_prev_out")
-      drop_column :txin, :tmp_prev_out
-    end
-
-  end
-
-end

data/lib/bitcoin/storage/sequel/migrations/005_change_tx_hash_to_bytea.rb
@@ -1,14 +0,0 @@
-Sequel.migration do
-
-  up do
-
-    @log.info { "Running migration #{__FILE__}" }
-
-    if adapter_scheme == :postgres
-      execute "DROP VIEW unconfirmed" if self.views.include?(:unconfirmed)
-      execute "ALTER TABLE tx ALTER COLUMN hash TYPE bytea USING hash::bytea"
-    end
-
-  end
-
-end

data/lib/bitcoin/storage/sequel/migrations/006_add_tx_nhash.rb
@@ -1,31 +0,0 @@
-Sequel.migration do
-
-  up do
-
-    @log.info { "Running migration #{__FILE__}" }
-
-    def process_block blk
-      print "\r#{blk.hash} - #{blk.depth}"
-      blk.tx.each do |tx|
-        self[:tx].where(hash: tx.hash.htb.blob).update(nhash: tx.nhash.htb.blob)
-      end
-    end
-
-    if @store.config[:index_nhash]
-      puts "Building normalized hash index..."
-
-      add_column :tx, :nhash, :bytea
-
-      if blk = @store.get_block_by_depth(0)
-        process_block(blk)
-        while blk = blk.get_next_block
-          process_block(blk)
-        end
-      end
-
-      add_index :tx, :nhash
-
-    end
-  end
-
-end

data/lib/bitcoin/storage/sequel/migrations/007_add_prev_out_index_index.rb
@@ -1,16 +0,0 @@
-Sequel.migration do
-
-  up do
-
-    @log.info { "Running migration #{__FILE__}" }
-
-    # Naming seems to be different on different adapters and sequel's
-    # "drop_index(:txin, :prev_out)" doesn't seem to be handling it correctly
-    execute "DROP INDEX IF EXISTS txin_prev_out_idx;"
-    execute "DROP INDEX IF EXISTS txin_prev_out_index;"
-
-    add_index :txin, [:prev_out, :prev_out_index]
-
-  end
-
-end

data/lib/bitcoin/storage/sequel/migrations/008_add_txin_p2sh_type.rb
@@ -1,31 +0,0 @@
-# Add column txin.p2sh_type and index the type of the inner p2sh script of all inputs
-
-Sequel.migration do
-
-  up do
-
-    @log.info { "Running migration #{__FILE__}" }
-
-    if @store.config[:index_p2sh_type]
-      puts "Building p2sh type index..."
-
-      add_column :txin, :p2sh_type, :int
-
-      self[:txout].where(type: 4).each do |txout|
-        tx = self[:tx][id: txout[:tx_id]]
-        next unless next_in = self[:txin][prev_out: tx[:hash].reverse, prev_out_index: txout[:tx_idx]]
-        script = Bitcoin::Script.new(next_in[:script_sig], txout[:pk_script])
-        if script.is_p2sh?
-          inner_script = Bitcoin::Script.new(script.inner_p2sh_script)
-          p2sh_type = @store.class::SCRIPT_TYPES.index(inner_script.type)
-          self[:txin].where(id: next_in[:id]).update(p2sh_type: p2sh_type)
-        end
-
-      end
-
-      add_index :txin, [:id, :p2sh_type]
-
-    end
-  end
-
-end

data/lib/bitcoin/storage/sequel/migrations/009_add_addrs_type.rb
@@ -1,56 +0,0 @@
-# Add column addr.type and correct the type for all p2sh addresses
-
-Sequel.migration do
-
-  up do
-
-    @log.info { "Running migration #{__FILE__}" }
-
-    puts "Fixing address types for #{self[:txout].where(type: 4).count} p2sh addresses..."
-
-    add_column :addr, :type, :int, default: 0, null: false
-
-    i = 0
-    # iterate over all txouts with p2sh type
-    self[:txout].where(type: 4).each do |txout|
-      # find addr_txout mapping
-      addr_txout = self[:addr_txout][txout_id: txout[:id]]
-
-      # find currently linked address
-      addr = self[:addr][id: addr_txout[:addr_id]]
-
-      # skip if address type is already p2sh
-      next i+=1 if addr[:type] == 1
-
-      # if address has other txouts, that are not p2sh-type, we need a different one
-      if self[:addr_txout].where(addr_id: addr[:id])
-          .join(:txout, id: :txout_id).where("type != 4").any?
-
-        # if there is already a corrected address
-        if a = self[:addr][hash160: addr[:hash160], type: 1]
-          # use the existing corrected address
-          addr_id = a[:id]
-        else
-          # create new address with correct p2sh type
-          addr_id = self[:addr].insert(hash160: addr[:hash160], type: 1)
-        end
-
-        # change mapping to point to new address
-        self[:addr_txout].where(txout_id: txout[:id]).update(addr_id: addr_id)
-
-      # if address has only this txout
-      else
-        # change to correct type
-        self[:addr].where(id: addr[:id]).update(type: 1)
-      end
-
-      print "\r#{i}"; i+=1
-
-    end
-    puts
-
-    add_index :addr, [:hash160, :type]
-
-  end
-
-end

data/lib/bitcoin/storage/sequel/sequel_store.rb
@@ -1,551 +0,0 @@
-# encoding: ascii-8bit
-
-Bitcoin.require_dependency :sequel, message:
-  "Note: You will also need an adapter for your database like sqlite3, mysql2, postgresql"
-
-module Bitcoin::Storage::Backends
-
-  # Storage backend using Sequel to connect to arbitrary SQL databases.
-  # Inherits from StoreBase and implements its interface.
-  class SequelStore < SequelStoreBase
-
-    # sequel database connection
-    attr_accessor :db
-
-    DEFAULT_CONFIG = {
-      # TODO
-      mode: :full,
-
-      # cache head block. only the instance that is updating the head should do this.
-      cache_head: false,
-
-      # store an index of tx.nhash values
-      index_nhash: false
-    }
-
-    # create sequel store with given +config+
-    def initialize config, *args
-      super config, *args
-    end
-
-    # connect to database
-    def connect
-      super
-    end
-
-    # reset database; delete all data
-    def reset
-      [:blk, :blk_tx, :tx, :txin, :txout, :addr, :addr_txout, :names].each {|table| @db[table].delete }
-      @head = nil
-    end
-
-    # persist given block +blk+ to storage.
-    def persist_block blk, chain, depth, prev_work = 0
-      @db.transaction do
-        attrs = {
-          :hash => blk.hash.htb.blob,
-          :depth => depth,
-          :chain => chain,
-          :version => blk.ver,
-          :prev_hash => blk.prev_block.reverse.blob,
-          :mrkl_root => blk.mrkl_root.reverse.blob,
-          :time => blk.time,
-          :bits => blk.bits,
-          :nonce => blk.nonce,
-          :blk_size => blk.to_payload.bytesize,
-          :work => (prev_work + blk.block_work).to_s
-        }
-        attrs[:aux_pow] = blk.aux_pow.to_payload.blob if blk.aux_pow
-        existing = @db[:blk].filter(:hash => blk.hash.htb.blob)
-        if existing.any?
-          existing.update attrs
-          block_id = existing.first[:id]
-        else
-          block_id = @db[:blk].insert(attrs)
-          blk_tx, new_tx, addrs, names = [], [], [], []
-
-          # store tx
-          existing_tx = Hash[*@db[:tx].filter(hash: blk.tx.map {|tx| tx.hash.htb.blob }).map { |tx| [tx[:hash].hth, tx[:id]] }.flatten]
-          blk.tx.each.with_index do |tx, idx|
-            existing = existing_tx[tx.hash]
-            existing ? blk_tx[idx] = existing : new_tx << [tx, idx]
-          end
-
-          new_tx_ids = fast_insert(:tx, new_tx.map {|tx, _| tx_data(tx) }, return_ids: true)
-          new_tx_ids.each.with_index {|tx_id, idx| blk_tx[new_tx[idx][1]] = tx_id }
-
-          fast_insert(:blk_tx, blk_tx.map.with_index {|id, idx| { blk_id: block_id, tx_id: id, idx: idx } })
-
-          # store txins
-          fast_insert(:txin, new_tx.map.with_index {|tx, tx_idx|
-            tx, _ = *tx
-            tx.in.map.with_index {|txin, txin_idx|
-              p2sh_type = nil
-              if @config[:index_p2sh_type] && !txin.coinbase? && (script = tx.scripts[txin_idx]) && script.is_p2sh?
-                p2sh_type = Bitcoin::Script.new(script.inner_p2sh_script).type
-              end
-              txin_data(new_tx_ids[tx_idx], txin, txin_idx, p2sh_type) } }.flatten)
-
-          # store txouts
-          txout_i = 0
-          txout_ids = fast_insert(:txout, new_tx.map.with_index {|tx, tx_idx|
-            tx, _ = *tx
-            tx.out.map.with_index {|txout, txout_idx|
-              script_type, a, n = *parse_script(txout, txout_i, tx.hash, txout_idx)
-              addrs += a; names += n; txout_i += 1
-              txout_data(new_tx_ids[tx_idx], txout, txout_idx, script_type) } }.flatten, return_ids: true)
-
-          # store addrs
-          persist_addrs addrs.map {|i, addr| [txout_ids[i], addr]}
-          names.each {|i, script| store_name(script, txout_ids[i]) }
-        end
-        @head = wrap_block(attrs.merge(id: block_id)) if chain == MAIN
-        @db[:blk].where(:prev_hash => blk.hash.htb.blob, :chain => ORPHAN).each do |b|
-          log.debug { "connecting orphan #{b[:hash].hth}" }
-          begin
-            store_block(get_block(b[:hash].hth))
-          rescue SystemStackError
-            EM.defer { store_block(get_block(b[:hash].hth)) } if EM.reactor_running?
-          end
-        end
-        return depth, chain
-      end
-    end
-
-    def reorg new_side, new_main
-      @db.transaction do
-        @db[:blk].where(hash: new_side.map {|h| h.htb.blob }).update(chain: SIDE)
-        new_main.each do |block_hash|
-          unless @config[:skip_validation]
-            get_block(block_hash).validator(self).validate(raise_errors: true)
-          end
-          @db[:blk].where(hash: block_hash.htb.blob).update(chain: MAIN)
-        end
-      end
-    end
-
-    # bulk-store addresses and txout mappings
-    def persist_addrs addrs
-      addr_txouts, new_addrs = [], []
-
-      # find addresses that are already there
-      existing_addr = {}
-      addrs.each do |i, addr|
-        hash160 = Bitcoin.hash160_from_address(addr)
-        type = Bitcoin.address_type(addr)
-        if existing = @db[:addr][hash160: hash160, type: ADDRESS_TYPES.index(type)]
-          existing_addr[[hash160, type]] = existing[:id]
-        end
-      end
-
-      # iterate over all txouts, grouped by hash160
-      addrs.group_by {|_, a| a }.each do |addr, txouts|
-        hash160 = Bitcoin.hash160_from_address(addr)
-        type = Bitcoin.address_type(addr)
-
-        if existing_id = existing_addr[[hash160, type]]
-          # link each txout to existing address
-          txouts.each {|id, _| addr_txouts << [existing_id, id] }
-        else
-          # collect new address/txout mapping
-          new_addrs << [[hash160, type], txouts.map {|id, _| id }]
-        end
-      end
-
-      # insert all new addresses
-      new_addr_ids = fast_insert(:addr, new_addrs.map {|hash160_and_type, txout_id|
-        hash160, type = *hash160_and_type
-        { hash160: hash160, type: ADDRESS_TYPES.index(type) }
-      }, return_ids: true)
-
-
-      # link each new txout to the new addresses
-      new_addr_ids.each.with_index do |addr_id, idx|
-        new_addrs[idx][1].each do |txout_id|
-          addr_txouts << [addr_id, txout_id]
-        end
-      end
-
-      # insert addr/txout links
-      fast_insert(:addr_txout, addr_txouts.map {|addr_id, txout_id| { addr_id: addr_id, txout_id: txout_id }})
-    end
-
-    # prepare transaction data for storage
-    def tx_data tx
-      data = {
-        hash: tx.hash.htb.blob,
-        version: tx.ver, lock_time: tx.lock_time,
-        coinbase: tx.in.size == 1 && tx.in[0].coinbase?,
-        tx_size: tx.payload.bytesize }
-      data[:nhash] = tx.nhash.htb.blob if @config[:index_nhash]
-      data
-    end
-
-    # store transaction +tx+
-    def store_tx(tx, validate = true)
-      @log.debug { "Storing tx #{tx.hash} (#{tx.to_payload.bytesize} bytes)" }
-      tx.validator(self).validate(raise_errors: true) if validate
-      @db.transaction do
-        transaction = @db[:tx][:hash => tx.hash.htb.blob]
-        return transaction[:id] if transaction
-        tx_id = @db[:tx].insert(tx_data(tx))
-        tx.in.each_with_index {|i, idx| store_txin(tx_id, i, idx)}
-        tx.out.each_with_index {|o, idx| store_txout(tx_id, o, idx, tx.hash)}
-        tx_id
-      end
-    end
-
-    # prepare txin data for storage
-    def txin_data tx_id, txin, idx, p2sh_type = nil
-      data = {
-        tx_id: tx_id, tx_idx: idx,
-        script_sig: txin.script_sig.blob,
-        prev_out: txin.prev_out.blob,
-        prev_out_index: txin.prev_out_index,
-        sequence: txin.sequence.unpack("V")[0],
-      }
-      data[:p2sh_type] = SCRIPT_TYPES.index(p2sh_type) if @config[:index_p2sh_type]
-      data
-    end
-
-    # store input +txin+
-    def store_txin(tx_id, txin, idx, p2sh_type = nil)
-      @db[:txin].insert(txin_data(tx_id, txin, idx, p2sh_type))
-    end
-
-    # prepare txout data for storage
-    def txout_data tx_id, txout, idx, script_type
-      { tx_id: tx_id, tx_idx: idx,
-        pk_script: txout.pk_script.blob,
-        value: txout.value, type: script_type }
-    end
-
-    # store output +txout+
-    def store_txout(tx_id, txout, idx, tx_hash = "")
-      script_type, addrs, names = *parse_script(txout, idx, tx_hash, idx)
-      txout_id = @db[:txout].insert(txout_data(tx_id, txout, idx, script_type))
-      persist_addrs addrs.map {|i, h| [txout_id, h] }
-      names.each {|i, script| store_name(script, txout_id) }
-      txout_id
-    end
-
-    # delete transaction
-    # TODO: also delete blk_tx mapping
-    def delete_tx(hash)
-      log.debug { "Deleting tx #{hash} since all its outputs are spent" }
-      @db.transaction do
-        tx = get_tx(hash)
-        tx.in.each {|i| @db[:txin].where(:id => i.id).delete }
-        tx.out.each {|o| @db[:txout].where(:id => o.id).delete }
-        @db[:tx].where(:id => tx.id).delete
-      end
-    end
-
-    # check if block +blk_hash+ exists in the main chain
-    def has_block(blk_hash)
-      !!@db[:blk].where(:hash => blk_hash.htb.blob, :chain => 0).get(1)
-    end
-
-    # check if transaction +tx_hash+ exists
-    def has_tx(tx_hash)
-      !!@db[:tx].where(:hash => tx_hash.htb.blob).get(1)
-    end
-
-    # get head block (highest block from the MAIN chain)
-    def get_head
-      (@config[:cache_head] && @head) ? @head :
-        @head = wrap_block(@db[:blk].filter(:chain => MAIN).order(:depth).last)
-    end
-
-    def get_head_hash
-      (@config[:cache_head] && @head) ? @head.hash :
-        @head = @db[:blk].filter(:chain => MAIN).order(:depth).last[:hash].hth
-    end
-
-    # get depth of MAIN chain
-    def get_depth
-      depth = (@config[:cache_head] && @head) ? @head.depth :
-        @depth = @db[:blk].filter(:chain => MAIN).order(:depth).last[:depth] rescue nil
-
-      return -1 unless depth
-      depth
-    end
-
-    # get block for given +blk_hash+
-    def get_block(blk_hash)
-      wrap_block(@db[:blk][:hash => blk_hash.htb.blob])
-    end
-
-    # get block by given +depth+
-    def get_block_by_depth(depth)
-      wrap_block(@db[:blk][:depth => depth, :chain => MAIN])
-    end
-
-    # get block by given +prev_hash+
-    def get_block_by_prev_hash(prev_hash)
-      wrap_block(@db[:blk][:prev_hash => prev_hash.htb.blob, :chain => MAIN])
-    end
-
-    # get block by given +tx_hash+
-    def get_block_by_tx(tx_hash)
-      tx = @db[:tx][:hash => tx_hash.htb.blob]
-      return nil unless tx
-      parent = @db[:blk_tx][:tx_id => tx[:id]]
-      return nil unless parent
-      wrap_block(@db[:blk][:id => parent[:blk_id]])
-    end
-
-    # get block by given +id+
-    def get_block_by_id(block_id)
-      wrap_block(@db[:blk][:id => block_id])
-    end
-
-    # get block id in the main chain by given +tx_id+
-    def get_block_id_for_tx_id(tx_id)
-      @db[:blk_tx].join(:blk, id: :blk_id)
-        .where(tx_id: tx_id, chain: MAIN).first[:blk_id] rescue nil
-    end
-
-    # get transaction for given +tx_hash+
-    def get_tx(tx_hash)
-      wrap_tx(@db[:tx][:hash => tx_hash.htb.blob])
-    end
-
-    # get array of txes with given +tx_hashes+
-    def get_txs(tx_hashes)
-      txs = db[:tx].filter(hash: tx_hashes.map{|h| h.htb.blob})
-      txs_ids = txs.map {|tx| tx[:id]}
-      return [] if txs_ids.empty?
-
-      # we fetch all needed block ids, inputs and outputs to avoid doing number of queries propertional to number of transactions
-      block_ids = Hash[*db[:blk_tx].join(:blk, id: :blk_id).filter(tx_id: txs_ids, chain: 0).map {|b| [b[:tx_id], b[:blk_id]] }.flatten]
-      inputs = db[:txin].filter(:tx_id => txs_ids).order(:tx_idx).map.group_by{ |txin| txin[:tx_id] }
-      outputs = db[:txout].filter(:tx_id => txs_ids).order(:tx_idx).map.group_by{ |txout| txout[:tx_id] }
-
-      txs.map {|tx| wrap_tx(tx, block_ids[tx[:id]], inputs: inputs[tx[:id]], outputs: outputs[tx[:id]]) }
-    end
-
-    # get transaction by given +tx_id+
-    def get_tx_by_id(tx_id)
-      wrap_tx(@db[:tx][:id => tx_id])
-    end
-
-    # get corresponding Models::TxIn for the txout in transaction
-    # +tx_hash+ with index +txout_idx+
-    def get_txin_for_txout(tx_hash, txout_idx)
-      tx_hash = tx_hash.htb_reverse.blob
-      wrap_txin(@db[:txin][:prev_out => tx_hash, :prev_out_index => txout_idx])
-    end
-
-    # optimized version of Storage#get_txins_for_txouts
-    def get_txins_for_txouts(txouts)
-      @db[:txin].filter([:prev_out, :prev_out_index] => txouts.map{|tx_hash, tx_idx| [tx_hash.htb_reverse.blob, tx_idx]}).map{|i| wrap_txin(i)}
-    end
-
-    def get_txout_by_id(txout_id)
-      wrap_txout(@db[:txout][:id => txout_id])
-    end
-
-    # get corresponding Models::TxOut for +txin+
-    def get_txout_for_txin(txin)
-      tx = @db[:tx][:hash => txin.prev_out.reverse.blob]
-      return nil unless tx
-      wrap_txout(@db[:txout][:tx_idx => txin.prev_out_index, :tx_id => tx[:id]])
-    end
-
-    # get all Models::TxOut matching given +script+
-    def get_txouts_for_pk_script(script)
-      txouts = @db[:txout].filter(:pk_script => script.blob).order(:id)
-      txouts.map{|txout| wrap_txout(txout)}
-    end
-
-    # get all Models::TxOut matching given +hash160+
-    def get_txouts_for_hash160(hash160, type = :hash160, unconfirmed = false)
-      addr = @db[:addr][hash160: hash160, type: ADDRESS_TYPES.index(type)]
-      return [] unless addr
-      txouts = @db[:addr_txout].where(addr_id: addr[:id])
-        .map{|t| @db[:txout][id: t[:txout_id]] }
-        .map{|o| wrap_txout(o) }
-      unless unconfirmed
-        txouts.select!{|o| @db[:blk][id: o.get_tx.blk_id][:chain] == MAIN rescue false }
-      end
-      txouts
-    end
-
-    def get_txouts_for_name_hash(hash)
-      @db[:names].filter(hash: hash).map {|n| get_txout_by_id(n[:txout_id]) }
-    end
-
-    # get all unconfirmed Models::TxOut
-    def get_unconfirmed_tx
-      @db[:unconfirmed].map{|t| wrap_tx(t)}
-    end
-
-    # Grab the position of a tx in a given block
-    def get_idx_from_tx_hash(tx_hash)
-      tx = @db[:tx][:hash => tx_hash.htb.blob]
-      return nil unless tx
-      parent = @db[:blk_tx][:tx_id => tx[:id]]
-      return nil unless parent
-      return parent[:idx]
-    end
-
-    # wrap given +block+ into Models::Block
-    def wrap_block(block)
-      return nil unless block
-
-      data = {:id => block[:id], :depth => block[:depth], :chain => block[:chain], :work => block[:work].to_i, :hash => block[:hash].hth, :size => block[:blk_size]}
-      blk = Bitcoin::Storage::Models::Block.new(self, data)
-
-      blk.ver = block[:version]
-      blk.prev_block = block[:prev_hash].reverse
-      blk.mrkl_root = block[:mrkl_root].reverse
-      blk.time = block[:time].to_i
-      blk.bits = block[:bits]
-      blk.nonce = block[:nonce]
-
-      blk.aux_pow = Bitcoin::P::AuxPow.new(block[:aux_pow]) if block[:aux_pow]
-
-      blk_tx = db[:blk_tx].filter(blk_id: block[:id]).join(:tx, id: :tx_id).order(:idx)
-
-      # fetch inputs and outputs for all transactions in the block to avoid additional queries for each transaction
-      inputs = db[:txin].filter(:tx_id => blk_tx.map{ |tx| tx[:id] }).order(:tx_idx).map.group_by{ |txin| txin[:tx_id] }
-      outputs = db[:txout].filter(:tx_id => blk_tx.map{ |tx| tx[:id] }).order(:tx_idx).map.group_by{ |txout| txout[:tx_id] }
-
-      blk.tx = blk_tx.map { |tx| wrap_tx(tx, block[:id], inputs: inputs[tx[:id]], outputs: outputs[tx[:id]]) }
-
-      blk.hash = block[:hash].hth
-      blk
-    end
-
-    # wrap given +transaction+ into Models::Transaction
-    def wrap_tx(transaction, block_id = nil, prefetched = {})
-      return nil unless transaction
-
-      block_id ||= @db[:blk_tx].join(:blk, id: :blk_id)
-        .where(tx_id: transaction[:id], chain: 0).first[:blk_id] rescue nil
-
-      data = {id: transaction[:id], blk_id: block_id, size: transaction[:tx_size], idx: transaction[:idx]}
-      tx = Bitcoin::Storage::Models::Tx.new(self, data)
-
-      inputs = prefetched[:inputs] || db[:txin].filter(:tx_id => transaction[:id]).order(:tx_idx)
-      inputs.each { |i| tx.add_in(wrap_txin(i)) }
-
-      outputs = prefetched[:outputs] || db[:txout].filter(:tx_id => transaction[:id]).order(:tx_idx)
-      outputs.each { |o| tx.add_out(wrap_txout(o)) }
-      tx.ver = transaction[:version]
-      tx.lock_time = transaction[:lock_time]
-      tx.hash = transaction[:hash].hth
-      tx
-    end
-
-    # wrap given +input+ into Models::TxIn
-    def wrap_txin(input)
-      return nil unless input
-      data = { :id => input[:id], :tx_id => input[:tx_id], :tx_idx => input[:tx_idx],
-        :p2sh_type => input[:p2sh_type] ? SCRIPT_TYPES[input[:p2sh_type]] : nil }
-      txin = Bitcoin::Storage::Models::TxIn.new(self, data)
-      txin.prev_out = input[:prev_out]
-      txin.prev_out_index = input[:prev_out_index]
-      txin.script_sig_length = input[:script_sig].bytesize
-      txin.script_sig = input[:script_sig]
-      txin.sequence = [input[:sequence]].pack("V")
-      txin
-    end
-
-    # wrap given +output+ into Models::TxOut
-    def wrap_txout(output)
-      return nil unless output
-      data = {:id => output[:id], :tx_id => output[:tx_id], :tx_idx => output[:tx_idx],
-        :hash160 => output[:hash160], :type => SCRIPT_TYPES[output[:type]]}
-      txout = Bitcoin::Storage::Models::TxOut.new(self, data)
-      txout.value = output[:value]
-      txout.pk_script = output[:pk_script]
-      txout
-    end
-
-    # check data consistency of the top +count+ blocks. validates that
-    # - the block hash computed from the stored data is the same
-    # - the prev_hash is the same as the previous blocks' hash
-    # - the merkle root computed from all transactions is correct
-    def check_consistency count = 1000
-      return if get_depth < 1 || count <= 0
-      depth = get_depth
-      count = depth - 1 if count == -1
-      count = depth - 1 if count >= depth
-      log.info { "Checking consistency of last #{count} blocks..." }
-      prev_blk = get_block_by_depth(depth - count - 1)
-      (depth - count).upto(depth).each do |depth|
-        blk = get_block_by_depth(depth)
-        raise "Block hash #{blk.depth} invalid!" unless blk.hash == blk.recalc_block_hash
-        raise "Prev hash #{blk.depth} invalid!" unless blk.prev_block.reverse.hth == prev_blk.hash
-        raise "Merkle root #{blk.depth} invalid!" unless blk.verify_mrkl_root
-        print "#{blk.hash} #{blk.depth} OK\r"
-        prev_blk = blk
-      end
-      log.info { "Last #{count} blocks are consistent." }
-    end
-
-    # get total received of +address+ address
-    def get_received(address)
-      return 0 unless Bitcoin.valid_address?(address)
-
-      txouts = get_txouts_for_address(address)
-      return 0 unless txouts.any?
-
-      txouts.inject(0){ |m, out| m + out.value }
-
-      # total = 0
-      # txouts.each do |txout|
-      #   tx = txout.get_tx
-      #   total += txout.value
-      # end
-    end
-
-    protected
-
-    # Abstraction for doing many quick inserts.
-    #
-    # * +table+ - db table name
-    # * +data+ - a table of hashes with the same keys
-    # * +opts+
-    # ** return_ids - if true table of inserted rows ids will be returned
-    def fast_insert(table, data, opts={})
-      return [] if data.empty?
-      # For postgres we are using COPY which is much faster than separate INSERTs
-      if @db.adapter_scheme == :postgres
-
-        columns = data.first.keys
-        if opts[:return_ids]
-          ids = db.transaction do
-            # COPY does not return ids, so we set ids manually based on current sequence value
-            # We lock the table to avoid inserts that could happen in the middle of COPY
-            db.execute("LOCK TABLE #{table} IN SHARE UPDATE EXCLUSIVE MODE")
-            first_id = db.fetch("SELECT nextval('#{table}_id_seq') AS id").first[:id]
-
-            # Blobs need to be represented in the hex form (yes, we do hth on them earlier, could be improved
-            # \\x is the format of bytea as hex encoding in postgres
-            csv = data.map.with_index{|x,i| [first_id + i, columns.map{|c| x[c].kind_of?(Sequel::SQL::Blob) ? "\\x#{x[c].hth}" : x[c]}].join(',')}.join("\n")
-            db.copy_into(table, columns: [:id] + columns, format: :csv, data: csv)
-            last_id = first_id + data.size - 1
-
-            # Set sequence value to max id, last arg true means it will be incremented before next value
-            db.execute("SELECT setval('#{table}_id_seq', #{last_id}, true)")
-            (first_id..last_id).to_a # returned ids
-          end
-        else
-          csv = data.map{|x| columns.map{|c| x[c].kind_of?(Sequel::SQL::Blob) ? "\\x#{x[c].hth}" : x[c]}.join(',')}.join("\n")
-          @db.copy_into(table, format: :csv, columns: columns, data: csv)
-        end
-
-      else
-
-        # Life is simple when your are not optimizing ;)
-        @db[table].insert_multiple(data)
-
-      end
-    end
-
-  end
-
-end