postspec 0.1.1
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +19 -0
- data/.rspec +3 -0
- data/.ruby-version +1 -0
- data/.travis.yml +6 -0
- data/Gemfile +5 -0
- data/README.md +36 -0
- data/Rakefile +6 -0
- data/TODO +6 -0
- data/bin/console +14 -0
- data/bin/setup +8 -0
- data/exe/postspec +40 -0
- data/lib/postspec/config.rb +38 -0
- data/lib/postspec/environment.rb +491 -0
- data/lib/postspec/frame.rb +147 -0
- data/lib/postspec/render.rb +178 -0
- data/lib/postspec/state.rb +112 -0
- data/lib/postspec/version.rb +3 -0
- data/lib/postspec.rb +367 -0
- data/lib/postspec_helper.rb +128 -0
- data/lib/share/postspec_schema.sql +137 -0
- data/performance/performance_spec.rb +114 -0
- data/postspec.gemspec +41 -0
- data/snippets/sequences.sql +19 -0
- data/snippets/serials.sql +13 -0
- data/snippets/triggers.sql +14 -0
- metadata +237 -0
data/lib/postspec/frame.rb
@@ -0,0 +1,147 @@
+
+require 'yaml'
+require 'digest'
+
+module Postspec
+  class FrameStack
+    attr_reader :postspec
+    forward_to :postspec, :type
+
+    def initialize(postspec)
+      constrain postspec, Postspec
+      @postspec = postspec
+      @stack = []
+    end
+
+    forward_to :@stack, :empty?, :size, :each, :map
+
+    def push(frame) @stack.push frame; frame end
+    def pop() @stack.pop end
+    def top() @stack.last end
+    def dump()
+      puts self.class
+      indent { @stack.reverse.map(&:dump) }
+    end
+  end
+
+  # TODO: Add #stack and forward #postspec to it
+  class Frame
+    attr_reader :postspec
+    attr_reader :parent
+    attr_reader :search_path
+    def schema() @search_path.first end
+
+    attr_reader :push_sql
+    attr_reader :pop_sql
+    attr_reader :ids
+    attr_reader :anchors
+
+    def initialize(postspec, parent, search_path, push_sql, pop_sql, ids, fox_anchors)
+      constrain postspec, Postspec
+      constrain parent, NilClass, Frame
+      constrain search_path, [String], NilClass
+      constrain push_sql, String, [String], NilClass
+      constrain pop_sql, String, [String], NilClass
+      constrain ids, String => Integer
+      constrain fox_anchors, FixtureFox::Anchors, NilClass
+      @postspec = postspec
+      @parent = parent
+      @search_path = search_path || %w(public)
+      @push_sql = ["set search_path to #{@search_path.join(", ")}"] + Array(push_sql || [])
+      @pop_sql = Array(pop_sql || []) + (parent ? ["set search_path to #{parent.search_path.join(", ")}"] : [])
+      @ids = ids || {}
+      @fox_anchors = fox_anchors || FixtureFox::Anchors.new(@postspec.type)
+      anchors = @fox_anchors.values.map { |anchor| [anchor.name, anchor.id] }.to_h
+      @anchors = (parent ? parent.anchors.merge(anchors) : anchors)
+    end
+
+    # Returns true if this frame is associated with a transaction
+    def transaction?() true end
+
+    def dump()
+      puts self.class
+      indent {
+        puts "search_path: #{search_path}"
+        puts "push_sql:"
+        indent { puts push_sql }
+        puts "pop:sql:"
+        indent { puts pop_sql }
+      }
+    end
+
+  protected
+    attr_reader :fox_anchors
+  end
+
+  class NopFrame < Frame
+    def initialize(parent, search_path)
+      super(parent.postspec, parent, search_path, nil, nil, parent.ids, parent.fox_anchors)
+    end
+  end
+
+  class FoxFrame < Frame
+    attr_reader :fox
+    attr_reader :data
+    alias_method :sql, :push_sql
+
+    # Note FoxFrame::new computes the fox object
+    def initialize(parent, search_path, fox, data, push_sql)
+      @fox = fox
+      @data = data
+      super(parent.postspec, parent, search_path, push_sql, nil, fox.ids, fox.anchors)
+    end
+
+    def self.new(parent, search_path, type, files)
+      fox_signature = self.fox_signature(files)
+      fox = @fox_pool[fox_signature] ||= FixtureFox::Fox.new(type, files, schema: search_path.last)
+
+      # The IDs for the data signature is the minimal set of external IDs used by the Fox object
+      ids = fox.tables.map { |table| (id = parent.ids[table.uid]) ? [table.uid, id] : nil }.compact.to_h
+      data_signature = self.data_signature(fox_signature, ids, fox.referenced_anchors)
+      data = @data_pool[data_signature] ||= fox.data(ids: parent.ids, anchors: parent.send(:fox_anchors))
+      push_sql = @sql_pool[data_signature] ||= data.to_sql(format: :exec, ids: parent.ids, delete: :none)
+
+      object = FoxFrame.allocate
+      object.send(:initialize, parent, search_path, fox, data, push_sql)
+      object
+    end
+
+  private
+    @fox_pool = {}
+    @data_pool = {}
+    @sql_pool = {}
+
+    def self.fox_signature(files)
+      sha256 = Digest::SHA256.new
+      files.sort.each { |source| sha256 << source }
+      sha256.base64digest
+    end
+
+    def self.data_signature(fox_signature, ids, anchors)
+      sha256 = Digest::SHA256.new
+      ids.sort_by(&:first).each { |key, value| sha256 << key << ":" << value.to_s << ";" }
+      anchors.sort_by(&:uid).each { |anchor| sha256 << anchor.uid << "->" << anchor.id.to_s << ";" }
+      fox_signature + ":" + sha256.base64digest
+    end
+  end
+
+  class SeedFrame < Frame
+    def transaction?() false end
+
+    def initialize(postspec, ids, anchors = nil)
+      constrain ids, String => Integer
+      constrain anchors, FixtureFox::Anchors, NilClass
+      super(postspec, nil, nil, [], [], ids, anchors)
+    end
+  end
+
+  class EmptyFrame < Frame
+    def transaction?() false end
+
+    def initialize(postspec)
+      push_sql = pop_sql = postspec.render.delete_tables(postspec.tables.map(&:uid))
+      super(postspec, nil, nil, push_sql, pop_sql, {}, nil)
+    end
+  end
+end
+
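In frame.rb above, FoxFrame.new memoizes the parsed fixture object, the resolved data, and the generated SQL in class-level pools keyed by SHA-256 digests of the fixture files and of the external IDs and anchors, so identical fixture sets are parsed and rendered only once. Below is a minimal, standalone sketch of that content-addressed caching pattern; FixtureCache and load_fixture are illustrative stand-ins, not part of postspec:

require 'digest'

# Content-addressed memoization: identical sets of fixture sources map to the
# same digest, so the expensive parse step runs only once per unique set.
class FixtureCache
  def initialize
    @pool = {} # digest => parsed fixture
  end

  def fetch(files)
    @pool[signature(files)] ||= load_fixture(files)
  end

  private

  # Mirrors FoxFrame.fox_signature: hash the sorted sources
  def signature(files)
    sha256 = Digest::SHA256.new
    files.sort.each { |source| sha256 << source }
    sha256.base64digest
  end

  # Hypothetical stand-in for FixtureFox::Fox.new(type, files, ...)
  def load_fixture(files)
    files.map(&:upcase) # placeholder for real parsing
  end
end

Keying the pool on a digest of the sorted sources makes the cache insensitive to file order while still invalidating it as soon as any fixture content changes.
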
data/lib/postspec/render.rb
@@ -0,0 +1,178 @@
+
+module Postspec
+  class Render
+    attr_reader :postspec
+    forward_to :postspec, :conn
+
+    SEED_BUD_TRIGGER_NAME = "postspec_readonly_bud_trg"
+    SEED_BT_TRIGGER_NAME = "postspec_readonly_bt_trg"
+
+    def initialize(postspec)
+      constrain postspec, Postspec
+      @postspec = postspec
+    end
+
+    def truncate_tables(uids) ["truncate #{uids.join(', ')} cascade"] end
+    def delete_tables(uids) uids.map { |uid| "delete from #{uid}" } end # FIXME DUPLICATED
+    def reset_postspec_tables()
+      delete_tables %w(postspec.runs postspec.seeds postspec.inserts postspec.updates postspec.deletes)
+    end
+
+    def postspec_schema(state) raise NotYet end
+
+    def change_triggers(state)
+      constrain state, lambda { |state| [:create, :drop].include?(state) }
+      postspec.tables.map { |table|
+        %w(insert update delete).map { |event|
+          name = "register_#{event}_b#{event[0]}_trg"
+          exist = postspec.meta.exist?("#{table.uid}.#{name}()")
+          if state == :create && !exist
+            ref = (event == "insert" ? "new" : "old")
+            <<~EOS
+              create trigger #{name} before #{event} on #{table.uid}
+              for each row
+              execute function postspec.register_#{event}()
+            EOS
+          elsif state == :drop && exist
+            "drop trigger if exists #{name} on #{table.uid}"
+          else
+            nil
+          end
+        }.compact
+      }.flatten
+    end
+
+    def seed_triggers(state, uids = nil)
+      case state
+        when :create; create_seed_triggers(uids)
+        when :drop; drop_seed_triggers
+      else
+        raise ArgumentError
+      end
+    end
+
+    def drop_seed_triggers
+      postspec.tables.map { |uid|
+        [SEED_BUD_TRIGGER_NAME, SEED_BT_TRIGGER_NAME].map { |trigger|
+          trigger_uid = "#{uid}.#{trigger}()"
+          postspec.meta.exist?(trigger_uid) ? "drop trigger #{trigger} on #{uid}" : nil
+        }.compact
+      }.flatten
+    end
+
+    # Create readonly seed triggers. Readonly triggers are used to raise an
+    # error when seed data are updated, deleted, or truncated. They all call
+    # the common postspec.readonly_failure() function that raises a Postgres
+    # exception
+    def create_seed_triggers(uids)
+      constrain uids, String => [Integer, NilClass]
+      result = []
+      uids.map { |uid, id|
+        bud_trigger = "#{uid}.#{SEED_BUD_TRIGGER_NAME}()"
+        bud_sql = <<~EOS1
+          create trigger #{SEED_BUD_TRIGGER_NAME}
+          before update or delete on #{uid}
+          for each row
+          when (old.id <= #{id})
+          execute function postspec.readonly_failure('#{uid}')
+        EOS1
+        bt_sql = <<~EOS2
+          create trigger postspec_readonly_trigger_bt
+          before truncate on #{uid}
+          execute function postspec.readonly_failure('#{uid}')
+        EOS2
+        [bud_sql, bt_sql, "insert into postspec.seeds (table_uid, record_id) values ('#{uid}', #{id})"]
+      }.flatten
+    end
+
+    def execution_unit(tables, sql)
+      return [] if sql.empty?
+      materialized_views =
+        tables.select { |uid| uid !~ /^postspec\./ }.map { |uid|
+          postspec.type.dot(uid).depending_materialized_views
+        }.flatten.map(&:uid).uniq
+      sql =
+        tables.map { |uid| "alter table #{uid} disable trigger all" } +
+        sql +
+        tables.map { |uid| "alter table #{uid} enable trigger all" } +
+        materialized_views.map { |uid| "refresh materialized view #{uid}" }
+    end
+
+    def delete_tables(arg)
+      constrain arg, Array, Hash
+      uids = arg.is_a?(Array) ? arg.map { |uid| [uid, 0] }.to_h : arg
+      sql =
+        uids.map { |uid, id| "delete from #{uid}" + (id > 0 ? " where id > #{id}" : "") } +
+        uids.select { |uid|
+          uid =~ /^postspec\./ ? true : !postspec.type.dot(uid).subtable?
+        }.map { |uid, id|
+          "alter table #{uid} alter column id restart" + (id > 0 ? " with #{id+1}" : "")
+        }
+    end
+
+    # FIXME: doesn't seem to be any improvement performance-wise
+    def delete_tables_new(arg)
+      constrain arg, Array, Hash
+      if arg.is_a?(Array)
+        delete_all = arg
+        delete_only = {}
+      else
+        delete_all = []
+        delete_only = {}
+        arg.each { |uid, id|
+          if id == 0
+            delete_all << uid
+          else
+            delete_only[uid] = id
+          end
+        }
+      end
+      table_alias_index = 0
+      if delete_all.empty?
+        delete_all_sql = []
+      else
+        delete_all_sql = [
+          "with " +
+          delete_all.map { |uid|
+            "t#{table_alias_index += 1} as (delete from #{uid} returning 1 as id)"
+          }.join(", ") +
+          " select " + (1...table_alias_index).map { |i| "t#{i}.id" }.join(", ") +
+          " from " + (1...table_alias_index).map { |i| "t#{i}" }.join(", ")
+        ] +
+        delete_all.map { |uid| "alter table #{uid} alter column id restart" }
+      end
+      delete_only_sql =
+        delete_only.map { |uid, id| "delete from #{uid}" + (id > 0 ? " > #{id}" : "") } +
+        delete_only.map { |uid, id| "alter table #{uid} alter column id restart with #{id+1}" }
+      sql = delete_all_sql + delete_only_sql
+      # uids.map { |uid, id| "delete from #{uid}" + (id > 0 ? " > #{id}" : "") } +
+      # uids.map { |uid, id| "alter table #{uid} alter column id restart" }
+    end
+  end
+end
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
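Render#execution_unit above brackets a batch of generated statements with `alter table ... disable trigger all` / `enable trigger all` and appends `refresh materialized view` for views that depend on the touched tables. Here is a small sketch of how such a unit could be executed against Postgres with the pg gem; the connection settings and table names are placeholders, and postspec's own execution path is not shown in this diff:

require 'pg'

# Wrap a batch of statements so user triggers don't fire while fixture data is
# loaded, then refresh dependent materialized views afterwards.
def run_execution_unit(conn, tables, statements, materialized_views = [])
  sql =
    tables.map { |uid| "alter table #{uid} disable trigger all" } +
    statements +
    tables.map { |uid| "alter table #{uid} enable trigger all" } +
    materialized_views.map { |uid| "refresh materialized view #{uid}" }

  conn.transaction { sql.each { |stmt| conn.exec(stmt) } }
end

# Example (placeholder connection and schema):
#   conn = PG.connect(dbname: "myapp_test")
#   run_execution_unit(conn, %w(public.users),
#                      ["insert into public.users (name) values ('alice')"])

Running the whole unit inside one transaction keeps the triggers from being left disabled if any statement fails.
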
data/lib/postspec/state.rb
@@ -0,0 +1,112 @@
+module Postspec
+  class State
+    attr_reader :id
+    attr_reader :mode
+    attr_accessor :ready
+    attr_accessor :clean
+    attr_accessor :status
+    attr_reader :created_at
+    attr_reader :updated_at
+
+    def duration() @duraction ||= (1000 * (updated_at - created_at)).round(0) end
+
+    # Maps from table UID to sorted list of record IDs.
+    # TODO: Are they in use? Should they be used?? Used from postspec!
+    # FIXME: Move to Frame - maybe?
+    def inserted() get_multimap("inserts") end
+    def updated() get_multimap("updates") end
+    def deleted() get_multimap("deletes") end
+
+    # Map from table UID to max record ID for that table
+    def seeds() @seeds ||= get_map("seeds") end
+
+    def refresh() @inserted = @updated = @deleted = @seeds = nil end
+
+    def self.create(conn, mode)
+      id, created_at = conn.tuple <<~EOS
+        insert into postspec.runs (mode) values ('#{mode}') returning id, created_at
+      EOS
+      State.new(conn, id, mode, false, false, nil, created_at, nil)
+    end
+
+    # Return true if change-tables contains any records. It is used to check if
+    # the developer made any changes to the database after a (successful)
+    # postspec run
+    def self.dirty?(conn)
+      conn.value(%(
+        select true as present from postspec.inserts
+        union select true from postspec.updates
+        union select true from postspec.deletes
+      )) || false
+    end
+
+    def self.ensure(conn, mode)
+    end
+
+    def self.read(conn)
+      tuples = conn.tuples <<~EOS
+        select id, mode, ready, clean, status, created_at, updated_at
+        from postspec.runs
+        order by id desc
+        limit 1
+      EOS
+      tuple = tuples.first
+      tuple && State.new(conn, *tuple)
+    end
+
+    def self.write(conn, state)
+      conn.exec <<~EOS
+        update postspec.runs
+        set ready = #{state.ready},
+            clean = #{state.clean},
+            status = #{state.status.nil? ? 'null' : state.status},
+            updated_at = now() at time zone 'UTC'
+        where id = #{state.id}
+      EOS
+      @updated_at = conn.value "select updated_at from postspec.runs where id = #{state.id}"
+    end
+
+    def dump
+      puts "State"
+      indent {
+        puts "id: #{id.inspect}"
+        puts "mode: #{mode.inspect}"
+        puts "ready: #{ready.inspect}"
+        puts "clean: #{clean.inspect}"
+        puts "status: #{status.inspect}"
+        puts "duration: #{@duration.inspect}"
+        puts "created_at: #{created_at.inspect}"
+      }
+    end
+
+  private
+    attr_reader :conn
+
+    def initialize(conn, id, mode, ready, clean, status, created_at, updated_at)
+      @conn = conn
+      @id = id
+      @mode = mode.to_sym
+      @ready = ready
+      @clean = clean
+      @status = status
+      @created_at = created_at
+      @updated_at = updated_at
+    end
+
+    def get_map(table_name) conn.map "select distinct table_uid, record_id from postspec.#{table_name}" end
+
+    def get_multimap(table_name)
+      h = Hash.new #([])
+      conn.tuples(%(
+        select distinct table_uid,
+               record_id
+        from postspec.#{table_name}
+        order by
+        table_uid, record_id
+      )).map { |uid, id|
+        (h[uid] ||= []) << id
+      }
+      h
+    end
+  end
+end
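State#get_multimap folds the flat (table_uid, record_id) rows of a change table such as postspec.inserts into a Hash from table UID to the sorted list of record IDs. The same grouping can be reproduced directly with the pg gem; this sketch assumes only the postspec.inserts/updates/deletes change tables referenced throughout the code, and the connection is a placeholder:

require 'pg'

# Group change rows by table: { "public.users" => [1, 2, 5], ... }
def changed_ids(conn, change_table)
  h = {}
  rows = conn.exec(
    "select distinct table_uid, record_id " \
    "from postspec.#{change_table} order by table_uid, record_id"
  )
  rows.each { |row| (h[row["table_uid"]] ||= []) << Integer(row["record_id"]) }
  h
end

# Example (placeholder connection):
#   conn = PG.connect(dbname: "myapp_test")
#   inserted = changed_ids(conn, "inserts")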