pg_graph 0.1.0
- checksums.yaml +7 -0
- data/.gitignore +18 -0
- data/.rspec +3 -0
- data/.ruby-version +1 -0
- data/.travis.yml +6 -0
- data/Gemfile +7 -0
- data/README.md +36 -0
- data/Rakefile +6 -0
- data/TODO +8 -0
- data/bin/console +14 -0
- data/bin/setup +8 -0
- data/doc/diagram.drawio +1 -0
- data/exe/pg_graph +152 -0
- data/lib/data/association.rb +98 -0
- data/lib/data/data.rb +551 -0
- data/lib/data/dimension.rb +51 -0
- data/lib/data/read.rb +44 -0
- data/lib/data/render.rb +237 -0
- data/lib/data/value.rb +96 -0
- data/lib/ext/meta.rb +56 -0
- data/lib/ext/module.rb +18 -0
- data/lib/pg_graph/inflector.rb +105 -0
- data/lib/pg_graph/reflector.rb +187 -0
- data/lib/pg_graph/timer.rb +119 -0
- data/lib/pg_graph/version.rb +3 -0
- data/lib/pg_graph.rb +124 -0
- data/lib/type/dump_type.rb +69 -0
- data/lib/type/read.rb +269 -0
- data/lib/type/type.rb +617 -0
- data/pg_graph.gemspec +40 -0
- data/snippets/1-1.sql +19 -0
- data/snippets/N-M.sql +24 -0
- data/snippets/dag.sql +19 -0
- data/snippets/db.sql +52 -0
- data/snippets/kind.sql +19 -0
- data/snippets/recur.sql +14 -0
- metadata +205 -0
data/lib/pg_graph/timer.rb
ADDED
@@ -0,0 +1,119 @@

module Timer
  class TimerNode
    attr_reader :timer
    attr_reader :label

    def initialize(timer, label)
      @timer, @label = timer, label
    end

    def label_width() label&.size || 0 end
    def value_width() raise NotThis end

    def total() raise NotThis end

    protected
    def format(time) sprintf "%.#{timer.scale}f", timer.factor * time end
  end

  class TimerElement < TimerNode
    attr_accessor :time

    def initialize(timer, label, time)
      super(timer, label)
      @time = time
    end

    def value_width() value.size end
    def value() @value ||= format time end

    def total() @time end

    protected
    def dump_element(file, indent, label_width, value_width)
      file.printf "#{indent}%-#{label_width}s: %#{value_width}s#{timer.unit}\n", label, value
    end
  end

  class TimerGroup < TimerNode
    attr_accessor :unit # Currently only :s or :ms. Default is :ms
    def factor() { s: 1, ms: 1000 }[unit] end
    attr_accessor :scale # Number of digits after the decimal point
    attr_reader :nodes # Array of TimerElement and nested TimerGroup objects

    def initialize(timer, label = nil, unit: :ms, scale: 2)
      super(timer, label)
      @unit = unit.to_sym
      @scale = scale
      @nodes = []
      @nodes_by_label = {}
    end

    def time(label, &block)
      t0 = Time.now
      r = yield
      dt = Time.now - t0
      if @nodes_by_label.key?(label)
        @nodes_by_label[label].time += dt
      else
        element = TimerElement.new(self, label, dt)
        @nodes << element
        @nodes_by_label[label] = element
      end
      r
    end

    def group(label, **opts)
      r = TimerGroup.new(self, label, **{ unit: unit, scale: scale }.merge(opts))
      @nodes << r
      r
    end

    def total()
      @nodes.inject(0) { |a,e| a + e.total }
    end

    def dump(file = $stdout, indent = "")
      dump_element(file, indent, label_width, value_width)
    end

    protected
    def label_width()
      if label
        ([label.size - 2] + @nodes.map { |node| node.send(:label_width) }).max + 2
      else
        @nodes.map { |node| node.send :label_width }.max
      end
    end

    def value_width() @value_width ||= @nodes.map { |node| node.send :value_width }.max end

    def dump_element(file, indent, label_width, value_width)
      if label
        file.puts "#{indent}#{label} (#{format(total)}#{timer.unit})"
        nodes.each { |node|
          node.send :dump_element, file, indent + "  ", label_width - 2, value_width
        }
      else
        nodes.each { |node|
          node.send :dump_element, file, indent, label_width, value_width
        }
      end
    end
  end

  class Timer < TimerGroup
    def initialize(label = nil, **opts)
      super(self, label, **opts)
    end

    def dump(file = $stdout)
      super
      if @nodes.size > 1
        file.printf "%-#{label_width}s: %#{value_width}s#{timer.unit}\n", "Total", format(total)
      end
    end
  end
end
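A minimal usage sketch of the Timer API above (illustrative, not part of the gem); the labels and sleep durations are made up, and the require path is assumed from the file's location under data/lib:

    require 'pg_graph/timer'   # assumed require path for data/lib/pg_graph/timer.rb

    t = Timer::Timer.new("report", unit: :ms, scale: 2)
    t.time("load") { sleep 0.01 }          # times the block and returns its value
    sub = t.group("render")                # nested group sharing the parent's unit/scale
    sub.time("header") { sleep 0.002 }
    sub.time("body")   { sleep 0.005 }
    t.dump                                 # prints an indented, aligned timing report to $stdout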
data/lib/pg_graph.rb
ADDED
@@ -0,0 +1,124 @@

require "pg_graph/version"

require "boolean"
require "constrain"
#def constrain(*args) end
require "developer_exceptions"

require "hash_tree"

require "ext/meta.rb"
require "ext/module.rb"

require "pg_graph/version.rb"
require "pg_graph/inflector.rb"
require "pg_graph/reflector.rb"

require "type/type.rb"
require "type/read.rb"
require "type/dump_type.rb"

require "data/data.rb"
require "data/association.rb"
require "data/value.rb"
require "data/dimension.rb"
require "data/read.rb"
require "data/render.rb"

module PgGraph
  include DeveloperExceptions

  class Error < StandardError; end

  # The supported ruby classes
  RUBY_CLASSES = Inflector::SUPPORTED_RUBY_CLASSES

  module Type
    # :call-seq:
    #   new(meta, reflect = nil)
    #   new(pg_conn, reflect = nil)
    #
    # The +reflect+ argument can be a Reflector object, a YAML array, a file
    # name, or nil. The +ignore+ option is a list of schema names to exclude
    # from the type system
    #
    # Note that together with ::=== and Database#is_a? this makes
    # Type::Database pretend it is an instance of the Type module
    #
    def self.new(arg, reflect = nil, ignore: [])
      constrain arg, PgMeta, PgConn
      constrain reflect, Reflector, Array, String, NilClass
      meta =
        case arg
        when PgMeta; arg
        when PgConn; PgMeta.new(arg)
        end
      reflector =
        case reflect
        when Reflector; reflect
        when Array; Reflector.load_yaml(reflect)
        when String; Reflector.load_file(reflect)
        when NilClass; Reflector.new
        end
      Database.new(meta.name, reflector).read(meta, ignore: ignore)
    end

    # Make the Type module pretend to have Database object instances
    def self.===(element) element.is_a?(PgGraph::Type::Connection) or super end

    class Database
      # :call-seq:
      #   instantiate()
      #   instantiate(hash)
      #   instantiate(yaml)
      #   instantiate(pg_conn)
      #
      def instantiate(arg = nil)
        constrain arg, NilClass, Hash, PgConn
        Data.new(self, arg)
      end

      # Let Type::Database objects pretend to be-a module Type object
      def is_a?(arg) arg == PgGraph::Type || super end
    end
  end

  module Data
    # :call-seq:
    #   new(type, hash = {})
    #   new(type, yaml = {})
    #   new(type, pg_conn = nil)
    #
    # Note that together with ::=== and Data::Database#is_a? this makes
    # Data::Database pretend it is an instance of the Data module
    def self.new(type, arg)
      constrain type, PgGraph::Type
      constrain arg, Hash, PgConn, NilClass
      Database.new(type, arg)
    end

    # Make the Data module pretend to have Database object instances
    def self.===(element) element.is_a?(PgGraph::Data::Connection) or super end

    class Database
      # Let Data::Database objects pretend to be-a module Data object
      def is_a?(arg) arg == PgGraph::Data || super end
    end
  end

  # Non-public namespace
  module PrivatePgGraph
    # Note that this changes the +args+ argument
    def self.extract_reflections(args)
      if args.last.is_a?(Hash)
        reflections = args.last.delete(:reflections) || []
        args.pop if args.last.empty? && !reflections.empty?
        reflections
      else
        []
      end
    end
  end
end
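A short usage sketch of the two entry points defined above (illustrative only, not shipped with the gem). It assumes `pg_conn` is an already-established connection object from the companion pg_conn gem; its construction is not shown here:

    require 'pg_graph'

    # `pg_conn` is assumed to be an existing PgConn connection object
    db_type = PgGraph::Type.new(pg_conn)       # reads metadata and builds the type system
    db_data = db_type.instantiate(pg_conn)     # wraps the same connection as a Data::Database
    db_type.is_a?(PgGraph::Type)               # => true, via the is_a?/=== overrides above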
data/lib/type/dump_type.rb
ADDED
@@ -0,0 +1,69 @@
require 'indented_io'

module PgGraph::Type
  class Node
    def dump(children = self.children.values.sort_by(&:name), link_info: false, supertable: nil)
      print identifier + (supertable ? " < #{supertable}" : "")
      puts
      indent { children.each { |child| child.dump(link_info: link_info) } }
    end
  end

  class Schema
    def dump(link_info: false)
      super(record_types.sort_by(&:name), link_info: link_info)
    end
  end

  class RecordType
    def dump(**opts)
      supertable = table.supertable&.identifier
      columns = fields.reject { |column|
        column.primary_key? || column.name =~ /_id$/ || column.name =~ /_kind$/ # FIXME
      }
      super(columns, supertable: supertable, **opts)
    end
  end

  class Column
    def dump(link_info: false)
      print "#{identifier}: "
      method = type.schema == record_type.schema ? :identifier : :schema_identifier
      if self.is_a?(MmTableColumn) && !self.is_a?(NmTableColumn)
        type_identifier = "{[#{type.element_type.send(method)}]}"
      else
        type_identifier = type.send(method)
      end
      print type_identifier
      print "()" if generated?
      print "[#{that_link_column}]" if kind?
      print "?" if nullable?
      print "!" if unique?
      dump_type if link_info
      puts
    end

    def dump_type() end
  end

  class RecordColumn
    def dump_type
      print " (#{this_link_column} -> #{type.table.name}.#{that_link_column}) (RecordColumn)"
    end
  end

  class TableColumn
    def dump_type
      print " (#{this_link_column} -> #{type.table.name}.#{that_link_column}) (TableColumn)"
    end
  end

  class MmTableColumn
    def dump_type
      print \
        " (#{this_link_column} -> #{mm_table.name}.#{this_mm_column}," +
        " #{mm_table.name}.#{that_mm_column} -> #{that_table.name}.#{that_link_column}) (MmTableColumn)"
    end
  end
end
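An illustrative call of the dump chain above (a sketch, assuming the root Database class defined in type/type.rb also participates in Node#dump). In the printed tree, the suffixes follow Column#dump: "()" marks generated columns, "[column]" kind links, "?" nullable and "!" unique columns:

    db_type = PgGraph::Type.new(pg_conn)   # as in lib/pg_graph.rb; pg_conn assumed to exist
    db_type.dump(link_info: true)          # prints schemas, record types, and annotated columns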
data/lib/type/read.rb
ADDED
@@ -0,0 +1,269 @@
module PgGraph::Type
  class Error < StandardError; end
  class KeyNotFound < Error; end

  class Database
    def read_meta(meta, ignore: [])
      # Arrays of [record_type, meta_column] tuples
      field_columns = []
      link_columns = []
      kind_columns = [] # The link side
      id_link_columns = [] # sub tables

      # Temporary arrays of meta tables
      tables = [] # Ordinary tables
      mm_tables = [] # M:M link tables

      # Create schemas and tables and initialize lists of objects
      meta.schemas.values.each { |meta_schema|
        next if ignore.include?(meta_schema.name)
        schema = Schema.new(self, meta_schema.name)
        meta_schema.tables.values.select { |t| t.table? }.each { |meta_table| # FIXME Ignore views for now
          (meta_table.mm_table? ? mm_tables : tables) << meta_table
          table = Table.new(
              schema, meta_table.name,
              mm_table: meta_table.mm_table?,
              depending_materialized_views: meta_table.depending_views.select(&:materialized?))
          record_type = RecordType.new(table)

          # Process columns
          array_columns = []
          meta_table.columns.values.each { |meta_column|
            # Create basic types needed by columns
            type_name = meta_column.type
            if !schema.key?(type_name) && !catalog.key?(type_name)
              if meta_column.array?
                element_name = meta_column.element_type
                dimensions = meta_column.dimensions
                if !schema.key?(element_name) && !catalog.key?(element_name)
                  element_type = SimpleType.new(catalog, element_name)
                else
                  element_type = schema[element_name] || catalog[element_name]
                end
                ArrayType.new(catalog, type_name, element_type, dimensions)
              else
                SimpleType.new(catalog, type_name)
              end
            end

            # Collect columns
            if meta_column.reference?
              if meta_column.name == "id"
                id_link_columns
              elsif meta_column.name =~ /^(?:.*_)?kind$/
                kind_columns
              else
                link_columns
              end
            else
              field_columns
            end << [record_type, meta_column]
          }
        }
      }

      # Build list of depending tables
      (tables + mm_tables).each { |meta_table|
        table = dot(meta_table.path)
        meta_table.depending_tables.each { |meta_depending_table|
          depending_table = dot(meta_depending_table.path)
          table.depending_tables << depending_table
          # puts "#{table.uid} -> #{depending_table.uid}"
        }
      }

      # Create postgres columns except kind_columns
      (id_link_columns + link_columns + field_columns).each { |record_type, meta_column|
        next if meta_column.kind?
        type = record_type.schema[meta_column.type] || catalog[meta_column.type]
        SimpleColumn.new(
            record_type, meta_column.name, meta_column.name, type,
            ordinal: meta_column.ordinal,
            **column_options(meta_column))
      }

      # Create and collect forward-references. link_fields is a list of [uid, record_column] tuples
      link_fields = []
      (link_columns + kind_columns).each { |record_type, meta_column|
        meta_column.references.each { |constraint|
          constraint.referencing_columns.size == 1 or raise Error, "Can't handle multi-column keys (for now)"
          type = dot(constraint.referenced_table.path).type.record_type
          this_link_column = constraint.referencing_columns.first.name
          that_link_column = constraint.referenced_columns.first.name

          field =
            if meta_column.kind?
              name = reflector.this(meta_column.uid) || meta_column.name
              name = meta_column.name if record_type[name] # Check for duplicates
              !record_type.nil? or raise Error, "Duplicate column name: #{name}" # Check again
              column_type = record_type.schema[meta_column.type] || catalog[meta_column.type]

              parent = (name != meta_column.name ? record_type : nil)
              kind_column = SimpleColumn.new(
                  parent, meta_column.name, meta_column.name, column_type,
                  ordinal: meta_column.ordinal,
                  **column_options(meta_column))

              KindRecordColumn.new(
                  record_type, name, meta_column.name, type, this_link_column, that_link_column,
                  kind_column, **column_options(meta_column))
            else
              name = reflector.this(meta_column.uid)
              RecordColumn.new(
                  record_type, name, meta_column.name, type, this_link_column, that_link_column,
                  **column_options(meta_column))
            end
          link_fields << [meta_column.uid, field]
        }
      }

      # Detect derived tables
      # link_fields.each { |uid, record_column|
      #   if record_column.this_link_column.primary_key? && that_link_column.primary_key?

      # Create back-reference fields
      link_fields.each { |uid, this_column|
        this_record_type = this_column.record_type
        this_table = this_record_type.table
        that_record_type = this_column.type
        next if this_table.mm_table?

        # Field name of back-reference
        if this_column.primary_key? # child record
          this_column.postgres_column == "id" or raise Error, "Primary keys should be named 'id'"
          name = this_record_type.name
        else
          name = reflector.that(uid, this_column.unique?, table: this_record_type.name)
          name ||= PgGraph.inflector.pluralize(this_column.table.name) if this_column.kind?
        end

        next if name.nil?

        # Check for name collisions
        if that_record_type.key?(name)
          # Check if this is a 1:1 relation with keys on both sides. In that
          # case, the back-reference is already created
          that_column = that_record_type[name]
          if that_column.is_a?(RecordColumn) && that_column.type == this_record_type
            next # Do nothing

          # Check if the reference spans schemas so we can disambiguate
          # by prefixing the schema name
          elsif this_column.table.schema != that_column.table.schema
            name = "#{this_column.table.schema.name}_#{name}"
          end
        end

        # Final check for name collisions
        !that_record_type.key?(name) or
            raise Error, "Name collision in reverse map for #{this_column.uid}, trying #{name}"

        # Create back-references
        if this_column.unique?
          RecordColumn.new(
              that_record_type, name, nil, this_record_type,
              this_column.that_link_column, this_column.this_link_column)
        else
          TableColumn.new(that_record_type, name, this_table.type,
              this_column.that_link_column, this_column.this_link_column)
        end
      }

      # Setup super/subtables
      id_link_columns.each { |this_record_type, meta_column|
        # Create forward and backward references for id links
        constraint = meta_column.references.first
        that_record_type = dot(constraint.referenced_table.path).type.record_type
        SuperRecordColumn.new(this_record_type, that_record_type)
        SubRecordColumn.new(that_record_type, this_record_type)

        # Create hierarchy
        this_table = this_record_type.table
        that_table = that_record_type.table
        this_table.instance_variable_set(:@supertable, that_table)
        that_table.instance_variable_set(:@has_subtables, true)
      }

      # Create N:M or M:M reference fields
      mm_tables.each { |link_meta_table|
        constraint1 = link_meta_table.referential_constraints.values[0]
        constraint2 = link_meta_table.referential_constraints.values[1]
        table1 = dot(constraint1.referenced_table.path)
        table2 = dot(constraint2.referenced_table.path)
        mm_table = dot(link_meta_table.path)

        link_column1 = constraint1.referenced_columns.first.name
        mm_column1 = constraint1.referencing_columns.first.name
        mm_column1_uid = constraint1.referencing_columns.first.uid

        link_column2 = constraint2.referenced_columns.first.name
        mm_column2 = constraint2.referencing_columns.first.name
        mm_column2_uid = constraint2.referencing_columns.first.uid

        column1_name = reflector.that(mm_column1_uid, false, table: table2.record_type.name)
        column2_name = reflector.that(mm_column2_uid, false, table: table1.record_type.name)

        # FIXME: DAGs over a super table create problems if reflections
        # don't match (eg. role_id/group_id instead of
        # child_group_id/parent_group_id)

        # Detect DAGs. This check is performed after column names have been
        # computed to allow for user defined reflections
        # if column1_name == column2_name && table1 == table2
        #   column1_name = reflector.that(mm_column1_uid, false)
        #   column2_name = reflector.that(mm_column2_uid, false)
        #   puts "BINGO"
        #   puts column1_name
        #   puts column2_name
        #   exit 1
        # else
        # end

        # puts "mm_tables.each"
        # indent {
        #   puts "mm_column1_uid: #{mm_column1_uid}"
        #   puts "mm_column2_uid: #{mm_column2_uid}"
        #   puts "table1: #{table1}"
        #   puts "column1_name: #{column1_name}"
        #   puts "table2: #{table2}"
        #   puts "column2_name: #{column2_name}"
        # }

        if table1.type.record_type.key?(column1_name)
          raise Error, "Duplicate column name in #{table1.identifier}: #{column1_name}"
        end

        if table2.type.record_type.key?(column2_name)
          raise Error, "Duplicate column name in #{table2.identifier}: #{column2_name}"
        end

        klass = link_meta_table.nm_table? ? NmTableColumn : MmTableColumn
        klass.new(
            table1.type.record_type, column1_name, table2.type, mm_table.type,
            link_column1, mm_column1, mm_column2, link_column2)
        klass.new(
            table2.type.record_type, column2_name, table1.type, mm_table.type,
            link_column2, mm_column2, mm_column1, link_column1)
      }

      self
    end

    private
    # Create an options Hash for ColumnField objects
    def column_options(meta_column)
      {
        primary_key: meta_column.primary_key?,
        identity: meta_column.identity?,
        nullable: meta_column.nullable?,
        unique: meta_column.unique?,
        readonly: !meta_column.updatable? || meta_column.generated?,
        generated: meta_column.generated?
      }
    end
  end
end
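The column classification in the "Collect columns" step above is the heart of read_meta. Restated on its own for clarity (an illustrative sketch, with `column` standing in for any object that responds to #reference? and #name like the meta columns above):

    # Mirrors the branching in read_meta's "Collect columns" step
    def classify(column)
      if column.reference?
        if column.name == "id"
          :id_link   # primary-key reference: links a subtable to its supertable
        elsif column.name =~ /^(?:.*_)?kind$/
          :kind      # kind/discriminator reference
        else
          :link      # ordinary foreign-key reference
        end
      else
        :field       # plain data column
      end
    end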