tinkit 0.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. data/LICENSE +176 -0
  2. data/README +11 -0
  3. data/Rakefile +75 -0
  4. data/lib/glue_envs/couchrest/couchrest_attachment_handler.rb +260 -0
  5. data/lib/glue_envs/couchrest/couchrest_files_mgr.rb +198 -0
  6. data/lib/glue_envs/couchrest_glue_env.rb +536 -0
  7. data/lib/glue_envs/files_mgr_base.rb +51 -0
  8. data/lib/glue_envs/filesystem/filesystem_files_mgr.rb +187 -0
  9. data/lib/glue_envs/filesystem_glue_env.rb +395 -0
  10. data/lib/glue_envs/mysql/mysql_files_mgr.rb +175 -0
  11. data/lib/glue_envs/mysql_glue_env.rb +428 -0
  12. data/lib/glue_envs/sdb_s3/sdb_s3_files_mgr.rb +314 -0
  13. data/lib/glue_envs/sdb_s3_glue_env.rb +248 -0
  14. data/lib/helpers/camel.rb +21 -0
  15. data/lib/helpers/filesystem_helpers.rb +27 -0
  16. data/lib/helpers/hash_helpers.rb +74 -0
  17. data/lib/helpers/log_helper.rb +34 -0
  18. data/lib/helpers/mime_types_new.rb +126 -0
  19. data/lib/helpers/old_more_open_struct.rb +28 -0
  20. data/lib/helpers/require_helper.rb +45 -0
  21. data/lib/helpers/tk_escape.rb +17 -0
  22. data/lib/midas/bufs_data_structure.rb +84 -0
  23. data/lib/midas/node_element_operations.rb +264 -0
  24. data/lib/tinkit.rb +38 -0
  25. data/lib/tinkit_base_node.rb +733 -0
  26. data/lib/tinkit_node_factory.rb +47 -0
  27. data/spec/couchrest_files_mgr_spec.rb +551 -0
  28. data/spec/couchrest_glue_spec.rb +246 -0
  29. data/spec/filesystem_files_mgr_spec.rb +236 -0
  30. data/spec/filesystem_glue_spec.rb +243 -0
  31. data/spec/filesystem_helpers_spec.rb +42 -0
  32. data/spec/helpers/bufs_node_builder.rb +17 -0
  33. data/spec/helpers/bufs_sample_dataset.rb +160 -0
  34. data/spec/helpers/bufs_test_environments.rb +81 -0
  35. data/spec/helpers/tmp_view_cleaner.rb +15 -0
  36. data/spec/lib_helpers/tk_escape_spec.rb +45 -0
  37. data/spec/mysql_files_mgr_spec.rb +250 -0
  38. data/spec/mysql_glue_spec.rb +214 -0
  39. data/spec/node_element_operations_spec.rb +392 -0
  40. data/spec/sdb_s3_files_mgr_spec/sdb_s3_files_mgr_spec1.rb +82 -0
  41. data/spec/sdb_s3_files_mgr_spec/sdb_s3_files_mgr_spec2.rb +68 -0
  42. data/spec/sdb_s3_files_mgr_spec/sdb_s3_files_mgr_spec3.rb +80 -0
  43. data/spec/sdb_s3_files_mgr_spec/sdb_s3_files_mgr_spec4.rb +110 -0
  44. data/spec/sdb_s3_files_mgr_spec/sdb_s3_files_mgr_spec5.rb +84 -0
  45. data/spec/sdb_s3_files_mgr_spec/sdb_s3_files_mgr_spec6.rb +83 -0
  46. data/spec/sdb_s3_files_mgr_spec/sdb_s3_files_mgr_spec7.rb +101 -0
  47. data/spec/sdb_s3_files_mgr_spec/sdb_s3_files_mgr_spec8.rb +92 -0
  48. data/spec/sdb_s3_files_mgr_spec/sdb_s3_files_mgr_spec_all.rb +266 -0
  49. data/spec/sdb_s3_glue_spec.rb +230 -0
  50. data/spec/tinkit_node_factory_spec.rb +1108 -0
  51. metadata +114 -0
data/lib/glue_envs/mysql/mysql_files_mgr.rb
@@ -0,0 +1,175 @@
+ require 'dbi'
+ require 'json'
+
+ require Tinkit.helpers 'mime_types_new'
+
+ module MysqlInterface
+   class FilesMgr
+
+     class << self; attr_accessor :dbh; end
+     @@home_dir = ENV["HOME"]
+     @@my_pw = File.open("#{@@home_dir}/.locker/tinkit_mysql"){|f| f.read}.strip
+
+     @dbh = DBI.connect("DBI:Mysql:tinkit:localhost", "tinkit", @@my_pw)
+
+     #Table structure
+     MySqlPrimaryKey = '__pkid-file'
+     NodeName = 'node_name'
+     Basename = 'basename'
+     ContentType = 'content_type'
+     ModifiedAt = 'modified_at'
+     RawContent = 'raw_content'
+     FileTableKeys = [MySqlPrimaryKey, NodeName, Basename, ContentType, ModifiedAt, RawContent]
+
+     TablePostFix = "_files" #TODO: See if you can get away from that
+     #options: include in model_save_params, or let base node pass on methods to
+     #underlying glue env (probably this)
+
+     attr_accessor :file_table_name
+
+     def initialize(glue_env, node_key_value)
+       @dbh = self.class.dbh
+       @file_table_name = glue_env.file_mgr_table
+     end
+
+     def add(node, file_datas)
+       filenames = []
+       file_datas.each do |file_data|
+         filenames << file_data[:src_filename]
+       end
+
+       filenames.each do |filename|
+         basename = File.basename(filename)
+         #derive content_type
+         content_type = MimeNew.for_ofc_x(basename)
+         #derive modified time from file
+         modified_at = File.mtime(filename).to_s
+         rb = 'rb' #lazily avoiding escape issues
+         node_name = node.__send__(node.my_GlueEnv.model_key.to_sym)
+         fields_str = "`#{NodeName}`, `#{Basename}`, `#{ContentType}`, `#{ModifiedAt}`, `#{RawContent}`"
+         prep_sql = "REPLACE INTO `#{@file_table_name}` (#{fields_str})
+                      VALUES ( ?, ?, ?, ?, ?)"
+         sth = @dbh.prepare(prep_sql)
+         values_input = [node_name, basename, content_type, modified_at, File.open(filename, rb){|f| f.read}]
+         sth.execute(*values_input)
+       end
+       filenames.map {|f| TkEscape.escape(File.basename(f))} #return basenames
+     end
+
+     def add_raw_data(node, attach_name, content_type, raw_data, file_modified_at = nil)
+       raise "No data provided for file" unless raw_data
+       modified_at = file_modified_at || Time.now.to_s
+
+       node_name = node.__send__(node.my_GlueEnv.model_key.to_sym)
+       fields_str = "`#{NodeName}`, `#{Basename}`, `#{ContentType}`, `#{ModifiedAt}`, `#{RawContent}`"
+       prep_sql = "REPLACE INTO `#{@file_table_name}` (#{fields_str})
+                    VALUES ( ?, ?, ?, ?, ?)"
+       sth = @dbh.prepare(prep_sql)
+       values_input = [node_name, attach_name, content_type, modified_at, raw_data]
+       sth.execute(*values_input)
+       return [attach_name]
+     end
+
+     def list(node)
+       model_key = node.my_GlueEnv.model_key
+
+       sql = "SELECT `#{Basename}` FROM `#{@file_table_name}`
+               WHERE `#{NodeName}` = '#{node.__send__(model_key.to_sym)}'"
+       sth = @dbh.prepare(sql)
+       rtn = []
+       sth.execute
+       while row = sth.fetch do
+         rtn << row.to_h
+       end
+       sth.finish
+       rtn.map{|basename_hash| basename_hash.values}.flatten #return just the basenames
+     end
+
+     def get_raw_data(node, file_basename)
+       model_key = node.my_GlueEnv.model_key
+       sql = "SELECT `#{RawContent}` FROM `#{@file_table_name}`
+               WHERE `#{NodeName}` = '#{node.__send__(model_key.to_sym)}'
+               AND `#{Basename}` = '#{file_basename}'"
+       #puts "Raw Data SQL: #{sql}"
+       sth = @dbh.prepare(sql)
+       rtn = []
+       sth.execute
+       while row = sth.fetch do
+         rtn << row.to_h
+       end
+       sth.finish
+       rtn_val = rtn.first || {} #remember in production to sort on internal primary id (once delete revisions works)
+       rtn_val['raw_content']
+     end
+
+     #todo: change name to get_files_metadata
+     def get_attachments_metadata(node)
+       files_md = {}
+       md_list = FileTableKeys - [RawContent] #don't mutate the FileTableKeys constant itself
+       md_fields = md_list.join("`, `")
+
+       model_key = node.my_GlueEnv.model_key
+       sql = "SELECT `#{md_fields}` FROM `#{@file_table_name}`
+               WHERE `#{NodeName}` = '#{node.__send__(model_key.to_sym)}'"
+       sth = @dbh.prepare(sql)
+       rtn = []
+       sth.execute
+       while row = sth.fetch do
+         rtn << row.to_h
+       end
+       sth.finish
+       rtn.each do |obj_md|
+         obj_md_file_modified = obj_md["modified_at"]
+         obj_md_content_type = obj_md["content_type"]
+         new_md = {:content_type => obj_md_content_type, :file_modified => obj_md_file_modified}
+         new_md.merge!(obj_md) #merge! so the original metadata is kept rather than discarded
+         files_md[obj_md["basename"]] = new_md
+       end
+       files_md
+     end#def
+
+     def subtract(node, file_basenames)
+       if file_basenames == :all
+         subtract_all(node)
+       else
+         subtract_some(node, file_basenames)
+       end
+     end
+
+     def subtract_all(node)
+       model_key = node.my_GlueEnv.model_key
+       sql = "DELETE FROM `#{@file_table_name}`
+               WHERE `#{NodeName}` = '#{node.__send__(model_key.to_sym)}'"
+       @dbh.do(sql)
+     end
+
+     def subtract_some(node, file_basenames)
+       file_basenames = [file_basenames].flatten
+       model_key = node.my_GlueEnv.model_key
+       #probably get better performance by changing the sql match query
+       #rather than iterating
+       file_basenames.each do |file_basename|
+         sql = "DELETE FROM `#{@file_table_name}`
+                 WHERE `#{NodeName}` = '#{node.__send__(model_key.to_sym)}'
+                 AND `#{Basename}` = '#{file_basename}'"
+
+         @dbh.do(sql)
+       end
+     end#def
+   end#class
+ end#mod
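
The queries above key every attachment row to its owning node through node.__send__(node.my_GlueEnv.model_key.to_sym), so any object exposing that small interface can stand in for a node. A minimal usage sketch (hypothetical stand-in names; it assumes the `tinkit` MySQL database and ~/.locker/tinkit_mysql password file the class reads at load time, and a files table that already exists under the given name):

    require 'tinkit' #provides the Tinkit.helpers require helper used above

    #Stand-in structs: just enough interface for MysqlInterface::FilesMgr
    GlueEnvStub = Struct.new(:model_key, :file_mgr_table)
    NodeStub = Struct.new(:my_GlueEnv, :my_id)

    glue = GlueEnvStub.new(:my_id, 'example_user_files') #hypothetical table name
    node = NodeStub.new(glue, 'node-42')

    mgr = MysqlInterface::FilesMgr.new(glue, 'node-42')
    mgr.add_raw_data(node, 'notes.txt', 'text/plain', 'hello') # => ["notes.txt"]
    mgr.list(node)                                             # => ["notes.txt"]
    mgr.get_raw_data(node, 'notes.txt')                        # => "hello"
    mgr.subtract(node, :all)                                   #deletes every row for 'node-42'
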
data/lib/glue_envs/mysql_glue_env.rb
@@ -0,0 +1,428 @@
+ #Tinkit directory structure defined in lib/helpers/require_helper
+ require Tinkit.midas 'bufs_data_structure'
+ require Tinkit.glue '/mysql/mysql_files_mgr'
+ require Tinkit.helpers 'hash_helpers'
+ require Tinkit.helpers 'log_helper'
+
+ require 'json'
+ require 'dbi'
+
+ module MysqlEnv
+   class << self; attr_accessor :dbh; end
+   @@home_dir = ENV["HOME"]
+   @@my_pw = File.open("#{@@home_dir}/.locker/tinkit_mysql"){|f| f.read}.strip
+
+   self.dbh = DBI.connect("DBI:Mysql:tinkit:localhost", "tinkit", @@my_pw)
+
+   class GlueEnv
+
+     @@log = TinkitLog.set(self.name, :warn)
+
+     #table format
+     # primary key - autogenerated integer, should not be visible outside of the db model
+     # model key - the actual key that will be used to store the data
+     # version key - not sure if it will be used
+     # namespace key - name to identify this is the mysql interface?
+     #used to identify metadata for models (should be consistent across models)
+     #ModelKey = :my_id
+     VersionKey = :_rev #derived from timestamp
+     NamespaceKey = :mysql_namespace
+     #Mysql primary key id,
+     #so we can use auto-incrementing int primary keys
+     #without worrying about forcing user data to conform
+     PersistLayerKey = '__mysql_pk'
+
+     #TODO: Don't set directly to constant, use accessor
+
+     attr_accessor :user_id,
+                   :user_datastore_location,
+                   :metadata_keys,
+                   :required_instance_keys,
+                   :required_save_keys,
+                   :node_key,
+                   :model_key,
+                   :version_key,
+                   :namespace_key,
+                   :_files_mgr_class,
+                   :views,
+                   :model_save_params,
+                   :moab_data,
+                   :persist_layer_key,
+                   #accessors specific to this persistence model
+                   :dbh, #database handle (used by the spec)
+                   :file_mgr_table #identifies the table the FilesMgr class should use
+
+     def initialize(persist_env, data_model_bindings)
+       #TODO: Determine if class_name is needed to segment cluster data within user data
+       #host = "https://sdb.amazonaws.com/" (not provided by user)
+       @dbh = MysqlEnv.dbh
+       mysql_env = persist_env[:env]
+       #TODO: validations on format
+
+       @user_id = mysql_env[:user_id]
+       @cluster_name = persist_env[:name]
+       #use namespace generator?
+       @domain_table_name = "#{mysql_env[:path]}__#{@user_id}"
+       #data_model_bindings from NodeElementOperations
+       key_fields = data_model_bindings[:key_fields]
+       initial_views_data = data_model_bindings[:views]
+
+       @required_instance_keys = key_fields[:required_keys] #DataStructureModels::Tinkit::RequiredInstanceKeys
+       @required_save_keys = key_fields[:required_keys] #DataStructureModels::Tinkit::RequiredSaveKeys
+       @node_key = key_fields[:primary_key] #DataStructureModels::Tinkit::NodeKey
+
+       @version_key = VersionKey
+       @model_key = @node_key #ModelKey
+       @persist_layer_key = PersistLayerKey
+       @namespace_key = NamespaceKey
+       @metadata_keys = [@persist_layer_key, @version_key, @namespace_key]
+
+       initial_table_fields = @required_instance_keys + @required_save_keys + @metadata_keys
+       initial_table_fields.compact!
+       initial_table_fields.uniq!
+       #may want to verify a flat array
+       node_identifying_keys = [@model_key] #, @version_key]
+
+       @user_datastore_location = use_table!(initial_table_fields, node_identifying_keys, @domain_table_name)
+
+       @model_save_params = {:dbh => dbh, :table => user_datastore_location, :node_key => @node_key}
+       @_files_mgr_class = MysqlInterface::FilesMgr
+       @file_mgr_table = create_file_mgr_table
+       #@views = "temp"
+     end#def
+
+     def save(new_data)
+       #raise "Required key missing" unless @model_save_params[:required_save_key] == @required_save_key
+       rev = Time.now.hash.to_s
+       new_data[@version_key] = rev
+       orig_cols = get_existing_columns(@user_datastore_location)
+       new_cols = new_data.keys
+       table_name = reconcile_table(@user_datastore_location, orig_cols, new_cols)
+       esc_col_names = new_data.keys.map{|k| "`#{k}`"}.join(",")
+       json_values = new_data.values.map{|v| "'#{v.to_json}'"}.join(",")
+
+       #Need to update a bit when moved to tinkit (formerly bufs) to account for revs
+       sql = "REPLACE INTO `#{table_name}` (#{esc_col_names}) VALUES (#{json_values})"
+       @dbh.do(sql)
+       new_data['rev'] = new_data[@version_key]
+     end
+
+     def get(id)
+       @@log.info {"Getting #{id} from #{@user_datastore_location} using key #{@model_key}"} if @@log.info?
+       #get all records with the given id and return the one with the highest (internal) primary key,
+       #because tinkit _rev has no ordering properties ... this may be a bug eventually, ok so far
+       sql = "SELECT * FROM `#{@user_datastore_location}` WHERE `#{@model_key}` = '#{id.to_json}'"
+       sth = @dbh.prepare(sql)
+       rtn = []
+       sth.execute
+       while row = sth.fetch do
+         rtn << row.to_h
+       end
+       sth.finish
+       rtn_raw = rtn.first || {} #remember in production to sort on internal primary id
+       rtn_raw.delete(PersistLayerKey)
+       rtnj = {}
+       rtn_raw.each do |k,v|
+         rtnj[k] = jparse(v)
+       end
+       rtn_h = HashKeys.str_to_sym(rtnj)
+       rtn_h = nil if rtn_h.empty?
+       return rtn_h
+     end
+
+     def destroy_node(model_metadata)
+       @@log.debug "destroy node metadata: #{model_metadata.inspect}"
+       key, key_value = if model_metadata[@model_key]
+         [@model_key, model_metadata[@model_key].to_json]
+       elsif model_metadata[@persist_layer_key]
+         #fall back to the internal primary key (stored raw, not JSON-encoded)
+         [@persist_layer_key, model_metadata[@persist_layer_key]]
+       else
+         raise "No key in model metadata for deletion"
+       end
+       node_id = key_value
+       node_rev = model_metadata[@version_key]
+       sql = "DELETE FROM `#{@user_datastore_location}`
+               WHERE `#{key}` = '#{node_id}'"
+       @dbh.do(sql)
+     end
+
+     def generate_model_key(namespace, node_key_value)
+       #We can get away with this because the node key value is a first-class lookup in mysql,
+       #and uniqueness is handled within a table (rather than globally, as in some other persistence layers).
+       #However, are there any side effects?
+       "#{node_key_value}"
+       #generate_pk_data(node_key_value)
+     end
+
+     #This is a bit hokey. It takes the node_key to look up the db primary key
+     #in order to look up the node. This is not the best use of mysql:
+     #the other persistence layers need unique lookup keys, while sql can natively use any.
+     #Short version: this should be optimized to a better way.
+
+     #def generate_pk_data(record_id)
+     #  url_friendly_class_name = self.class.name.gsub('::','-')
+     #  sql = "SELECT `#{@persist_layer_key}` FROM `#{@user_datastore_location}`
+     #          WHERE `#{@model_key}` = '#{record_id}'"
+     #  sth = @dbh.prepare(sql)
+     #  rtn = []
+     #  sth.execute
+     #  while row = sth.fetch do
+     #    rtn << row.to_h
+     #  end
+     #  sth.finish
+     #  raise "Multiple records for the unique key: #{@model_key} => #{record_id}" if rtn.size > 1
+     #  return nil unless rtn.size > 0
+     #  pk_data = rtn.first[@persist_layer_key]
+     #end
+
+     def query_all
+       sql = "SELECT * FROM `#{@user_datastore_location}`"
+       sth = @dbh.prepare(sql)
+       rtn = []
+       rtn_raw_rows = []
+       sth.execute
+       while row = sth.fetch do
+         rtn_raw_rows << row.to_h
+       end
+       sth.finish
+       rtn_raw_rows.each do |rtn_raw|
+         rtn_raw.delete(PersistLayerKey)
+         rtnj = {} #fresh hash per row, so keys from one row don't leak into the next
+         rtn_raw.each do |k,v|
+           rtnj[k] = jparse(v)
+         end
+         rtn << HashKeys.str_to_sym(rtnj)
+       end
+       return rtn
+     end
+
+     def raw_all
+       query_all
+     end
+
+     #current relations supported:
+     # - :equals (data in the key field matches this_value)
+     # - :contains (this_value is contained in the key field data; same as equals for non-enumerable types)
+     def find_nodes_where(key, relation, this_value)
+       res = case relation
+         when :equals
+           find_equals(key, this_value)
+         when :contains
+           find_contains(key, this_value)
+       end #case
+       return res
+     end
+
+     def find_equals(key, this_value)
+       results = []
+       query_all.each do |record|
+         test_val = record[key]
+         results << record if test_val == this_value
+       end
+       results
+     end
+
+     def find_contains(key, this_value)
+       #TODO: Make into map/reduce to be more efficient
+       #(SQL has native support for this type of lookup; consider using it rather than filtering in Ruby.)
+       sql = "SELECT * FROM `#{@user_datastore_location}`"
+       sth = @dbh.prepare(sql)
+       rtn_raw_list = []
+       final_rtn = []
+       sth.execute
+       while row = sth.fetch do
+         rowh = row.to_h
+         rowh.delete(PersistLayerKey)
+         rtn_raw_list << rowh if find_contains_type_helper(rowh[key.to_s], this_value)
+       end
+       sth.finish
+       rtn_raw_list.each do |rtn_raw|
+         rtnj = {}
+         rtn_raw.each {|k,v| rtnj[k] = jparse(v) }
+         final_rtn << HashKeys.str_to_sym(rtnj)
+       end
+       #return full data for select results
+       return final_rtn
+     end
+
+     def find_contains_type_helper(stored_dataj, this_value)
+       stored_data = jparse(stored_dataj)
+       if stored_data.respond_to?(:include?)
+         stored_data.include?(this_value)
+       else
+         stored_data == this_value
+       end
+     end
+
+     def destroy_bulk(records)
+       record_key_data = records.map{|r| r[@model_key].to_json}
+       #record_rev = ?
+       record_key_data_sql = record_key_data.join("', '")
+       sql = "DELETE FROM `#{@user_datastore_location}`
+               WHERE `#{@model_key}` IN ('#{record_key_data_sql}')"
+       @dbh.do(sql)
+     end
+
+     def use_table!(fields, keys, table_name)
+       table_name = find_table!(fields, keys, table_name)
+       raise "No table could be found or created" unless table_name
+       column_names = get_existing_columns(table_name)
+       fields_str = fields.map{|f| f.to_s}
+       unless fields_str.sort == column_names.sort
+         #table has changed; reconcile by adding the unmatched fields to it
+         table_name = reconcile_table(table_name, column_names, fields_str)
+       end
+       return table_name
+     end
+
+     def find_table!(fields, keys, table_name)
+       rtn_val = table_name
+       tables = @dbh.tables
+       unless tables.include? table_name
+         create_table(fields, keys, table_name)
+       end
+       rtn_val
+     end
+
+     def create_table(fields, keys, table_name)
+       rtn_val = nil
+       field_str_list = []
+       fields.delete(PersistLayerKey)
+       fields.each do |field|
+         field_str_list << "`#{field}` VARCHAR(255) NOT NULL,"
+       end
+       field_str = field_str_list.join("\n")
+       #change this for tinkit
+       mk_str = "UNIQUE KEY `_uniq_idx`(`#{keys.join("`, `")}`)"
+       sql = "CREATE TABLE `#{table_name}` (
+               `#{PersistLayerKey}` INT NOT NULL AUTO_INCREMENT,
+               #{field_str}
+               PRIMARY KEY ( `#{PersistLayerKey}` ),
+               #{mk_str} )"
+       @dbh.do(sql)
+       rtn_val = table_name if @dbh.tables.include? table_name
+     end
+
+     def get_existing_columns(table_name)
+       existing_columns = []
+       sql = "DESCRIBE #{table_name}"
+       sth = @dbh.prepare(sql)
+       sth.execute
+       sth.each do |row|
+         fld = row.to_h['Field']
+         existing_columns << fld if fld
+       end
+       #for select queries sth.column_names would work too (but is less efficient)
+       sth.finish
+       return existing_columns
+     end
+
+     def reconcile_table(table_name, orig_cols, new_cols, opts = {})
+       opts[:allow_remove] = opts[:allow_remove] || nil #no-op, but documents the option
+       current_cols = get_existing_columns(table_name).map{|col| col.to_sym}
+       orig_cols = orig_cols.map{|col| col.to_sym}
+       new_cols = new_cols.map{|col| col.to_sym}
+       add_cols = new_cols - current_cols - [@persist_layer_key]
+       add_cols.delete_if{|col| col =~ /^XXXX_/ } #don't re-add retired columns
+       remove_cols = current_cols - new_cols - [@persist_layer_key]
+       remove_cols.delete_if{|col| col !~ /^XXXX_/ } #only advance already-retired (XXXX_-prefixed) columns
+       if opts[:allow_remove]
+         remove_columns(table_name, remove_cols) unless remove_cols.empty?
+       end
+       add_columns(table_name, add_cols) unless add_cols.empty?
+       return table_name
+     end
+
+     def add_columns(table_name, add_cols)
+       add_list = []
+       add_cols.each do |col_name|
+         add_list << "ADD `#{col_name}` VARCHAR (255)"
+       end
+       add_sql = add_list.join(", ")
+       sql = "ALTER TABLE `#{table_name}` #{add_sql}"
+       sth = @dbh.prepare(sql)
+       sth.execute
+       sth.finish
+     end
+
+     def remove_columns(table_name, remove_cols)
+       #Columns are retired in two stages rather than dropped outright:
+       #a column is first renamed with an XXXX_ prefix, and once it has
+       #acquired an XXXX_XXXX_ prefix it is actually dropped.
+       remove_list = []
+       existing_cols = get_existing_columns(table_name)
+       existing_cols.each do |curr_col_name|
+         expired_col_regexp = /^XXXX_XXXX_/
+         if curr_col_name.match expired_col_regexp
+           #iterating in ruby makes cleaner ruby code, but is not as efficient
+           sql = "ALTER TABLE #{table_name} DROP #{curr_col_name}"
+           sth = @dbh.prepare(sql)
+           sth.execute
+           sth.finish
+         end#if
+       end
+       removed_cols = existing_cols - get_existing_columns(table_name)
+       remove_cols = remove_cols - removed_cols
+       remove_cols.each do |rmv_col_name|
+         remove_list << "CHANGE `#{rmv_col_name}` `XXXX_#{rmv_col_name}` VARCHAR(255)"
+       end#each remove_cols
+       return if remove_list.empty? #nothing left to rename
+       remove_sql = remove_list.join(", ")
+       sql = "ALTER TABLE `#{table_name}` #{remove_sql}"
+       sth = @dbh.prepare(sql)
+       sth.execute
+       sth.finish
+     end
+
+     def create_file_mgr_table
+       file_mgr_key = '__pkid-file'
+       file_table_name_postfix = "files"
+       #Create the table to store files when the class is loaded
+       #Add modified_at to the UNIQUE KEY to keep versions
+
+       file_table_name = "#{@domain_table_name}_#{file_table_name_postfix}"
+       sql = "CREATE TABLE IF NOT EXISTS `#{file_table_name}` (
+               `#{file_mgr_key}` INT NOT NULL AUTO_INCREMENT,
+               node_name VARCHAR(255),
+               basename VARCHAR(255) NOT NULL,
+               content_type VARCHAR(255),
+               modified_at VARCHAR(255),
+               raw_content LONGBLOB,
+               PRIMARY KEY (`#{file_mgr_key}`),
+               UNIQUE KEY (node_name, basename) )"
+
+       @dbh.do(sql)
+       return file_table_name
+     end#def
+
+     #Stored values are JSON-encoded strings; JSON.parse of this era only accepts
+     #objects and arrays at the top level, so bare scalars are wrapped in an
+     #array before parsing and then unwrapped.
+     def jparse(str)
+       return JSON.parse(str) if str =~ /\A\s*[{\[]/
+       JSON.parse("[#{str}]")[0]
+     end
+
+   end#class
+ end#module
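
This JSON-per-column encoding is what save and get rely on to round-trip typed values through VARCHAR columns. A small illustration of the decode path, reusing the jparse logic above verbatim (pure Ruby, no database required):

    require 'json'

    def jparse(str)
      return JSON.parse(str) if str =~ /\A\s*[{\[]/
      JSON.parse("[#{str}]")[0] #wrap bare scalars so JSON.parse accepts them
    end

    ['"a string"', '42', '["x","y"]', '{"k":"v"}'].each { |stored| p jparse(stored) }
    # "a string"
    # 42
    # ["x", "y"]
    # {"k"=>"v"}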