cicd-builder 0.9.10

This diff shows the content of publicly available package versions as released to their public registries. It is provided for informational purposes only and reflects the changes between package versions as published.
@@ -0,0 +1,12 @@
+ module CiCd
+   module Builder
+     module Errors
+
+       class Unknown < StandardError ; end
+       class Internal < Unknown ; end
+       class InvalidVersion < Unknown ; end
+       class InvalidVersionConstraint < Unknown ; end
+
+     end
+   end
+ end
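
Because every specific error above inherits from CiCd::Builder::Errors::Unknown, a caller can rescue the whole family with a single clause. A minimal sketch of that pattern (the `builder` receiver and the exit codes are illustrative, not part of the gem; `takeInventory` below raises these errors):

    begin
      builder.takeInventory()
    rescue CiCd::Builder::Errors::InvalidVersion,
           CiCd::Builder::Errors::InvalidVersionConstraint => e
      # Inventory generation mismatch: surface the message and stop.
      warn "Inventory version problem: #{e.message}"
      exit 1
    rescue CiCd::Builder::Errors::Unknown => e
      # Anything else from the builder's own error family, including Internal.
      warn "Builder error: #{e.message}"
      exit 2
    end
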
@@ -0,0 +1,86 @@
+ module CiCd
+   module Builder
+
+     # ---------------------------------------------------------------------------------------------------------------
+     def parseOptions()
+       # Parse options
+       @options = {
+           log_level: :warn,
+           #dry_run: false,
+
+           gen: '3.0.0',
+       }.merge @default_options
+
+       opt_parser = OptionParser.new do |opts|
+         opts.banner = "Usage: #{MYNAME} [options]"
+
+         opts.on('-l', '--log_level LEVEL', '--log-level LEVEL', [:trace, :debug, :info, :note, :warn, :error, :fatal, :todo], "Log level ([:trace, :debug, :info, :note, :warn, :error, :fatal, :todo])") do |v|
+           @options[:log_level] = v
+         end
+         opts.on("-f", "--inifile FILE", "INI file with settings") do |v|
+           @options[:inifile] = v
+         end
+         #opts.on("-n", "--[no-]dry-run", "Do a dry run, Default --no-dry-run") do |v|
+         #  @options[:dry_run] = v
+         #end
+       end
+
+       opt_parser.parse!
+
+       # Set up logger
+       Logging.init :trace, :debug, :info, :note, :warn, :error, :fatal, :todo
+       @logger = Logging.logger(STDOUT,
+                                :pattern => "%#{::Logging::MAX_LEVEL_LENGTH}l: %m\n",
+                                :date_pattern => '%Y-%m-%d %H:%M:%S')
+       @logger.level = @options[:log_level]
+
+       if @options.key?(:inifile)
+         @options[:inifile] = File.expand_path(@options[:inifile])
+         unless File.exist?(@options[:inifile])
+           raise StandardError.new("#{@options[:inifile]} not found!")
+         end
+         begin
+           # ENV.each{ |key,_|
+           #   ENV.delete(key)
+           # }
+           ini = IniFile.load(@options[:inifile])
+           ini['global'].each{ |key,value|
+             ENV[key] = value
+           }
+           def _expand(k,v,regex,rerun)
+             matches = v.match(regex)
+             if matches
+               var = matches[1]
+               if ENV.has_key?(var)
+                 ENV[k] = v.gsub(/\$\{#{var}\}/,ENV[var]).gsub(/\$#{var}/,ENV[var])
+               else
+                 rerun[var] = 1
+               end
+             end
+           end
+
+           pending = nil
+           rerun = {}
+           begin
+             pending = rerun
+             rerun = {}
+             ENV.to_hash.each{|k,v|
+               if v.match(/\$/)
+                 _expand(k,v,%r'[^\\]\$\{(\w+)\}', rerun)
+                 _expand(k,v,%r'[^\\]\$(\w+)', rerun)
+               end
+             }
+             # Should break out the first time that we make no progress!
+           end while pending != rerun
+         rescue IniFile::Error
+           # noop
+         rescue Exception => e
+           @logger.error "#{e.class.name} #{e.message}"
+           raise e
+         end
+       end
+       @options
+     end
+
+   end
+ end
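
As the code above shows, parseOptions copies the [global] section of the INI file into ENV, then repeatedly expands ${VAR} and $VAR references until a pass makes no further progress. A hypothetical settings file demonstrating the expansion (all names invented):

    [global]
    BUILD_NAME = myproj
    AWS_S3_BUCKET = my-build-bucket
    PACKAGE_DIR = /data/${BUILD_NAME}/packages

Invoked with `--inifile settings.ini --log-level debug`, the loop would leave ENV['PACKAGE_DIR'] set to '/data/myproj/packages' once ${BUILD_NAME} resolves; names that never resolve simply stay queued in `rerun` and unexpanded. Note that the [^\\] guard in the expansion regexes requires a character before the `$`, so a reference at the very start of a value is not matched as written.
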
@@ -0,0 +1,428 @@
+ require 'json'
+
+ module CiCd
+   module Builder
+
+     # ---------------------------------------------------------------------------------------------------------------
+     def getS3Bucket()
+       unless @s3
+         @s3 = AWS::S3.new(
+             :access_key_id => ENV['AWS_ACCESS_KEY_ID'],
+             :secret_access_key => ENV['AWS_SECRET_ACCESS_KEY'])
+       end
+       unless @s3_bucket
+         @s3_bucket = @s3.buckets[ENV['AWS_S3_BUCKET']]
+       end
+       @s3_bucket
+     end
+
+     # ---------------------------------------------------------------------------------------------------------------
+     def uploadToS3(artifacts)
+       bucket = getS3Bucket()
+       artifacts.each{|art|
+         # makes no request, returns an AWS::S3::S3Object
+         s3_obj = bucket.objects[art[:key]]
+         upload = true
+         if art[:data].has_key?(:file)
+           md5 = Digest::MD5.file(art[:data][:file]).hexdigest
+         else
+           #noinspection RubyArgCount
+           md5 = Digest::MD5.hexdigest(art[:data][:data])
+         end
+         if s3_obj.exists?
+           @logger.info "s3://#{ENV['AWS_S3_BUCKET']}/#{art[:key]} exists"
+           etag = s3_obj.etag.gsub(/"/,'')
+           checksum = s3_obj.metadata[:checksum]
+           if etag == md5 or (checksum and checksum == md5)
+             upload = false # the stored object already matches our artifact
+           else
+             @logger.warn "s3://#{ENV['AWS_S3_BUCKET']}/#{art[:key]} is different from our #{art[:key]} (#{s3_obj.etag} <=> #{md5})"
+             upload = true
+           end
+         end
+
+         if upload
+           @logger.info "Upload new s3://#{ENV['AWS_S3_BUCKET']}/#{art[:key]}"
+           # Get size before upload changes our object
+           if art[:data].has_key?(:file)
+             size = File.size(art[:data][:file])
+           else
+             size = art[:data][:data].length
+           end
+           art[:data][:metadata] = {checksum: md5}
+           s3_obj.write(art[:data])
+           if art.has_key?(:public_url)
+             @vars[art[:public_url]] = s3_obj.public_url
+           end
+           if art.has_key?(:read_url)
+             @vars[art[:read_url]] = s3_obj.url_for(:read)
+           end
+           @logger.info "#{art[:label]}: #{@vars[art[:public_url]]}" if art.has_key?(:public_url)
+           # if size > 16 * 1024 * 1024
+           #   if size < 5 * 1024 * 1024 * 1000
+           #     @logger.debug "#{art[:label]}: Multipart etag: #{s3_obj.etag}"
+           #     s3_obj.copy_to("#{art[:key]}.copy")
+           #     s3_obj = bucket.objects["#{art[:key]}.copy"]
+           #     s3_obj.move_to(art[:key])
+           #     s3_obj = bucket.objects[art[:key]]
+           #     @logger.debug "#{art[:label]}: Revised etag: #{s3_obj.etag}"
+           #   else
+           #     @logger.warn "#{art[:label]}: Multipart etag: #{s3_obj.etag} on asset > 5Gb"
+           #   end
+           # end
+         end
+       }
+       0
+     end
+
+     # ---------------------------------------------------------------------------------------------------------------
+     def getArtifactsDefinition()
+       nil
+     end
+
+     # ---------------------------------------------------------------------------------------------------------------
+     def getNamingDefinition()
+       nil
+     end
+
+     # ---------------------------------------------------------------------------------------------------------------
+     def initInventory()
+
+       hash =
+           {
+               id: "#{@vars[:project_name]}",
+               # In case future generations introduce incompatible features
+               gen: "#{@options[:gen]}",
+               container: {
+                   artifacts: %w(assembly metainfo checksum),
+                   naming: '<product>-<major>.<minor>.<patch>-<branch>-release-<number>-build-<number>.<extension>',
+                   assembly: {
+                       extension: 'tar.gz',
+                       type: 'targz'
+                   },
+                   metainfo: {
+                       extension: 'MANIFEST.json',
+                       type: 'json'
+                   },
+                   checksum: {
+                       extension: 'checksum',
+                       type: 'Digest::SHA256'
+                   },
+                   variants: {
+                       :"#{@vars[:variant]}" => {
+                           latest: {
+                               build: 0,
+                               branch: 0,
+                               version: 0,
+                               release: 0,
+                           },
+                           versions: [ "#{@vars[:build_ver]}" ],
+                           branches: [ "#{@vars[:build_bra]}" ],
+                           builds: [
+                               {
+                                   drawer: @vars[:build_nam],
+                                   build_name: @vars[:build_rel],
+                                   build_number: @vars[:build_num],
+                                   release: @vars[:release],
+                               }
+                           ],
+                       }
+                   }
+               }
+           }
+       artifacts = getArtifactsDefinition()
+       naming = getNamingDefinition()
+
+       # By default we use the internal definition ...
+       if artifacts
+         artifacts.each do |name,artifact|
+           hash[:container][name] = artifact
+         end
+       end
+
+       # By default we use the internal definition ...
+       if naming
+         hash[:container][:naming] = naming
+       end
+       JSON.pretty_generate( hash, { indent: "\t", space: ' '})
+     end
+
+     # ---------------------------------------------------------------------------------------------------------------
+     def takeInventory()
+       def _update(hash, key, value)
+         h = {}
+         i = -1
+         hash[key].each { |v| h[v] = i+=1 }
+         unless h.has_key?(value)
+           h[value] = h.keys.size # No -1 because this is evaluated BEFORE we make the addition!
+         end
+         s = h.sort_by { |_, v| v }
+         s = s.map { |v| v[0] }
+         hash[key] = s
+         h[value]
+       end
+
+       # Read and parse in JSON
+       json_s = ''
+       json = nil
+       varianth = nil
+
+       bucket = getS3Bucket()
+       key = "#{@vars[:project_name]}/INVENTORY.json"
+       s3_obj = bucket.objects[key]
+       # If the inventory has started then add to it
+       if s3_obj.exists?
+         s3_obj.read(){|chunk|
+           json_s << chunk
+         }
+         json = Yajl::Parser.parse(json_s)
+         over = false
+         # Is the inventory format up to date ...
+         # TODO: [2014-07-27 Christo] Use semver gem ...
+         require 'chef/exceptions'
+         require 'chef/version_constraint'
+         require 'chef/version_class'
+
+         begin
+           version = Chef::Version.new(json['gen'])
+         rescue Chef::Exceptions::InvalidCookbookVersion
+           json['gen'] = "#{json['gen']}.0.0"
+           version = Chef::Version.new(json['gen'])
+         end
+
+         begin
+           our_ver = Chef::Version.new(@options[:gen])
+           constraint = Chef::VersionConstraint.new("<= #{@options[:gen]}")
+         rescue Chef::Exceptions::InvalidVersionConstraint => e
+           raise CiCd::Builder::Errors::InvalidVersionConstraint.new e.message
+         rescue Chef::Exceptions::InvalidCookbookVersion => e
+           raise CiCd::Builder::Errors::InvalidVersion.new e.message
+         end
+
+         unless constraint.include?(version)
+           raise CiCd::Builder::Errors::InvalidVersion.new "The inventory generation is newer than I can manage: #{version} <=> #{our_ver}"
+         end
+         if json['container'] and json['container']['variants']
+           # ... but if it does not have our variant then add it
+           variants = json['container']['variants']
+           unless variants[@vars[:variant]]
+             variants[@vars[:variant]] = {}
+             varianth = variants[@vars[:variant]]
+             varianth['builds'] = []
+             varianth['branches'] = []
+             varianth['versions'] = []
+             varianth['releases'] = []
+             varianth['latest'] = {
+                 branch: -1,
+                 version: -1,
+                 build: -1,
+                 release: -1,
+             }
+           end
+           varianth = variants[@vars[:variant]]
+           # If the inventory 'latest' format is up to date ...
+           unless varianth['latest'] and
+               varianth['latest'].is_a?(Hash)
+             # Start over ... too old/incompatible
+             over = true
+           end
+         else
+           # Start over ... too old/incompatible
+           over = true
+         end
+       else
+         # Start a new inventory
+         over = true
+       end
+       # Starting fresh ?
+       if over or json.nil?
+         json_s = initInventory()
+       else
+         raise CiCd::Builder::Errors::Internal.new sprintf('Internal logic error! %s::%d', __FILE__,__LINE__) if varianth.nil?
+         # Add the new build if we don't have it
+         unless varianth['builds'].map { |b| b['build_name'] }.include?(@vars[:build_rel])
+           #noinspection RubyStringKeysInHashInspection
+           varianth['builds'] <<
+               {
+                   'drawer' => @vars[:build_nam],
+                   'build_name' => @vars[:build_rel],
+                   'build_number' => @vars[:build_num],
+                   'release' => @vars[:release],
+               }
+         end
+         build_lst = (varianth['builds'].size-1)
+         build_rel = build_lst
+         i = -1
+         varianth['builds'].each{ |h|
+           i += 1
+           convert_build(h)
+           convert_build(varianth['builds'][build_rel])
+           if h['release'].to_i > varianth['builds'][build_rel]['release'].to_i
+             build_rel = i
+           elsif h['release'].to_i == varianth['builds'][build_rel]['release'].to_i
+             build_rel = i if h['build_number'].to_i > varianth['builds'][build_rel]['build_number'].to_i
+           end
+         }
+
+         # Add new branch ...
+         build_bra = _update(varianth, 'branches', @vars[:build_bra])
+         # Add new version ...
+         build_ver = _update(varianth, 'versions', @vars[:build_ver])
+
+         # Set latest
+         varianth['latest'] = {
+             branch: build_bra,
+             version: build_ver,
+             build: build_lst,
+             release: build_rel,
+         }
+         json['gen'] = @options[:gen]
+         json_s = JSON.pretty_generate( json, { indent: "\t", space: ' '})
+       end
+       begin
+         resp = s3_obj.write(:data => json_s)
+         case resp.class.name
+           when %r'^AWS::S3::(S3Object|ObjectVersion)'
+             return 0
+           else
+             return 1
+         end
+       rescue Exception
+         return -1
+       end
+     end
+
+     def convert_build(h)
+       if h.has_key?('number')
+         h['build_number'] = h['number']
+         h.delete 'number'
+       elsif h.has_key?('build_number')
+         h.delete 'number'
+       else
+         h_build = h.has_key?('build') ? h['build'] : h['build_name']
+         h_number = h_build.gsub(/^.*?-build-([0-9]+)$/, '\1').to_i
+
+         h['build_number'] = h_number
+         h['build_name'] = h_build
+         h.delete 'build'
+         h.delete 'number'
+       end
+       if h.has_key?('build')
+         h_build = h['build']
+         h_number = h_build.gsub(/^.*?-build-([0-9]+)$/, '\1').to_i
+
+         h['build_number'] = h_number
+         h['build_name'] = h_build
+         h.delete 'build'
+         h.delete 'number'
+       end
+       h
+     end
+
+     # ---------------------------------------------------------------------------------------------------------------
+     def uploadBuildArtifacts()
+       if @vars.has_key?(:build_dir) and @vars.has_key?(:build_pkg)
+         begin
+           if File.exist?(@vars[:build_pkg])
+
+             artifacts = []
+
+             key = "#{@vars[:project_name]}/#{@vars[:variant]}/#{@vars[:build_nam]}/#{@vars[:build_rel]}"
+             # Store the assembly
+             artifacts << {
+                 key: "#{key}.tar.gz",
+                 data: {:file => @vars[:build_pkg]},
+                 public_url: :build_url,
+                 label: 'Package URL'
+             }
+
+             # Store the metadata
+             manifest = manifestMetadata()
+             artifacts << {
+                 key: "#{key}.MANIFEST.json",
+                 data: {:data => manifest},
+                 public_url: :manifest_url,
+                 read_url: :manifest_url,
+                 label: 'Manifest URL'
+             }
+
+             # Store the checksum
+             artifacts << {
+                 key: "#{key}.checksum",
+                 data: {:data => @vars[:build_sha]},
+                 public_url: :checksum_url,
+                 read_url: :checksum_url,
+                 label: 'Checksum URL'
+             }
+
+             @vars[:return_code] = uploadToS3(artifacts)
+             if 0 == @vars[:return_code]
+               @vars[:return_code] = takeInventory()
+             end
+             @vars[:return_code]
+           else
+             @vars[:return_code] = 1
+           end
+         rescue => e
+           @logger.error "#{e.class.name} #{e.message}"
+           @vars[:return_code] = -99
+           raise e
+         end
+       else
+         @vars[:return_code] = 2
+       end
+       @vars[:return_code]
+     end
+
+     # ---------------------------------------------------------------------------------------------------------------
+     def manifestMetadata
+       manifest = @vars[:build_mdd].dup
+
+       manifest[:manifest] = getBuilderVersion
+
+       version_major, version_minor, version_patch = manifest[:Version].split('.')
+
+       manifest[:version] = {
+           number: manifest[:Version],
+           major: version_major,
+           minor: version_minor,
+           patch: version_patch,
+           build: @vars[:build_num],
+           branch: @vars[:build_bra],
+       }
+       manifest[:build] = {
+           name: @vars[:build_rel],
+           base: @vars[:build_nam],
+           date: @vars[:build_dte],
+           vrb: @vars[:build_vrb],
+           branch: @vars[:build_bra],
+           checksum: @vars[:build_sha],
+       }
+       # we want lowercase but if we use the existing key we don't have to delete it afterwards ...
+       manifest[:Release] = {
+           number: manifest[:Release],
+           branch: manifest[:Branch],
+           date: manifest[:Date],
+           checksum: @vars[:build_mds],
+       }
+       manifest.delete(:Date)
+       # manifest.delete(:api)
+       # manifest.delete(:core)
+       manifest[:vars] = {}
+       @vars.each { |k, v|
+         unless %w(build_mdd build_txt).include?(k.to_s)
+           manifest[:vars][k.to_s] = v
+         end
+       }
+       manifest = downcaseHashKeys(manifest)
+       manifest[:env] = {}
+       ENV.each { |k, v|
+         unless %w(LS_COLORS AWS_ACCESS_KEY AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SECRET_KEY).include?(k.to_s)
+           manifest[:env][k.to_s] = v
+         end
+       }
+       JSON.pretty_generate( manifest, { indent: "\t", space: ' '})
+     end
+
+   end
+ end
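
Taken together, uploadBuildArtifacts is the entry point for the publish step: it pushes the assembly, MANIFEST.json, and checksum objects through uploadToS3, then records the build in <project>/INVENTORY.json via takeInventory. A rough sketch of a builder wiring these mixins together; the class name and @vars values are invented, a real pipeline would populate @vars (including @vars[:build_mdd] with :Version/:Release/:Branch/:Date for manifestMetadata, plus the getBuilderVersion and downcaseHashKeys helpers defined elsewhere in the gem) in earlier stages, and the gem's own dependencies (logging, inifile, aws-sdk v1, yajl-ruby, chef) are assumed to be loaded:

    # parseOptions' usage banner references a MYNAME constant; define a
    # top-level fallback for this standalone sketch.
    MYNAME = File.basename($0)

    class MyBuilder
      include CiCd::Builder    # mixes in parseOptions, uploadBuildArtifacts, ...

      def initialize
        @default_options = {}  # merged under @options by parseOptions()
        @vars = {
          project_name: 'myproj',
          variant:      'prod',
          build_bra:    'master',
          build_ver:    '1.2.3',
          build_nam:    'myproj-1.2.3-master',
          build_rel:    'myproj-1.2.3-master-release-7-build-42',
          build_num:    42,
          release:      7,
          build_dir:    '/tmp/build',
          build_pkg:    '/tmp/build/myproj.tar.gz',
          build_sha:    'sha256-digest-of-package',
        }
      end

      def run
        parseOptions()
        uploadBuildArtifacts() # 0 on success; also stored in @vars[:return_code]
      end
    end

    exit MyBuilder.new.run

getS3Bucket expects AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and AWS_S3_BUCKET in the environment, which is typically arranged via the --inifile [global] section described above.
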