manband 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,176 @@
+ require 'rubygems'
+ require 'yaml'
+ require 'optparse'
+ require 'amqp'
+ require 'json'
+ require 'mb-minion'
+ require 'fileutils'
+
+ include Minion
+
+ require 'manband/workflow.rb'
+ require 'manband/flowconfig.rb'
+
+ # Library to launch new workflows
+ class BandManager
+
+   @@log = Logger.new(STDOUT)
+   @@log.level = Logger::DEBUG
+
+
+   # Load workflow file
+   def self.load(wfile)
+     if !File.exists?(wfile)
+       @@log.error "Workflow file "+wfile+" does not exist!"
+       return nil
+     end
+     begin
+       workflow = YAML.load_file(wfile)
+     rescue
+       @@log.error "Error while loading file"
+       return nil
+     end
+     return workflow
+   end
+
+   # Add a new workflow based on input workflow object
+   # wfile: workflow input file
+   # var: optional runtime variables
+   # uid: optional user id
+   # bucket: optional S3 bucket used to store results
+   # debug: activate debug mode, do not execute the commands
+   # @return workflow id
+   def self.add(wfile, var = [], uid='admin', bucket='manband', debug=false)
+     rvariables = Hash.new
+     #fworkflow = YAML.load_file(wfile)
+     fworkflow = self.load(wfile)
+     if fworkflow==nil
+       return nil
+     end
+     terminals = 0
+     fworkflow["workflow"].each do |node|
+       if node[1]["command"]!=nil
+         # First init with vars defined in workflow
+         exprs = node[1]["command"].scan(/#var\.(.*?)#/)
+         if exprs.length > 0
+           for reg in 0..exprs.length-1
+             rvariables[exprs[reg][0]] = nil
+           end
+         end
+         if node[1]["next"]==nil
+           # This is a terminal node
+           terminals += 1
+           @@log.debug "terminal: "+node[0]
+         end
+       end
+     end
+     @@log.debug "nb terminals: "+terminals.to_s
+
+
+     if var!=nil
+       var.each do |rvariable|
+         rvardef = rvariable.split('=')
+         rvariables[rvardef[0]]=rvardef[1]
+       end
+       @@log.debug "Using runtime variables: "+rvariables.to_json
+     end
+
+     workflow = WorkFlow.new(:uid => uid, :name => fworkflow["workflow"]["name"], :description => fworkflow["workflow"]["description"], :created_at => Time.now, :file => wfile, :terminals => terminals, :status => STATUS_NEW, :workdir => FlowConfig.getjobdir(), :vars => rvariables.to_json, :bucket => bucket)
+
+     err = workflow.save
+
+     if err == false
+       return nil
+     end
+
+     instances = 0
+     regexp = nil
+     # Check if the workflow needs to be run for many files
+     if fworkflow["workflow"]["root"]["url"]!=nil
+       nodepath = fworkflow["workflow"]["root"]["url"]
+       if fworkflow["workflow"]["root"]["regexp"]!=nil
+         regexp = Regexp.new(fworkflow["workflow"]["root"]["regexp"])
+       end
+       filelist = Array.new
+       Dir.new(nodepath).entries.each do |n|
+         @@log.debug "Test file "+n
+         if (regexp==nil || regexp.match(n)) && File.file?(nodepath+"/"+n)
+           @@log.debug "New sub workflow"
+           instances += 1
+           subworkflow = WorkFlow.new(:uid => uid, :name => fworkflow["workflow"]["name"], :description => fworkflow["workflow"]["description"], :created_at => Time.now, :file => wfile, :terminals => terminals, :status => STATUS_NEW, :workdir => FlowConfig.getjobdir(), :vars => rvariables.to_json, :bucket => bucket, :parent => workflow.id)
+           err = subworkflow.save
+           # Create the sub-workflow root dir before symlinking into it
+           if !File.exists?(subworkflow.workdir+"/root")
+             FileUtils.mkdir_p subworkflow.workdir+"/root"
+           end
+           File.symlink(nodepath+"/"+n, subworkflow.workdir+"/root/"+n)
+           @@log.debug("Add new sub workflow "+subworkflow.id.to_s)
+         end
+       end
+     end
+     @@log.debug "Number of instances: "+instances.to_s
+     workflow.update(:instances => instances)
+
+
+     return workflow.id
+   end
+
+   # Start a workflow in state NEW
+   # In debug mode sends an OP_SKIP instead of OP_START
+   def self.start(id,debug=false)
+     workflow = WorkFlow.get(id)
+     wlist = Array.new
+     wflows = WorkFlow.all(:parent => id)
+     if wflows == nil || wflows.length==0
+       wlist.push(id)
+     else
+       wflows.each do |wflow|
+         wlist.push wflow.id
+       end
+     end
+
+     @@log.debug "Execute workflow list: "+wlist.to_s
+     wlist.each do |wflow|
+       workflow = WorkFlow.get(wflow)
+       rootjob = Job.new(:wid => workflow.id, :node => "root", :command => "", :status => STATUS_NEW, :instances => 0, :maxinstances => 0, :workdir => '')
+       rootjob.save
+       workflow.parse('root',rootjob.id)
+       # Request workflow management
+       msg = '{ "id" : "'+workflow.id.to_s+'", "root" : "'+rootjob.id.to_s+'"}'
+       if debug
+         Minion.enqueue("manband.master", { "operation" => OP_SKIP, "msg" => msg })
+       else
+         Minion.enqueue("manband.master", { "operation" => OP_START, "msg" => msg })
+       end
+     end
+   end
+
+   # Execute a workflow from another one
+   def self.launchclone(id)
+     workflow = WorkFlow.get(id)
+     newworkflow = WorkFlow.new(:uid => workflow.uid, :name => workflow.name, :description => workflow.description, :created_at => Time.now, :file => workflow.file, :terminals => workflow.terminals, :status => STATUS_NEW, :workdir => FlowConfig.getjobdir(), :vars => workflow.vars, :bucket => workflow.bucket)
+     newworkflow.save
+     rootjob = Job.new(:wid => newworkflow.id, :node => "root", :command => "", :status => STATUS_NEW, :instances => 0, :maxinstances => 0, :workdir => '')
+     rootjob.save
+     newworkflow.parse('root',rootjob.id)
+     jobmsg = '{ "id" : "'+newworkflow.id.to_s+'", "root" : "'+rootjob.id.to_s+'"}'
+     job = Job.new(:wid => newworkflow.id, :node => "fake", :command => "", :status => STATUS_FAKE, :instances => 0, :workdir => '')
+     job.sendmessage(OP_START,jobmsg)
+     return newworkflow.id
+   end
+
+   # Launch a new workflow based on input workflow object
+   # wfile: workflow input file
+   # var: optional runtime variables
+   # uid: optional user id
+   # bucket: optional S3 bucket used to store results
+   # debug: activate debug mode, do not execute the commands
+   # @return workflow id
+   def self.launch(wfile, var = [], uid='admin', bucket='manband', debug=false)
+     workflow = self.add(wfile,var,uid,bucket,debug)
+     if workflow==nil
+       return nil
+     end
+     @@log.debug "Start workflow "+workflow.to_s
+     self.start(workflow,debug)
+     return workflow
+   end
+
+ end
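
For orientation, a minimal usage sketch of the launcher above. The workflow schema (a top-level workflow key holding name, description, a root node with optional url/regexp, and per-node command/next entries) is inferred from the fields self.add reads; the file path, node names, and require path are hypothetical and the gem's actual schema may differ.

    # Hypothetical require path, assumed from the gem layout.
    require 'manband/bandmanager'

    # /tmp/demo.yaml is assumed to contain a definition along these lines
    # (schema inferred from self.add; the shipped format may differ):
    #
    #   workflow:
    #     name: demo
    #     description: count words in each input file
    #     root:
    #       url: /tmp/inputs        # one sub-workflow per file matching regexp
    #       regexp: .*\.txt
    #       next: count
    #     count:
    #       command: wc -w #var.input# > result.txt

    # Runtime variables use the key=value form that self.add splits on '='.
    id = BandManager.launch('/tmp/demo.yaml', ['input=data.txt'])
    puts "Launched workflow "+id.to_s unless id == nil
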
@@ -0,0 +1,87 @@
+ require 'uuid'
+ require 'data_mapper'
+ require 'dm-migrations'
+
+ STATUS_SKIP = -2
+ STATUS_FAKE = -1
+ STATUS_NEW = 0
+ STATUS_RUNNING = 1
+ STATUS_OVER = 2
+ STATUS_SUSPEND = 3 # SUSPEND mode for a job means a job is over and paused
+ STATUS_ERROR = 4
+
+ STORE_NO = -1
+ STORE_DO = 0
+ STORE_RUN = 1
+ STORE_OVER = 2
+ STORE_ERROR = 4
+
+ OP_NEW = "new"
+ OP_START = "start"
+ OP_FINISH = "finish"
+ OP_ERROR = "error"
+ OP_WSUSPEND = "workflowsuspend"
+ OP_JSUSPEND = "jobsuspend"
+ OP_RUN = "run"
+ OP_WRESUME = "workflowresume"
+ OP_JRESUME = "jobresume"
+ OP_SKIP = "skip" # For if conditions, go through jobs but skip execution
+ OP_CLEAN = "clean" # Delete work dirs of workflow
+ OP_DESTROY = "destroy" # Delete workflow and its work dirs
+ OP_STORE = "store" # Store result to S3
+
+ # Base config
+ class FlowConfig
+   @@workdir = '/tmp'
+   @@s3host = 'genokvm4.genouest.org'
+   @@s3port = '8773'
+   @@s3path = '/services/Walrus'
+
+   @@uploaddir = '/tmp/upload'
+
+   def self.uploaddir
+     return @@uploaddir
+   end
+
+   def self.s3host
+     return @@s3host
+   end
+
+   def self.sets3(host, port='8773', path='/services/Walrus')
+     @@s3host = host
+     @@s3port = port
+     @@s3path = path
+   end
+
+   def self.s3port
+     return @@s3port
+   end
+
+   def self.s3path
+     return @@s3path
+   end
+
+   def self.workdir
+     return @@workdir
+   end
+
+   def self.setuploaddir(directory)
+     @@uploaddir = directory
+   end
+
+   def self.setworkdir(directory)
+     @@workdir = directory
+   end
+
+   def self.getjobdir(workflowdir = nil)
+     uuid = UUID.new
+     if workflowdir == nil
+       return @@workdir+"/"+uuid.generate.to_s
+     else
+       return workflowdir+"/"+uuid.generate.to_s
+     end
+   end
+
+
+ end
+
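
FlowConfig above ships with site-specific defaults (a GenOuest Walrus endpoint and /tmp work directories); a deployment would normally override them at boot, before any workflow is launched. A minimal sketch, with hypothetical host and paths:

    require 'manband/flowconfig'

    FlowConfig.setworkdir('/data/manband/jobs')      # per-job UUID dirs are created under this root
    FlowConfig.setuploaddir('/data/manband/upload')
    FlowConfig.sets3('s3.example.org', '8773', '/services/Walrus')  # hypothetical endpoint

    puts FlowConfig.getjobdir   # e.g. /data/manband/jobs/<uuid>
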
@@ -0,0 +1,282 @@
+ require 'data_mapper'
+ require 'dm-migrations'
+ require 'yaml'
+ require 'manband/workflow.rb'
+ require 'manband/flowconfig.rb'
+ require 'mb-minion'
+ require 'logger'
+
+ include Minion
+
+ #DataMapper.setup(:default, 'sqlite:///home/osallou/Desktop/genflow/project.db')
+ DataMapper.setup(:default, ENV['MYSQL_URL'])
+
+ #TODO manage IF operation and result.
+ # Exit code = 0 selects the first node, any other code selects the second node
+ # Should inform with a finish "code: exitcode" and manage it in workflowhandler to RUN the selected node and SKIP the other node
+
+ class JobLink
+   include DataMapper::Resource
+
+   property :id, Serial
+   property :wid, Integer # Workflow id
+   property :from, Integer # Job origin id
+   property :to, Integer # Job destination id
+
+ end
+
+ class Job
+   include DataMapper::Resource
+
+   @@debug = false
+
+   def self.debug(mode)
+     @@debug = mode
+   end
+
+   @@log = Logger.new(STDOUT)
+   @@log.level = Logger::DEBUG
+
+   property :id, Serial
+   property :wid, Integer # id of the workflow
+   property :node, String # id of the node
+   property :command, Text # Array of command lines to execute (1 per matching regexp)
+   property :status, Integer # is job in execution, finished or failed?
+   property :instances, Integer # Current number of job instances for this command according to pattern
+   property :maxinstances, Integer # Number of instances launched
+   property :workdir, Text # working dir for this job
+   property :handler, String # job handler managing this job
+   property :queue, String, :default => "" # Queue to handle the job
+   property :error, Text, :default => "[]" # JSON array of errors, per instance
+   property :store, Integer, :default => STORE_NO # Storage status
+
+   # Update job status and launch the command
+   def run(curhandler,instance=0)
+     # Send run command, possibly multiple ones according to pattern
+     # Command would send a message when over
+     curjob = Job.get(@id)
+     if @status != STATUS_SUSPEND
+       @status = STATUS_RUNNING
+       curjob.update(:status => STATUS_RUNNING, :handler => curhandler.to_s)
+     else
+       curjob.update(:handler => curhandler.to_s)
+     end
+     workdir = curjob.workdir
+     if instance>0
+       workdir = workdir + "/node" + instance.to_s
+     end
+     err = runcommand(workdir,instance)
+     if err == nil || err == false
+       # An error occurred
+       jobmsg = '{ "workflow" : "'+@wid.to_s+'" , "node" : "'+@node+'", "id" : "'+@id.to_s+'", "handler" : "'+curhandler.to_s+'", "instance" : "'+instance.to_s+'" }'
+       sendmessage(OP_ERROR,jobmsg)
+     else
+       if curjob.store == STORE_DO || curjob.store == STORE_ERROR
+         workflow = WorkFlow.get(curjob.wid)
+         jobmsg = '{ "id" : "'+curjob.id.to_s+'", "bucket" : "'+workflow.bucket+'" }'
+         sendmessage(OP_STORE,jobmsg)
+       end
+       jobmsg = '{ "workflow" : "'+@wid.to_s+'" , "node" : "'+@node+'", "id" : "'+@id.to_s+'", "handler" : "'+curhandler.to_s+'", "instance" : "'+instance.to_s+'" }'
+       sendmessage(OP_FINISH,jobmsg)
+     end
+   end
+
+   # Skip treatment, just answer
+   def skip(curhandler)
+     curjob = Job.get(@id)
+     curjob.update(:handler => curhandler.to_s)
+     jobmsg = '{ "workflow" : "'+@wid.to_s+'" , "node" : "'+@node+'", "id" : "'+@id.to_s+'", "handler" : "'+curhandler.to_s+'" }'
+     sendmessage(OP_FINISH,jobmsg)
+   end
+
+   # Execute the command locally
+   def runcommand(workdir,instance)
+     # Reset AMQP_URL/MYSQL_URL in the shell, then run inside the job workdir
+     initcmd = "AMQP_URL=\"\" && MYSQL_URL=\"\" && mkdir -p "+workdir+" && cd "+workdir+" && WORKDIR="+workdir+" && "
+     curjob = Job.get(@id)
+     command = JSON.parse(curjob.command)
+     if command.length > 1
+       # Multiple instances: instance numbering starts at 1 (see runnext/resume)
+       system(initcmd+command[instance-1])
+     else
+       system(initcmd+command[0])
+     end
+   end
+
+   # Change instance counter
+   # If the workflow is in suspend status, suspend the job at the end of its treatment
+   def finish
+     if @status == STATUS_SKIP
+       return
+     end
+     workflow = WorkFlow.get(@wid)
+     if workflow.status == STATUS_SUSPEND
+       @status = STATUS_SUSPEND
+       DataMapper.repository(:default).adapter.execute('UPDATE jobs SET instances = instances + 1, status = '+STATUS_SUSPEND.to_s+' WHERE id='+@id.to_s);
+       curjob = Job.get(@id)
+       #curjob = Job.get(@id)
+       #instancesplusone = curjob.instances + 1
+       #curjob.update(:status => STATUS_SUSPEND, :instances => instancesplusone)
+     elsif @status == STATUS_SUSPEND
+       # Job set in suspend mode (breakpoint), keep in this status
+       #instancesplusone = curjob.instances + 1
+       #curjob.update(:instances => instancesplusone)
+       DataMapper.repository(:default).adapter.execute('UPDATE jobs SET instances = instances + 1 WHERE id='+@id.to_s);
+       curjob = Job.get(@id)
+     else
+       @status = STATUS_OVER
+       DataMapper.repository(:default).adapter.execute('UPDATE jobs SET instances = instances + 1 WHERE id='+@id.to_s);
+       # TODO add a lock for update
+       curjob = Job.get(@id)
+       if curjob.instances >= curjob.maxinstances
+         curjob.update(:status => STATUS_OVER)
+       end
+     end
+   end
+
+   # Is the job over? (finish sets status to OVER once the instance counter reaches maxinstances)
+   def isover?
+     if @status == STATUS_OVER
+       return true
+     elsif @status == STATUS_SKIP
+       return true
+     else
+       return false
+     end
+   end
+
+   # Resume a suspended job
+   # Update its status and run next jobs
+   def resume
+     workflow = WorkFlow.get(@wid)
+     # if status = suspend, continue to next
+     # if status = error, restart job
+     if @status == STATUS_SUSPEND
+       @@log.debug "Resume from suspended job "+@id.to_s+" for workflow "+@wid.to_s
+       @status = STATUS_OVER
+       curjob = Job.get(@id)
+       curjob.update(:status => STATUS_OVER)
+       runnext
+     elsif @status == STATUS_ERROR
+       @@log.debug "Resume from error job "+@id.to_s+" for workflow "+@wid.to_s
+       @status = STATUS_RUNNING
+       curjob = Job.get(@id)
+       commands = workflow.getnodecommand(curjob.node)
+       if commands == nil
+         @@log.error "Could not get command for node "+curjob.node
+         return
+       end
+       # Restart whole job, use new tmp dir, reset errors
+       curjob.update(:status => STATUS_RUNNING, :command => commands.to_json, :instances => 0, :workdir => FlowConfig.getjobdir(workflow.workdir), :error => "[]", :maxinstances => commands.length)
+       i = 0
+       if commands.length > 1
+         i = 1
+       end
+       commands.each do |command|
+         jobmsg = '{ "workflow" : "'+@wid.to_s+'" , "node" : "'+@node+'", "id" : "'+@id.to_s+'", "instance" : "'+i.to_s+'" }'
+         i += 1
+         # send message
+         sendmessage(OP_RUN,jobmsg,curjob.queue)
+       end
+     end
+   end
+
+   def error!(instance = 0)
+     @status = STATUS_ERROR
+     curjob = Job.get(@id)
+     err = JSON.parse(curjob.error)
+     err.push(instance)
+     curjob.update(:status => STATUS_ERROR, :error => err.to_json)
+     workflow = WorkFlow.get(@wid)
+     workflow.update(:status => STATUS_ERROR)
+   end
+
+   # Run next jobs
+   def runnext(skip=false)
+     workflow = WorkFlow.get(@wid)
+     # Are all previous jobs over?
+     previous = JobLink.all(:to => @id)
+     #nexts = workflow.getnextjobs(@node)
+     if previous!=nil
+       @@log.debug "Checking previous jobs: "+previous.length.to_s
+     end
+     if previous == nil || previous.length==0
+       # This is fine, must be a root node
+     elsif previous.length==1
+       # Only 1 parent. As we got the message, previous is over
+     else
+       # Check each job
+       previous.each do |link|
+         pjob = Job.get(link.from)
+         if pjob.status != STATUS_OVER
+           @@log.debug "At least one previous job is not over ("+pjob.id.to_s+"), wait for next message"
+           return false
+         end
+       end
+       @@log.debug "All previous jobs are over, continue..."
+     end
+
+     # Look at next jobs
+     nexts = JobLink.all(:from => @id)
+     #nexts = workflow.getnextjobs(@node)
+     if nexts == nil || nexts.length==0
+       workflow.isover?
+       return true
+     end
+     nexts.each do |link|
+       job = Job.get(link.to)
+       #nexts.each do |nextnode|
+       #nextnode.strip!
+       #queue = workflow.getnodequeue(nextnode)
+       if skip==true || @status == STATUS_SKIP
+         jobstatus = STATUS_SKIP
+         operation = OP_SKIP
+         commands = [ "skipcommand" ]
+       else
+         jobstatus = STATUS_NEW
+         operation = OP_RUN
+         commands = workflow.getnodecommand(job.node)
+       end
+       if commands == nil
+         @@log.error "Could not get command for node "+job.node
+         job.update(:command => "", :status => STATUS_ERROR, :instances => 0, :maxinstances => 0, :workdir => FlowConfig.getjobdir(workflow.workdir))
+         workflow.update(:status => STATUS_ERROR)
+         return
+       end
+       @@log.debug "New job command: "+commands.to_s
+       job.update(:command => commands.to_json, :status => jobstatus, :instances => 0, :maxinstances => commands.length, :workdir => FlowConfig.getjobdir(workflow.workdir))
+       #err = job.save
+       i = 0
+       # If multiple instances, differentiate and start at 1
+       if commands.length > 1
+         i = 1
+       end
+       commands.each do |command|
+         jobmsg = '{ "workflow" : "'+job.wid.to_s+'" , "node" : "'+job.node+'", "id" : "'+job.id.to_s+'", "instance" : "'+i.to_s+'" }'
+         i += 1
+         # send message
+         sendmessage(operation,jobmsg,job.queue)
+       end
+     end
+   end
+
+   def sendmessage(operation,msg,jobqueue='')
+     queue = "manband.master"
+     if operation == OP_RUN || operation == OP_SKIP || operation == OP_DESTROY || operation == OP_CLEAN || operation == OP_STORE
+       queue = "manband.node"+jobqueue
+     end
+     if queue != nil
+       Minion.enqueue(queue, { "operation" => operation, "msg" => msg })
+       if @@debug == true
+         bmsg = BandMessage.new(:wid => @wid, :message => '{ "operation" => '+operation+', "msg" => '+msg+' }' )
+         bmsg.save
+       end
+     end
+   end
+
+ end
+
+
+
+ DataMapper.finalize
+ DataMapper.auto_upgrade!
+
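
sendmessage above routes job-level operations (run, skip, clean, destroy, store) to a "manband.node"+queue queue and everything else to "manband.master". A hedged consumer sketch for the default node queue, assuming the mb-minion fork keeps upstream Minion's job/enqueue API and that AMQP_URL points at the same broker:

    require 'json'
    require 'mb-minion'
    require 'manband/workflow.rb'   # Job model plus the OP_*/STATUS_* constants

    include Minion

    # Jobs created with an explicit :queue would need a matching
    # "manband.node<queue>" subscriber; this one handles the default "".
    job 'manband.node' do |args|
      payload = JSON.parse(args['msg'])
      curjob = Job.get(payload['id'])
      case args['operation']
      when OP_RUN
        curjob.run('worker-1', payload['instance'].to_i)  # handler name is arbitrary
      when OP_SKIP
        curjob.skip('worker-1')
      end
    end
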
@@ -0,0 +1,77 @@
+ require 'aws/s3.rb'
+ require 'logger'
+ require 'zip/zip'
+ require 'find'
+ require 'fileutils'
+
+ require 'manband/flowconfig.rb'
+ require 'manband/user.rb'
+
+
+ class Storeband
+
+   @@log = Logger.new(STDOUT)
+   @@log.level = Logger::DEBUG
+
+   # Compress a directory
+   def compress(dir,name)
+     Zip::ZipFile.open(name, Zip::ZipFile::CREATE) do |zipfile|
+       Find.find(dir) do |path|
+         Find.prune if File.basename(path)[0] == ?.
+         dest = /#{dir}\/(\w.*)/.match(path)
+         # Skip files if they already exist in the archive
+         begin
+           zipfile.add(dest[1],path) if dest
+         rescue Zip::ZipEntryExistsError
+         end
+       end
+     end
+   end
+
+   # Store a job workdir to S3
+   def store(job,uid,bucket="manband")
+     user = User.get(uid)
+     if user == nil
+       job.update(:store => STORE_ERROR)
+       return false
+     end
+     if user.s3_access.nil? || user.s3_secret.nil?
+       job.update(:store => STORE_ERROR)
+       return false
+     end
+     job.update(:store => STORE_RUN)
+     zipfile = "manband-"+job.wid.to_s+"-"+job.node+".zip"
+     if File.exists?(FlowConfig.workdir+"/"+zipfile)
+       File.delete(FlowConfig.workdir+"/"+zipfile)
+     end
+     begin
+       # compress
+       @@log.debug "Compress to "+FlowConfig.workdir+"/"+zipfile
+       compress(job.workdir, FlowConfig.workdir+"/"+zipfile)
+       # Send file
+       sends3(FlowConfig.workdir+"/"+zipfile,bucket,user.s3_access,user.s3_secret)
+     rescue
+       @@log.error "An error occurred during S3 operation: "+job.id.to_s
+       job.update(:store => STORE_ERROR)
+       return false
+     end
+     job.update(:store => STORE_OVER)
+   end
+
+   def sends3(name,bucket,access,secret)
+     @@log.debug "connect to s3"
+     AWS::S3::Base.establish_connection!(
+       :access_key_id => access,
+       :secret_access_key => secret,
+       :server => FlowConfig.s3host,
+       :port => FlowConfig.s3port,
+       :path => FlowConfig.s3path
+     )
+     fbase = File.basename(name)
+
+     AWS::S3::S3Object.store(fbase, open(name), bucket)
+     @@log.debug "object stored"
+   end
+
+
+ end
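
A short usage sketch for the store path above, assuming a finished job and a user whose s3_access/s3_secret columns are populated; the job id and require paths are hypothetical:

    require 'manband/workflow.rb'
    require 'manband/storeband'    # require path assumed from the gem layout

    job = Job.get(42)              # hypothetical finished job
    if Storeband.new.store(job, 'admin', 'manband') == false
      puts 'store failed, job left in STORE_ERROR'
    else
      puts 'workdir archived as manband-'+job.wid.to_s+'-'+job.node+'.zip'
    end
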
@@ -0,0 +1,47 @@
+ require 'uuid'
+ require 'data_mapper'
+ require 'dm-migrations'
+ require 'digest/sha1'
+
+ require 'manband/flowconfig.rb'
+
+ #DataMapper.setup(:default, 'sqlite:///tmp/project.db')
+ DataMapper.setup(:default, ENV['MYSQL_URL'])
+
+ class User
+   include DataMapper::Resource
+
+   @@admin = 'admin'
+
+   property :login, String, :key => true # User login
+   property :password, String # User password
+   property :s3_access, String # User id key
+   property :s3_secret, String # Secret key
+
+   def self.init
+     admin = User.get(@@admin)
+     if admin == nil
+       uuid = UUID.new
+       admin = User.new(:login => @@admin, :password => Digest::SHA1.hexdigest('admin'))
+       admin.save
+     end
+   end
+
+   def authenticate(password)
+     shapwd = Digest::SHA1.hexdigest(password)
+     if self.password == shapwd
+       return true
+     end
+     return false
+   end
+
+   def self.admin
+     return @@admin
+   end
+
+ end
+
+ DataMapper.finalize
+ DataMapper.auto_upgrade!
+
+ User.init()
+
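
User.init seeds an 'admin' account whose password digest is SHA1('admin'), so rotating that credential is the first thing a deployment should do. A short sketch (the replacement password is illustrative):

    require 'digest/sha1'
    require 'manband/user.rb'

    user = User.get('admin')          # seeded by User.init above
    puts user.authenticate('admin')   # => true until the default is changed
    user.update(:password => Digest::SHA1.hexdigest('s3cret'))

Note that the model stores unsalted SHA-1 digests, which is weak by modern standards.
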