pampa_workers 0.0.39 → 1.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/client.rb +0 -56
- data/lib/mybotprocess.rb +18 -20
- data/lib/mylocalprocess.rb +13 -7
- data/lib/myparentprocess.rb +3 -2
- data/lib/myprocess.rb +9 -8
- data/lib/myremoteprocess.rb +25 -15
- data/lib/pampa-local.rb +3 -3
- data/lib/pampa_workers.rb +4 -8
- data/lib/remoteworker.rb +1 -1
- data/lib/worker.rb +97 -188
- metadata +3 -63
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 69e059adc568b1a74eddf7929d917d867965392c
+  data.tar.gz: 100540adc7fb2e14451c199beb03238275c93da4
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 37e419cb21a3557761e6a088db8bcad6d49ce918d02aeb32a7070faf99d371b70a0dc13eb41aa773e189eb4be67f7aa22647501dabf61723f28c67c142fd6583
+  data.tar.gz: 7b00e1be1126b4f8ca316488bfa0ac2f79d6080c90210f45275d5c8f60217d71c3d44826fdedce0d202f17fc17a4815d8bc3fe852630ae283694ebd5ae0bae06
data/lib/client.rb
CHANGED
@@ -1,4 +1,3 @@
-require 'invoicing_payments_processing'
 require 'simple_host_monitoring'
 require_relative './user'
 require_relative './role'
@@ -34,62 +33,7 @@ module BlackStack
     def own_hosts()
       BlackStack::LocalHost.where(:id_client=>self.id, :delete_time=>nil)
     end
-
-    # -----------------------------------------------------------------------------------------
-    # Storage:
-    #
-    #
-    # -----------------------------------------------------------------------------------------
-
-    # returns the location of the storage for this client
-    def storage_folder
-      "#{BlackStack::Pampa::storage_folder}/#{self.id.to_guid}"
-    end
-
-    def storage_sub_folder(name)
-      "#{BlackStack::Pampa::storage_folder}/#{self.id.to_guid}/#{name}"
-    end
-
-    # returns the max allowed KB in the storage for this client
-    def storage_total_kb
-      # TODO: get this parameter from the paid invoces
-      1024*1024 # 1 GB
-    end
-
-    # returns the max allowed KB in the storage for this client
-    def storage_used_kb
-      path = self.storage_folder
-      fso = WIN32OLE.new('Scripting.FileSystemObject')
-      folder = fso.GetFolder(path)
-      (folder.size.to_f / 1024.to_f)
-    end
-
-    # returns the free available KB in the storage for this client
-    def storage_free_kb
-      total = self.storage_total_kb
-      used = self.storage_used_kb
-      total - used
-    end

-    # si el cliente no tiene creado el storage, entonces se lo crea, carpeta por carpeta, ferificando cada una si no existe ya.
-    def create_storage
-      folder = self.storage_folder
-      Dir.mkdir BlackStack::Pampa::storage_folder if Dir[BlackStack::Pampa::storage_folder].size==0
-      if Dir[folder].size==0
-        Dir.mkdir folder
-
-        BlackStack::Pampa::storage_sub_folders.each { |name|
-          s = "#{folder}/#{name}"
-          Dir.mkdir s if Dir[s].size==0
-        }
-      end
-    end
-
-    # retorna la primera division habilitada a la que pertenezca este cliente
-    def division
-      return self.users.first.division
-    end
-
     # -----------------------------------------------------------------------------------------
     # Configuration
     #
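The removed storage helpers implemented a simple quota model: storage_total_kb hard-coded a 1 GB allowance, storage_used_kb measured the client's folder through the Windows-only WIN32OLE FileSystemObject, and storage_free_kb was just the difference. A minimal, portable sketch of the same arithmetic (the Find-based traversal below is illustrative, not code from the gem):

    require 'find'

    STORAGE_QUOTA_KB = 1024 * 1024 # 1 GB, as hard-coded by the removed storage_total_kb

    # Sum the size of every regular file under the client's storage folder, in KB.
    def storage_used_kb(path)
      bytes = 0
      Find.find(path) { |f| bytes += File.size(f) if File.file?(f) }
      bytes / 1024.0
    end

    # Free space is simply the quota minus current usage.
    def storage_free_kb(path)
      STORAGE_QUOTA_KB - storage_used_kb(path)
    end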
data/lib/mybotprocess.rb
CHANGED
@@ -55,14 +55,14 @@ module BlackStack
           sError = parsed['status']
         end
       rescue Errno::ECONNREFUSED => e
-        sError = "Errno::ECONNREFUSED:" + e.
+        sError = "Errno::ECONNREFUSED:" + e.to_console
       rescue => e2
-        sError = "Exception: " + e2.
+        sError = "Exception: " + e2.to_console
       end
     end # while

     if (bSuccess==false)
-      raise
+      raise BlackStack::Netting::ApiCallException.new(sError)
     end

     return lnuser
@@ -89,22 +89,21 @@ module BlackStack
           sError = parsed['status']
         end
       rescue Errno::ECONNREFUSED => e
-        sError = "Errno::ECONNREFUSED:" + e.
+        sError = "Errno::ECONNREFUSED:" + e.to_console
       rescue => e2
-        sError = "Exception" + e2.
+        sError = "Exception:" + e2.to_console
       end
     end # while

     if (bSuccess==false)
-      raise
+      raise BlackStack::Netting::ApiCallException.new(sError)
     end

     return lnuser
   end # getLnUser()

   #
-  def notifyLnUserUrl(id_lnuser,
-=begin
+  def notifyLnUserUrl(id_lnuser, profile_url)
     nTries = 0
     parsed = nil
     bSuccess = false
@@ -116,7 +115,7 @@ module BlackStack
         res = BlackStack::Netting::call_post(url,
           {:api_key => BlackStack::Pampa::api_key,
           'id_lnuser' => id_lnuser,
-          'url' =>
+          'url' => profile_url,}
         )
         parsed = JSON.parse(res.body)

@@ -126,16 +125,15 @@ module BlackStack
           sError = parsed['status']
         end
       rescue Errno::ECONNREFUSED => e
-        sError = "Errno::ECONNREFUSED:" + e.
+        sError = "Errno::ECONNREFUSED:" + e.to_console
       rescue => e2
-        sError = "Exception" + e2.
+        sError = "Exception:" + e2.to_console
       end
     end # while

     if (bSuccess==false)
       raise "#{sError}"
     end
-=end
   end # notifyLnUserStatus

   #
@@ -161,9 +159,9 @@ module BlackStack
           sError = parsed['status']
         end
       rescue Errno::ECONNREFUSED => e
-        sError = "Errno::ECONNREFUSED:" + e.
+        sError = "Errno::ECONNREFUSED:" + e.to_console
       rescue => e2
-        sError = "Exception" + e2.
+        sError = "Exception:" + e2.to_console
       end
     end # while

@@ -196,9 +194,9 @@ module BlackStack
           sError = parsed['status']
         end
       rescue Errno::ECONNREFUSED => e
-        sError = "Errno::ECONNREFUSED:" + e.
+        sError = "Errno::ECONNREFUSED:" + e.to_console
      rescue => e2
-        sError = "Exception" + e2.
+        sError = "Exception:" + e2.to_console
       end
     end # while

@@ -256,9 +254,9 @@ module BlackStack
           sError = parsed['status']
         end
       rescue Errno::ECONNREFUSED => e
-        sError = "Errno::ECONNREFUSED:" + e.
+        sError = "Errno::ECONNREFUSED:" + e.to_console
       rescue => e2
-        sError = "Exception" + e2.
+        sError = "Exception:" + e2.to_console
       end
     end # while

@@ -330,9 +328,9 @@ module BlackStack
           sError = parsed['status']
         end
       rescue Errno::ECONNREFUSED => e
-        sError = "Errno::ECONNREFUSED:" + e.
+        sError = "Errno::ECONNREFUSED:" + e.to_console
       rescue => e2
-        sError = "Exception" + e2.
+        sError = "Exception:" + e2.to_console
       end
     end # while

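Every hunk in this file touches the same retry loop: transport errors are now rendered with to_console (an Exception helper the BlackStack logging stack is assumed to provide), and when all retries fail the loop raises a typed BlackStack::Netting::ApiCallException carrying the last error instead of a bare raise. A sketch of that pattern, with MAX_TRIES and call_api as illustrative names only:

    require 'json'

    MAX_TRIES = 5

    def call_api(url, params)
      tries = 0
      success = false
      error = nil
      parsed = nil
      while tries < MAX_TRIES && !success
        begin
          tries += 1
          res = BlackStack::Netting::call_post(url, params)
          parsed = JSON.parse(res.body)
          if parsed['status'] == 'success'
            success = true
          else
            error = parsed['status']
          end
        rescue Errno::ECONNREFUSED => e
          error = 'Errno::ECONNREFUSED:' + e.to_console
        rescue => e2
          error = 'Exception:' + e2.to_console
        end
      end # while
      # 1.1.1 surfaces the last error through a typed exception instead of a bare raise
      raise BlackStack::Netting::ApiCallException.new(error) if !success
      parsed
    end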
data/lib/mylocalprocess.rb
CHANGED
@@ -43,13 +43,13 @@ module BlackStack
         w.last_ping_time = now()
         w.name = self.fullWorkerName
         w.assigned_process = self.assigned_process
-        w.
+        w.id_client = self.id_client
         w.division_name = self.division_name
         w.save
       end
       if (w!=nil)
         w.assigned_process = self.assigned_process
-        w.
+        w.id_client = self.id_client
         w.division_name = self.division_name
         w.id_division = self.id_division
         w.save
@@ -65,22 +65,28 @@ module BlackStack
       BlackStack::Pampa::api_protocol,
       BlackStack::Pampa::api_domain,
       BlackStack::Pampa::api_port,
-      BlackStack::Pampa::api_key
+      BlackStack::Pampa::api_key,
+      self.id_client # ID of the client that has this thread assigned
     )

     # announcing my in the log
     logger.log "Child process is alive!"

     # obtengo los parametros del worker
-    logger.logs "Update from central (1)... "
+    logger.logs "Update from central (1-local)... "
     self.get
     logger.done

     # actualizo los datos de este worker (parent process)
-    logger.logs "Update worker (1)... "
+    logger.logs "Update worker (1-local)... "
     self.updateWorker
     logger.done
-
+
+    # actualizo los datos de este worker (parent process)
+    logger.logs "Switch logger id_client (log folder may change)... "
+    self.logger.id_client = self.id_client
+    logger.done
+
     while (self.canRun?)
       # reseteo en contador nested del logger
       self.logger.reset()
@@ -88,7 +94,7 @@ module BlackStack
       # announcing my in the log
       logger.log "Going to Run Local"
       logger.log "Process: #{self.assigned_process.to_s}."
-      logger.log "
+      logger.log "Client: #{(self.id_client.to_s.size==0)? 'n/a' : self.id_client.to_s}."

       # obtengo la hora de inicio
       start_time = Time.now
data/lib/myparentprocess.rb
CHANGED
@@ -12,7 +12,8 @@ module BlackStack
       BlackStack::Pampa::api_protocol,
       BlackStack::Pampa::api_domain,
       BlackStack::Pampa::api_port,
-      BlackStack::Pampa::api_key
+      BlackStack::Pampa::api_key,
+      self.id_client # ID of the client that has this thread assigned
     )

     #
@@ -54,7 +55,7 @@ module BlackStack
       # map response
       self.id = parsed['id']
       self.assigned_process = parsed['assigned_process']
-      self.
+      self.id_client = parsed['id_client']
       self.id_division = parsed['id_division']
       self.division_name = parsed['division_name']
       self.ws_url = parsed['ws_url']
data/lib/myprocess.rb
CHANGED
@@ -4,7 +4,7 @@ module BlackStack
     DEFAULT_MINIMUM_ENLAPSED_SECONDS = 60

     attr_accessor :assigned_process_changed, :assigned_division_changed, :verify_configuration
-    attr_accessor :logger, :id, :worker_name, :division_name, :minimum_enlapsed_seconds, :assigned_process, :
+    attr_accessor :logger, :id, :worker_name, :division_name, :minimum_enlapsed_seconds, :assigned_process, :id_client, :id_division, :ws_url, :ws_port
     attr_accessor :email, :password

     # constructor
@@ -100,15 +100,15 @@ module BlackStack
       end
     end # hello()

-    # notifico mis parametros (assigned_process,
-    def set(new_assigned_process,
+    # notifico mis parametros (assigned_process, id_client) a la division asignada
+    def set(new_assigned_process, new_id_client)
       if (self.ws_url.to_s.size > 0 && self.ws_port.to_s.size > 0)
         url = "#{BlackStack::Pampa::api_protocol}://#{self.ws_url.to_s}:#{self.ws_port.to_s}/api1.3/pampa/notify.json"
         res = BlackStack::Netting::call_post(url, {
           'api_key' => BlackStack::Pampa::api_key,
           'name' => self.fullWorkerName,
           'assigned_process' => new_assigned_process,
-          '
+          'id_client' => new_id_client }.merge( BlackStack::RemoteHost.new.poll )
         )
       end
     end
@@ -148,18 +148,18 @@ module BlackStack
       end # verify_configuration

       # si ya tenia asignada una division, entonces le notifico mi nueva configuracion
-      self.set(parsed['assigned_process'], parsed['
+      self.set(parsed['assigned_process'], parsed['id_client'])

       self.id = parsed['id']
       self.assigned_process = parsed['assigned_process']
-      self.
+      self.id_client = parsed['id_client']
       self.id_division = parsed['id_division']
       self.division_name = parsed['division_name']
       self.ws_url = parsed['ws_url']
       self.ws_port = parsed['ws_port']

       # le notifico a la nueva division asignada mi nueva configuracion
-      self.set(parsed['assigned_process'], parsed['
+      self.set(parsed['assigned_process'], parsed['id_client'])
     end
   end # get()

@@ -209,7 +209,8 @@ module BlackStack
         'api_key' => BlackStack::Pampa::api_key,
         'name' => self.fullWorkerName,
         'assigned_process' => self.assigned_process,
-        '
+        'id_client' => self.id_client,
+        'id_division' => self.id_division }.merge( BlackStack::RemoteHost.new.poll )
       )
       parsed = JSON.parse(res.body)
       if (parsed['status'] != "success")
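MyProcess#set now receives the client assignment as a second argument and forwards it, so the division is told both which script the worker runs and which client it is working for. A sketch of the payload it posts to the division's /api1.3/pampa/notify.json endpoint; the literal values are placeholders, and BlackStack::RemoteHost.new.poll is assumed to contribute the host-monitoring fields:

    payload = {
      'api_key'          => BlackStack::Pampa::api_key,
      'name'             => 'myhost.1',  # self.fullWorkerName
      'assigned_process' => 'mybot.rb',  # new_assigned_process
      'id_client'        => '00000000-0000-0000-0000-000000000000' # new_id_client
    }.merge(BlackStack::RemoteHost.new.poll) # host telemetry, per simple_host_monitoring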
data/lib/myremoteprocess.rb
CHANGED
@@ -7,10 +7,11 @@ module BlackStack
     attr_accessor :worker

     # update worker configuration in the division
-    def updateWorker()
-      #
+    def updateWorker()
+      # creo un remote worker que manejare en este proceso remote
       self.worker = BlackStack::RemoteWorker.new
       # me notifico a la central. obtengo asignacion si ya la tenia
+      # y vuelco la configuracion al remote worker
       url = "#{BlackStack::Pampa::api_url}/api1.3/pampa/get.json"
       res = BlackStack::Netting::call_post(url, {
         'api_key' => BlackStack::Pampa::api_key,
@@ -20,16 +21,19 @@ module BlackStack
       if (parsed['status'] != BlackStack::Netting::SUCCESS)
         raise parsed['status'].to_s
       else
-        self.worker.id
-        self.worker.assigned_process
-        self.worker.
-        self.worker.id_division
-        self.worker.division_name
-        self.worker.ws_url
-        self.worker.ws_port
-        self.worker.division
-        self.worker.division.name
+        self.worker.id = parsed['id']
+        self.worker.assigned_process = parsed['assigned_process']
+        self.worker.id_client = parsed['id_client']
+        self.worker.id_division = parsed['id_division']
+        self.worker.division_name = parsed['division_name']
+        self.worker.ws_url = parsed['ws_url']
+        self.worker.ws_port = parsed['ws_port']
+        self.worker.division = BlackStack::RemoteDivision.new
+        self.worker.division.name = parsed['division_name']
       end
+      # llamo al metodo de la clase padre que reporta la configuracion a
+      # la division del worker
+      self.set(parsed['assigned_process'], parsed['id_client'])
     end

     #
@@ -42,20 +46,26 @@ module BlackStack
       BlackStack::Pampa::api_protocol,
       BlackStack::Pampa::api_domain,
       BlackStack::Pampa::api_port,
-      BlackStack::Pampa::api_key
+      BlackStack::Pampa::api_key,
+      self.id_client # ID of the client that has this thread assigned
     )

     logger.log "Remote process is alive!"

     # actualiza parametros de la central
-    logger.logs "Update from central (1)... "
+    logger.logs "Update from central (1-remote)... "
     self.get
     logger.done

     # actualizo los datos de este worker (parent process)
-    logger.logs "Update worker (1)... "
+    logger.logs "Update worker (1-remote)... "
     self.updateWorker
     logger.done
+
+    # actualizo los datos de este worker (parent process)
+    logger.logs "Switch logger id_client (log folder may change)... "
+    self.logger.id_client = self.id_client
+    logger.done

     while (self.canRun?)

@@ -65,7 +75,7 @@ module BlackStack
       # announcing my in the log
       logger.log "Going to Run Remote"
       logger.log "Process: #{self.assigned_process.to_s}."
-      logger.log "
+      logger.log "Client: #{(self.id_client.to_s.size==0)? 'n/a' : self.id_client.to_s}."

       # obtengo la hora de inicio
       start_time = Time.now
data/lib/pampa-local.rb
CHANGED
@@ -29,13 +29,13 @@ def diff(unit, t0, t1)
 end

 def before(n) # n minutes
-  DB["SELECT DATEADD(mi, -#{n.to_s}, GETDATE()) AS [now]"].map(:now).to_s[0]
+  DB["SELECT DATEADD(mi, -#{n.to_s}, GETDATE()) AS [now]"].map(:now)[0].to_s[0..22]
 end

 def monthsFromNow(n) # n months
-  DB["SELECT DATEADD(mm, +#{n.to_s}, GETDATE()) AS [now]"].map(:now).to_s[0]
+  DB["SELECT DATEADD(mm, +#{n.to_s}, GETDATE()) AS [now]"].map(:now)[0].to_s[0..22]
 end

 def daysFromNow(n) # n days
-  DB["SELECT DATEADD(dd, +#{n.to_s}, GETDATE()) AS [now]"].map(:now).to_s[0]
+  DB["SELECT DATEADD(dd, +#{n.to_s}, GETDATE()) AS [now]"].map(:now)[0].to_s[0..22]
 end
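The change to before, monthsFromNow and daysFromNow fixes an indexing bug: map(:now) returns a one-element Array, so the old .to_s[0] stringified the whole array and returned its first character ("[") instead of the timestamp. The new expression takes the first element, converts it to a String and keeps the leading 23 characters. A small illustration with a stand-in value instead of a live DB call:

    require 'time'

    rows = [Time.parse('2020-01-02 03:04:05')]  # what DB[...].map(:now) yields

    rows.to_s[0]         # => "[" (first character of the Array rendered as a String)
    rows[0].to_s[0..22]  # => the first 23 characters of the timestamp itself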
data/lib/pampa_workers.rb
CHANGED
@@ -1,6 +1,3 @@
-require 'blackstack_commons'
-require 'simple_command_line_parser'
-require 'simple_cloud_logging'
 require 'simple_host_monitoring'
 require 'socket'
 require 'time'
@@ -11,9 +8,6 @@ require 'openssl'
 require 'tiny_tds'
 require 'sequel'

-#require './lib/base'
-#require './config.rb'
-
 require_relative './baseworker'
 require_relative './basedivision'

@@ -34,6 +28,8 @@ module BlackStack

   module Pampa

+    SLEEP_SECONDS = 10
+
     #
     @@division_name = nil

@@ -103,8 +99,8 @@ module BlackStack
     def self.storage_folder()
       @@storage_folder
     end
-    def self.storage_sub_folders(
-    @@
+    def self.storage_sub_folders()
+      @@storage_sub_folders
     end

     #
data/lib/remoteworker.rb
CHANGED
@@ -1,7 +1,7 @@
 module BlackStack

   class RemoteWorker
-    attr_accessor :id, :process, :last_ping_time, :name, :active, :id_division, :assigned_process, :
+    attr_accessor :id, :process, :last_ping_time, :name, :active, :id_division, :assigned_process, :id_client, :division_name, :ws_url, :ws_port, :division
     include BlackStack::BaseWorker
   end # Remote Worker

data/lib/worker.rb
CHANGED
@@ -11,147 +11,117 @@ module BlackStack
     BlackStack::Worker.dataset = BlackStack::Worker.dataset.disable_insert_output
     many_to_one :division, :class=>:'BlackStack::Division', :key=>:id_division
     many_to_one :user, :class=>:'BlackStack::User', :key=>:id_user
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        worker.name = worker['name'].to_s
-        worker.last_ping_time = now() # esta fecha es actualiada por el mismo worker, para indicar que esta vivo y trabajando
-        worker.id_division = worker['id_division']
-        worker.process = worker['assigned_process']
-        worker.assigned_process = worker['assigned_process']
-        worker.id_object = worker['id_object']
-        worker.division_name = worker['division_name']
-        worker.save()
-      else
-        #puts "update" ?
-      end
-
-      DB.execute("UPDATE worker SET active=1 WHERE name='#{worker['name'].to_s}'")
-
-      if (worker['id_division'] != nil)
-        DB.execute("UPDATE worker SET id_division='#{worker['id_division'].to_s}' WHERE name='#{worker['name'].to_s}'")
-      end
-
-      if (worker['assigned_process'] != nil)
-        DB.execute("UPDATE worker SET process='#{worker['assigned_process'].to_s}', assigned_process='#{worker['assigned_process'].to_s}' WHERE name='#{worker['name'].to_s}'")
-      end
-
-      if (worker['id_object'] != nil)
-        DB.execute("UPDATE worker SET id_object='#{worker['id_object'].to_s}' WHERE name='#{worker['name'].to_s}'")
-      end
-
-      if (worker['division_name'] != nil)
-        DB.execute("UPDATE worker SET division_name='#{worker['division_name'].to_s}' WHERE name='#{worker['name'].to_s}'")
-      end
-
-      if (worker['type']==nil || worker['type'].to_i==MyProcess::TYPE_LOCAL)
-        DB.execute("UPDATE worker SET type=#{MyProcess::TYPE_LOCAL.to_s} WHERE name='#{worker['name'].to_s}'")
-      else
-        DB.execute("UPDATE worker SET type=#{MyProcess::TYPE_REMOTE.to_s} WHERE name='#{worker['name'].to_s}'")
-      end
-    end
-
-    # release resources
-    DB.disconnect
-    GC.start
-    }
-
-    end
+    many_to_one :client, :class=>:'BlackStack::Client', :key=>:id_client
+    many_to_one :owner, :class=>:'BlackStack::Client', :key=>:id_client_owner
+    many_to_one :host, :class=>:'BlackStack::LocalHost', :key=>:id_host
+    many_to_one :current_job, :class=>:'BlackStack::WorkerJob', :key=>:id_workerjob
+    many_to_one :lnuser, :class=>:'BlackStack::LnUser', :key=>:id_lnuser
+    many_to_one :proxy, :class=>:'BlackStack::Proxy', :key=>:id_proxy
+
+    # Usage seconds of all the workers assigned to the client.
+    # Note that the same worker may has been assigned to different clients withing the same timeframe.
+    # This method will sum the seconds used by this client only
+    def self.client_usage_seconds(id_client, period='M', units=1)
+      row = DB[
+        "select datediff(ss, dateadd(#{period}#{period}, -#{units.to_s}, getdate()), getdate()) as total_seconds, isnull(sum(datediff(ss, j.job_start_time, j.job_end_time)), 0) as used_seconds " +
+        "from workerjob j with (nolock) " +
+        "where j.id_client = '#{id_client}' " +
+        "and j.create_time > dateadd(#{period}#{period}, -#{units.to_s}, getdate()) " +
+        "and j.job_start_time is not null " +
+        "and j.job_end_time is not null "
+      ].first
+      row[:used_seconds].to_f
     end
-
-    #
-
-
-
-
-
-
-
-    #
-
-
-
-
-
-
-      if (parsed['id_object'].to_s.size>0)
-        aux_id_object = "'#{parsed['id_object']}'"
-      else
-        aux_id_object = "NULL"
-      end
-
-      # NOTA: DEBO HACER EL UPDATE POR FUERA DE SQUEL, DEBIDO AL BUG DE MAPEO DE SEQUEL
-      q =
-      "UPDATE worker SET " +
-      "active=1, id_division='#{parsed['id_division']}', assigned_process='#{parsed['assigned_process'].to_s.gsub("'","''")}', id_object=#{aux_id_object}, division_name='#{parsed['division_name'].to_s.gsub("'","''")}' " +
-      "WHERE id='#{self.id}'"
-      DB.execute(q)
-    end
+
+    # Average usage ratio of all the workers assigned to the client.
+    # Note that the same worker may has been assigned to different clients withing the same timeframe.
+    # This method will compute the seconds used by this client only, over the total timeframe.
+    def self.client_usage_ratio(id_client, period='M', units=1)
+      row = DB[
+        "select datediff(ss, dateadd(#{period}#{period}, -#{units.to_s}, getdate()), getdate()) as total_seconds, isnull(sum(datediff(ss, j.job_start_time, j.job_end_time)), 0) as used_seconds " +
+        "from workerjob j with (nolock) " +
+        "where j.id_client = '#{id_client}' " +
+        "and j.create_time > dateadd(#{period}#{period}, -#{units.to_s}, getdate()) " +
+        "and j.job_start_time is not null " +
+        "and j.job_end_time is not null "
+      ].first
+      x = row[:used_seconds].to_f
+      y = row[:total_seconds].to_f
+      100.to_f * x / y
     end
-
-    #
-
-
-
-
+
+    # Usage ratio this worker by this client.
+    # Note that the same worker may has been assigned to different clients withing the same timeframe.
+    # This method will sum the seconds used by this client only.
+    def usage_seconds(id_client, period='M', units=1)
+      row = DB[
+        "select datediff(ss, dateadd(#{period}#{period}, -#{units.to_s}, getdate()), getdate()) as total_seconds, isnull(sum(datediff(ss, j.job_start_time, j.job_end_time)), 0) as used_seconds " +
+        "from workerjob j with (nolock) " +
+        "where j.id_client = '#{id_client}' " +
+        "and j.id_worker = '#{self.id}' " +
+        "and j.create_time > dateadd(#{period}#{period}, -#{units.to_s}, getdate()) " +
+        "and j.job_start_time is not null " +
+        "and j.job_end_time is not null "
+      ].first
+      row[:used_seconds].to_f
     end
-
-
+
+    # Usage ratio this worker by this client.
+    # Note that the same worker may has been assigned to different clients withing the same timeframe.
+    # This method will compute the seconds used by this client only, over the total timeframe.
+    def usage_ratio(id_client, period='M', units=1)
+      row = DB[
+        "select datediff(ss, dateadd(#{period}#{period}, -#{units.to_s}, getdate()), getdate()) as total_seconds, isnull(sum(datediff(ss, j.job_start_time, j.job_end_time)), 0) as used_seconds " +
+        "from workerjob j with (nolock) " +
+        "where j.id_client = '#{id_client}' " +
+        "and j.id_worker = '#{self.id}' " +
+        "and j.create_time > dateadd(#{period}#{period}, -#{units.to_s}, getdate()) " +
+        "and j.job_start_time is not null " +
+        "and j.job_end_time is not null "
+      ].first
+      x = row[:used_seconds].to_f
+      y = row[:total_seconds].to_f
+      100.to_f * x / y
+    end
+
     #
-    def
-    w = BlackStack::Worker.where(:name=>name).first
-    if
+    def self.create(h)
+      w = BlackStack::Worker.where(:name=>h['name']).first
+      if w.nil?
       w = BlackStack::Worker.new
+        w.id = h['id']
       end
-    w.
-    w.
-    w.
-    w.
-    w.
-    w.
-    w.
-    w.ws_port = parsed['ws_port']
+      w.name = h['name']
+      w.process = h['process']
+      w.last_ping_time = h['last_ping_time']
+      w.assigned_process = h['assigned_process']
+      w.id_client = h['id_client']
+      w.id_division = h['id_division']
+      w.division_name = h['division_name']
       w.save
     end
+
+    #
+    def to_hash
+      h = {}
+      h['id'] = self.id
+      h['name'] = self.name
+      h['process'] = self.process
+      h['last_ping_time'] = self.last_ping_time
+      h['assigned_process'] = self.assigned_process
+      h['id_client'] = self.id_client
+      h['id_division'] = self.id_division
+      h['division_name'] = self.division_name
+      h
+    end

     # Retorna true si este worker esta corriendo en nuestros propios servidores,
     # Retorna false si este worker esta correiendo en otro host, asumiendo que es el host del cliente.
     # Comparando la pulic_ip_address del worer con la lista en BlackStack::Pampa::set_farm_external_ip_addresses.
     def hosted?
-      BlackStack::Pampa::
+      BlackStack::Pampa::farm_external_ip_addresses.include?(self.public_ip_address)
     end # hosted?
-
-    # Si es un worker hosteado en nuestos servidores (ver metodo hosted?),
-    # => retorna la cantidad de dias que fa
-    def expirationDesc
-      s = "(unknown)"
-      if self.hosted?
-        if !self.expiration_time.nil?
-          s = DB["SELECT DATEDIFF(mi, GETDATE(), w.expiration_time) AS n FROM worker w WHERE w.id='#{self.id}'"].first[:n].to_i.to_time_spent
-        end
-      else # no hosted
-        s = "(self-hosted)"
-      end
-      s
-    end
-
+
     # Retorna la cantidad de minutos desde que este worker envio una senial de vida.
     # Este metodo se usa para saber si un worker esta activo o no.
     def last_ping_minutes()
@@ -163,72 +133,11 @@ module BlackStack
     def active?
       self.last_ping_minutes < BlackStack::BaseWorker::KEEP_ACTIVE_MINUTES
     end
-
-    # escribe en el archivo de log de este worker
-    def log(s, level=1, is_error=false)
-      logw(s, self.process, self.id, level, is_error)
-    end

     # envia una senial de vida a la division
-    # TODO: guardar fecha-hora del ultimo ping en un atributo privado, y evitar el acceso escesivo a la base de datos
     def ping()
       DB.execute("UPDATE worker SET last_ping_time=GETDATE() WHERE id='#{self.id}'")
     end
-
-    # DEPRECATED
-    def self.getActivesCount(processName)
-      raise "Method needs some code inside."
-    end
-
-    # obtiene array de workers actives, filtrados por proceso y por tipo de worker.
-    def self.getActives(assigned_process_name=nil, worker_name_filter=nil)
-      a = Array.new
-      q = ""
-      if (assigned_process_name!=nil)
-        q =
-        "SELECT p.id AS [id] " +
-        "FROM worker p WITH (NOLOCK INDEX(IX_peer__process__last_ping_time)) " +
-        "WHERE last_ping_time>DATEADD(mi,-5,GETDATE()) " +
-        "AND ISNULL(active,0)=1 " + # active indica si este worker fue asignado a esta division en la central
-        "AND assigned_process='#{assigned_process_name}' "
-
-        if worker_name_filter != nil
-          q = q +
-          "AND p.name LIKE '%#{worker_name_filter.to_s}%' "
-        end
-
-        q = q +
-        "ORDER BY p.name "
-        DB[q].all do |row|
-          a << BlackStack::Worker.where(:id=>row[:id]).first
-        end
-      else
-        q =
-        "SELECT p.id AS [id] " +
-        "FROM worker p WITH (NOLOCK INDEX(IX_peer__process__last_ping_time)) " +
-        "WHERE last_ping_time>DATEADD(mi,-5,GETDATE()) " +
-        "AND ISNULL(active,0)=1 "
-
-        if worker_name_filter != nil
-          q = q +
-          "AND p.name LIKE '%#{worker_name_filter.to_s}%' "
-        end
-
-        q = q +
-        "ORDER BY p.name "
-        DB[q].all do |row|
-          a << BlackStack::Worker.where(:id=>row[:id]).first
-        end
-      end
-
-      return a
-    end
-
-    # obtiene cantidad de registros en cola para incrawl.lnsearchvariation
-    def getPendingLnSearchVariationBlockInCrawlCount()
-      return DB.from(:lnsearchvariationblock).where(:incrawl_reservation_id=>self.id, :incrawl_start_time=>nil).count
-    end
-
   end # class Worker

 end # module BlackStack
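The rewritten Worker model replaces the old ad-hoc SQL maintenance block with Sequel associations plus accounting and serialization helpers: client_usage_seconds / client_usage_ratio aggregate workerjob rows for one client across all workers, the instance variants restrict the same query to a single worker, and create(h) / to_hash move a worker through the same hash shape the central's get.json response uses. A usage sketch, assuming a configured Sequel DB connection; the client GUID is a placeholder:

    id_client = '00000000-0000-0000-0000-000000000000'

    # Seconds this client's jobs consumed over the last month, across all workers,
    # and that usage expressed as a percentage of the timeframe.
    BlackStack::Worker.client_usage_seconds(id_client, 'M', 1)
    BlackStack::Worker.client_usage_ratio(id_client, 'M', 1)

    # The same figures restricted to a single worker.
    w = BlackStack::Worker.first
    w.usage_seconds(id_client, 'M', 1)
    w.usage_ratio(id_client, 'M', 1)

    # Serialize a worker and register it again from the hash
    # (the keys mirror the fields mapped from the central's response).
    h = w.to_hash
    BlackStack::Worker.create(h)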
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: pampa_workers
 version: !ruby/object:Gem::Version
-  version: 0.0.39
+  version: 1.1.1
 platform: ruby
 authors:
 - Leandro Daniel Sardi
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2020-01-02 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: websocket
@@ -91,47 +91,7 @@ dependencies:
       - !ruby/object:Gem::Version
         version: 4.28.0
 - !ruby/object:Gem::Dependency
-  name:
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: 0.0.20
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: 0.0.20
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: 0.0.20
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: 0.0.20
-- !ruby/object:Gem::Dependency
-  name: simple_cloud_logging
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: 1.1.16
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: 1.1.16
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: 1.1.16
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: 1.1.16
-- !ruby/object:Gem::Dependency
-  name: simple_command_line_parser
+  name: simple_host_monitoring
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
@@ -150,26 +110,6 @@ dependencies:
     - - ">="
       - !ruby/object:Gem::Version
         version: 1.1.1
-- !ruby/object:Gem::Dependency
-  name: simple_host_monitoring
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: 0.0.11
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: 0.0.11
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: 0.0.11
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: 0.0.11
 description: 'THIS GEM IS STILL IN DEVELOPMENT STAGE. Find documentation here: https://github.com/leandrosardi/pampa.'
 email: leandro.sardi@expandedventure.com
 executables: []