pampa_workers 0.0.39 → 1.1.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/client.rb +0 -56
- data/lib/mybotprocess.rb +51 -21
- data/lib/mylocalprocess.rb +13 -7
- data/lib/myparentprocess.rb +3 -2
- data/lib/myprocess.rb +9 -8
- data/lib/myremoteprocess.rb +25 -15
- data/lib/pampa-local.rb +3 -3
- data/lib/pampa_workers.rb +4 -8
- data/lib/remoteworker.rb +1 -1
- data/lib/worker.rb +110 -188
- metadata +3 -63
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-metadata.gz:
-data.tar.gz:
+metadata.gz: caea45d434da43ea67afef7bc6bb493f5a40b44b
+data.tar.gz: 073300b12dae57a3b354ee2bd12ad094ea3661bc
 SHA512:
-metadata.gz:
-data.tar.gz:
+metadata.gz: 3ad4f1eec949e91fd694afbc36043bf45e6fea95634175adb3b171e128c1f92327fca5b1b2bf14cfcdf38d27bf5ef528c6d57189787ff70577570b4c654f23e2
+data.tar.gz: a50659c82af48217c6a2c48b92f678aabdff92daa73fe472a60d4dbc869ac44761b0d1de8a5860daae7fb5a801e46125daec14de37d10ccc52bff34f69d1ba1e
data/lib/client.rb
CHANGED
@@ -1,4 +1,3 @@
-require 'invoicing_payments_processing'
 require 'simple_host_monitoring'
 require_relative './user'
 require_relative './role'
@@ -34,62 +33,7 @@ module BlackStack
 def own_hosts()
 BlackStack::LocalHost.where(:id_client=>self.id, :delete_time=>nil)
 end
-
-# -----------------------------------------------------------------------------------------
-# Storage:
-#
-#
-# -----------------------------------------------------------------------------------------
-
-# returns the location of the storage for this client
-def storage_folder
-"#{BlackStack::Pampa::storage_folder}/#{self.id.to_guid}"
-end
-
-def storage_sub_folder(name)
-"#{BlackStack::Pampa::storage_folder}/#{self.id.to_guid}/#{name}"
-end
-
-# returns the max allowed KB in the storage for this client
-def storage_total_kb
-# TODO: get this parameter from the paid invoces
-1024*1024 # 1 GB
-end
-
-# returns the max allowed KB in the storage for this client
-def storage_used_kb
-path = self.storage_folder
-fso = WIN32OLE.new('Scripting.FileSystemObject')
-folder = fso.GetFolder(path)
-(folder.size.to_f / 1024.to_f)
-end
-
-# returns the free available KB in the storage for this client
-def storage_free_kb
-total = self.storage_total_kb
-used = self.storage_used_kb
-total - used
-end
 
-# si el cliente no tiene creado el storage, entonces se lo crea, carpeta por carpeta, ferificando cada una si no existe ya.
-def create_storage
-folder = self.storage_folder
-Dir.mkdir BlackStack::Pampa::storage_folder if Dir[BlackStack::Pampa::storage_folder].size==0
-if Dir[folder].size==0
-Dir.mkdir folder
-
-BlackStack::Pampa::storage_sub_folders.each { |name|
-s = "#{folder}/#{name}"
-Dir.mkdir s if Dir[s].size==0
-}
-end
-end
-
-# retorna la primera division habilitada a la que pertenezca este cliente
-def division
-return self.users.first.division
-end
-
 # -----------------------------------------------------------------------------------------
 # Configuration
 #
data/lib/mybotprocess.rb
CHANGED
@@ -55,14 +55,14 @@ module BlackStack
 sError = parsed['status']
 end
 rescue Errno::ECONNREFUSED => e
-sError = "Errno::ECONNREFUSED:" + e.
+sError = "Errno::ECONNREFUSED:" + e.to_console
 rescue => e2
-sError = "Exception: " + e2.
+sError = "Exception: " + e2.to_console
 end
 end # while
 
 if (bSuccess==false)
-raise
+raise BlackStack::Netting::ApiCallException.new(sError)
 end
 
 return lnuser
@@ -89,22 +89,51 @@ module BlackStack
 sError = parsed['status']
 end
 rescue Errno::ECONNREFUSED => e
-sError = "Errno::ECONNREFUSED:" + e.
+sError = "Errno::ECONNREFUSED:" + e.to_console
 rescue => e2
-sError = "Exception" + e2.
+sError = "Exception:" + e2.to_console
 end
 end # while
 
 if (bSuccess==false)
-raise
+raise BlackStack::Netting::ApiCallException.new(sError)
 end
 
 return lnuser
 end # getLnUser()
+
+#
+def notifyInbox(lnuser, conv)
+conv[:chats].each { |chat|
+# armo URL de notificacion
+# se usa URI.encode para codificar caracteres no-ascii en los mensajes
+url =
+"#{BlackStack::Pampa::api_protocol}://#{self.ws_url}:#{self.ws_port}/api1.3/pampa/scrape.inbox/notify_lnchat.json?" +
+"api_key=#{BlackStack::Pampa::api_key}&" +
+"profile_code=#{URI.escape(conv[:profile_code])}&" +
+"profile_name=#{URI.escape(conv[:profile_name])}&" +
+"profile_headline=#{URI.escape(conv[:profile_headline])}&" +
+"first=#{URI.escape(conv[:first])}&" +
+"position=#{chat[:position].to_s}&" +
+"uid=#{lnuser['id']}&" +
+"sender_name=#{URI.escape(chat[:sender_name])}&" +
+"body=#{URI.escape(chat[:body])}&"
+
+# HELP: File.open('./output3.txt', 'a') { |file| file.write(url + "\r\n") }
 
+# push the chat
+uri = URI.parse(url.to_s)
+req = Net::HTTP::Get.new(uri.to_s)
+res = Net::HTTP.start(uri.host, uri.port, :use_ssl => true, :verify_mode => OpenSSL::SSL::VERIFY_NONE) {|http|
+http.request(req)
+}
+parsed = JSON.parse(res.body)
+raise "error uploading chat: #{parsed['status']}" if parsed['status'] != 'success'
+} # conv[:chats].each
+end
+
 #
-def notifyLnUserUrl(id_lnuser,
-=begin
+def notifyLnUserUrl(id_lnuser, profile_url)
 nTries = 0
 parsed = nil
 bSuccess = false
@@ -116,7 +145,7 @@ module BlackStack
 res = BlackStack::Netting::call_post(url,
 {:api_key => BlackStack::Pampa::api_key,
 'id_lnuser' => id_lnuser,
-'url' =>
+'url' => profile_url,}
 )
 parsed = JSON.parse(res.body)
 
@@ -126,16 +155,15 @@ module BlackStack
 sError = parsed['status']
 end
 rescue Errno::ECONNREFUSED => e
-sError = "Errno::ECONNREFUSED:" + e.
+sError = "Errno::ECONNREFUSED:" + e.to_console
 rescue => e2
-sError = "Exception" + e2.
+sError = "Exception:" + e2.to_console
 end
 end # while
 
 if (bSuccess==false)
 raise "#{sError}"
 end
-=end
 end # notifyLnUserStatus
 
 #
@@ -161,9 +189,9 @@ module BlackStack
 sError = parsed['status']
 end
 rescue Errno::ECONNREFUSED => e
-sError = "Errno::ECONNREFUSED:" + e.
+sError = "Errno::ECONNREFUSED:" + e.to_console
 rescue => e2
-sError = "Exception" + e2.
+sError = "Exception:" + e2.to_console
 end
 end # while
 
@@ -196,9 +224,9 @@ module BlackStack
 sError = parsed['status']
 end
 rescue Errno::ECONNREFUSED => e
-sError = "Errno::ECONNREFUSED:" + e.
+sError = "Errno::ECONNREFUSED:" + e.to_console
 rescue => e2
-sError = "Exception" + e2.
+sError = "Exception:" + e2.to_console
 end
 end # while
 
@@ -256,9 +284,9 @@ module BlackStack
 sError = parsed['status']
 end
 rescue Errno::ECONNREFUSED => e
-sError = "Errno::ECONNREFUSED:" + e.
+sError = "Errno::ECONNREFUSED:" + e.to_console
 rescue => e2
-sError = "Exception" + e2.
+sError = "Exception:" + e2.to_console
 end
 end # while
 
@@ -306,8 +334,9 @@ module BlackStack
 return ret
 end # isLnUserAvailable
 
-#
+# TODO: deprecated
 def releaseLnUser(id_lnuser, workflow_name='incrawl.lnsearchvariation')
+=begin
 nTries = 0
 parsed = nil
 bSuccess = false
@@ -330,9 +359,9 @@ module BlackStack
 sError = parsed['status']
 end
 rescue Errno::ECONNREFUSED => e
-sError = "Errno::ECONNREFUSED:" + e.
+sError = "Errno::ECONNREFUSED:" + e.to_console
 rescue => e2
-sError = "Exception" + e2.
+sError = "Exception:" + e2.to_console
 end
 end # while
 
@@ -341,6 +370,7 @@ module BlackStack
 end
 
 return ret
+=end
 end # isLnUserAvailable
 
 end # class MyBotProcess
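The hunks above replace the truncated `e.` string concatenations with `e.to_console` and turn the bare `raise` into `raise BlackStack::Netting::ApiCallException.new(sError)`, so a failed API call now surfaces the last captured error instead of an anonymous RuntimeError. The sketch below is illustrative only and is not code from the gem: it shows the retry pattern these methods follow. MAX_TRIES and the method name are hypothetical; `BlackStack::Netting::call_post`, `to_console`, and `ApiCallException` are the names that appear in the diff.

  require 'json'

  MAX_TRIES = 5 # hypothetical retry limit, for illustration only

  def call_with_retries(url, params)
    n_tries = 0
    success = false
    s_error = 'unknown'
    parsed  = nil
    while n_tries < MAX_TRIES && !success
      begin
        n_tries += 1
        res = BlackStack::Netting::call_post(url, params)
        parsed = JSON.parse(res.body)
        if parsed['status'] == 'success'
          success = true
        else
          s_error = parsed['status']
        end
      rescue Errno::ECONNREFUSED => e
        s_error = 'Errno::ECONNREFUSED:' + e.to_console
      rescue => e2
        s_error = 'Exception:' + e2.to_console
      end
    end
    # fail loudly after the retries, keeping the last captured error
    raise BlackStack::Netting::ApiCallException.new(s_error) if !success
    parsed
  end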
data/lib/mylocalprocess.rb
CHANGED
@@ -43,13 +43,13 @@ module BlackStack
 w.last_ping_time = now()
 w.name = self.fullWorkerName
 w.assigned_process = self.assigned_process
-w.
+w.id_client = self.id_client
 w.division_name = self.division_name
 w.save
 end
 if (w!=nil)
 w.assigned_process = self.assigned_process
-w.
+w.id_client = self.id_client
 w.division_name = self.division_name
 w.id_division = self.id_division
 w.save
@@ -65,22 +65,28 @@ module BlackStack
 BlackStack::Pampa::api_protocol,
 BlackStack::Pampa::api_domain,
 BlackStack::Pampa::api_port,
-BlackStack::Pampa::api_key
+BlackStack::Pampa::api_key,
+self.id_client # ID of the client that has this thread assigned
 )
 
 # announcing my in the log
 logger.log "Child process is alive!"
 
 # obtengo los parametros del worker
-logger.logs "Update from central (1)... "
+logger.logs "Update from central (1-local)... "
 self.get
 logger.done
 
 # actualizo los datos de este worker (parent process)
-logger.logs "Update worker (1)... "
+logger.logs "Update worker (1-local)... "
 self.updateWorker
 logger.done
-
+
+# actualizo los datos de este worker (parent process)
+logger.logs "Switch logger id_client (log folder may change)... "
+self.logger.id_client = self.id_client
+logger.done
+
 while (self.canRun?)
 # reseteo en contador nested del logger
 self.logger.reset()
@@ -88,7 +94,7 @@ module BlackStack
 # announcing my in the log
 logger.log "Going to Run Local"
 logger.log "Process: #{self.assigned_process.to_s}."
-logger.log "
+logger.log "Client: #{(self.id_client.to_s.size==0)? 'n/a' : self.id_client.to_s}."
 
 # obtengo la hora de inicio
 start_time = Time.now
data/lib/myparentprocess.rb
CHANGED
@@ -12,7 +12,8 @@ module BlackStack
 BlackStack::Pampa::api_protocol,
 BlackStack::Pampa::api_domain,
 BlackStack::Pampa::api_port,
-BlackStack::Pampa::api_key
+BlackStack::Pampa::api_key,
+self.id_client # ID of the client that has this thread assigned
 )
 
 #
@@ -54,7 +55,7 @@ module BlackStack
 # map response
 self.id = parsed['id']
 self.assigned_process = parsed['assigned_process']
-self.
+self.id_client = parsed['id_client']
 self.id_division = parsed['id_division']
 self.division_name = parsed['division_name']
 self.ws_url = parsed['ws_url']
data/lib/myprocess.rb
CHANGED
@@ -4,7 +4,7 @@ module BlackStack
 DEFAULT_MINIMUM_ENLAPSED_SECONDS = 60
 
 attr_accessor :assigned_process_changed, :assigned_division_changed, :verify_configuration
-attr_accessor :logger, :id, :worker_name, :division_name, :minimum_enlapsed_seconds, :assigned_process, :
+attr_accessor :logger, :id, :worker_name, :division_name, :minimum_enlapsed_seconds, :assigned_process, :id_client, :id_division, :ws_url, :ws_port
 attr_accessor :email, :password
 
 # constructor
@@ -100,15 +100,15 @@ module BlackStack
 end
 end # hello()
 
-# notifico mis parametros (assigned_process,
-def set(new_assigned_process,
+# notifico mis parametros (assigned_process, id_client) a la division asignada
+def set(new_assigned_process, new_id_client)
 if (self.ws_url.to_s.size > 0 && self.ws_port.to_s.size > 0)
 url = "#{BlackStack::Pampa::api_protocol}://#{self.ws_url.to_s}:#{self.ws_port.to_s}/api1.3/pampa/notify.json"
 res = BlackStack::Netting::call_post(url, {
 'api_key' => BlackStack::Pampa::api_key,
 'name' => self.fullWorkerName,
 'assigned_process' => new_assigned_process,
-'
+'id_client' => new_id_client }.merge( BlackStack::RemoteHost.new.poll )
 )
 end
 end
@@ -148,18 +148,18 @@ module BlackStack
 end # verify_configuration
 
 # si ya tenia asignada una division, entonces le notifico mi nueva configuracion
-self.set(parsed['assigned_process'], parsed['
+self.set(parsed['assigned_process'], parsed['id_client'])
 
 self.id = parsed['id']
 self.assigned_process = parsed['assigned_process']
-self.
+self.id_client = parsed['id_client']
 self.id_division = parsed['id_division']
 self.division_name = parsed['division_name']
 self.ws_url = parsed['ws_url']
 self.ws_port = parsed['ws_port']
 
 # le notifico a la nueva division asignada mi nueva configuracion
-self.set(parsed['assigned_process'], parsed['
+self.set(parsed['assigned_process'], parsed['id_client'])
 end
 end # get()
 
@@ -209,7 +209,8 @@ module BlackStack
 'api_key' => BlackStack::Pampa::api_key,
 'name' => self.fullWorkerName,
 'assigned_process' => self.assigned_process,
-'
+'id_client' => self.id_client,
+'id_division' => self.id_division }.merge( BlackStack::RemoteHost.new.poll )
 )
 parsed = JSON.parse(res.body)
 if (parsed['status'] != "success")
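With this change, `set` takes the client id as a second argument and posts it to notify.json along with the host telemetry merged in from `BlackStack::RemoteHost.new.poll`. A minimal sketch of the resulting payload follows; every value is invented for illustration, while the keys and the merge come from the hunk above.

  ws_url  = '127.0.0.1' # hypothetical division endpoint
  ws_port = 8080        # hypothetical
  url = "#{BlackStack::Pampa::api_protocol}://#{ws_url}:#{ws_port}/api1.3/pampa/notify.json"

  payload = {
    'api_key'          => BlackStack::Pampa::api_key,
    'name'             => 'myhost.child-1',                      # hypothetical worker name
    'assigned_process' => './dispatcher.rb',                     # hypothetical process
    'id_client'        => '00000000-0000-0000-0000-000000000000' # hypothetical client id
  }.merge( BlackStack::RemoteHost.new.poll )  # adds the host-monitoring fields

  res = BlackStack::Netting::call_post(url, payload)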
data/lib/myremoteprocess.rb
CHANGED
@@ -7,10 +7,11 @@ module BlackStack
 attr_accessor :worker
 
 # update worker configuration in the division
-def updateWorker()
-#
+def updateWorker()
+# creo un remote worker que manejare en este proceso remote
 self.worker = BlackStack::RemoteWorker.new
 # me notifico a la central. obtengo asignacion si ya la tenia
+# y vuelco la configuracion al remote worker
 url = "#{BlackStack::Pampa::api_url}/api1.3/pampa/get.json"
 res = BlackStack::Netting::call_post(url, {
 'api_key' => BlackStack::Pampa::api_key,
@@ -20,16 +21,19 @@ module BlackStack
 if (parsed['status'] != BlackStack::Netting::SUCCESS)
 raise parsed['status'].to_s
 else
-self.worker.id
-self.worker.assigned_process
-self.worker.
-self.worker.id_division
-self.worker.division_name
-self.worker.ws_url
-self.worker.ws_port
-self.worker.division
-self.worker.division.name
+self.worker.id = parsed['id']
+self.worker.assigned_process = parsed['assigned_process']
+self.worker.id_client = parsed['id_client']
+self.worker.id_division = parsed['id_division']
+self.worker.division_name = parsed['division_name']
+self.worker.ws_url = parsed['ws_url']
+self.worker.ws_port = parsed['ws_port']
+self.worker.division = BlackStack::RemoteDivision.new
+self.worker.division.name = parsed['division_name']
 end
+# llamo al metodo de la clase padre que reporta la configuracion a
+# la division del worker
+self.set(parsed['assigned_process'], parsed['id_client'])
 end
 
 #
@@ -42,20 +46,26 @@ module BlackStack
 BlackStack::Pampa::api_protocol,
 BlackStack::Pampa::api_domain,
 BlackStack::Pampa::api_port,
-BlackStack::Pampa::api_key
+BlackStack::Pampa::api_key,
+self.id_client # ID of the client that has this thread assigned
 )
 
 logger.log "Remote process is alive!"
 
 # actualiza parametros de la central
-logger.logs "Update from central (1)... "
+logger.logs "Update from central (1-remote)... "
 self.get
 logger.done
 
 # actualizo los datos de este worker (parent process)
-logger.logs "Update worker (1)... "
+logger.logs "Update worker (1-remote)... "
 self.updateWorker
 logger.done
+
+# actualizo los datos de este worker (parent process)
+logger.logs "Switch logger id_client (log folder may change)... "
+self.logger.id_client = self.id_client
+logger.done
 
 while (self.canRun?)
 
@@ -65,7 +75,7 @@ module BlackStack
 # announcing my in the log
 logger.log "Going to Run Remote"
 logger.log "Process: #{self.assigned_process.to_s}."
-logger.log "
+logger.log "Client: #{(self.id_client.to_s.size==0)? 'n/a' : self.id_client.to_s}."
 
 # obtengo la hora de inicio
 start_time = Time.now
data/lib/pampa-local.rb
CHANGED
@@ -29,13 +29,13 @@ def diff(unit, t0, t1)
 end
 
 def before(n) # n minutes
-DB["SELECT DATEADD(mi, -#{n.to_s}, GETDATE()) AS [now]"].map(:now).to_s[0]
+DB["SELECT DATEADD(mi, -#{n.to_s}, GETDATE()) AS [now]"].map(:now)[0].to_s[0..22]
 end
 
 def monthsFromNow(n) # n months
-DB["SELECT DATEADD(mm, +#{n.to_s}, GETDATE()) AS [now]"].map(:now).to_s[0]
+DB["SELECT DATEADD(mm, +#{n.to_s}, GETDATE()) AS [now]"].map(:now)[0].to_s[0..22]
 end
 
 def daysFromNow(n) # n days
-DB["SELECT DATEADD(dd, +#{n.to_s}, GETDATE()) AS [now]"].map(:now).to_s[0]
+DB["SELECT DATEADD(dd, +#{n.to_s}, GETDATE()) AS [now]"].map(:now)[0].to_s[0..22]
 end
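These three helpers previously called `.map(:now).to_s[0]`, which stringifies the whole Sequel result array and keeps only its first character; the new `[0].to_s[0..22]` takes the first row's value and keeps the first 23 characters of the timestamp. A stand-alone illustration of the difference, using a string stand-in instead of a real DB row:

  # Illustrative only: Sequel's map(:now) returns an array of values.
  times = ['2020-02-19 10:30:00.1234567'] # stand-in for DB[...].map(:now)

  old_result = times.to_s[0]        # => "[" -- first character of the inspected array
  new_result = times[0].to_s[0..22] # => "2020-02-19 10:30:00.123" -- first 23 characters of the value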
data/lib/pampa_workers.rb
CHANGED
@@ -1,6 +1,3 @@
-require 'blackstack_commons'
-require 'simple_command_line_parser'
-require 'simple_cloud_logging'
 require 'simple_host_monitoring'
 require 'socket'
 require 'time'
@@ -11,9 +8,6 @@ require 'openssl'
 require 'tiny_tds'
 require 'sequel'
 
-#require './lib/base'
-#require './config.rb'
-
 require_relative './baseworker'
 require_relative './basedivision'
 
@@ -34,6 +28,8 @@ module BlackStack
 
 module Pampa
 
+SLEEP_SECONDS = 10
+
 #
 @@division_name = nil
 
@@ -103,8 +99,8 @@ module BlackStack
 def self.storage_folder()
 @@storage_folder
 end
-def self.storage_sub_folders(
-@@
+def self.storage_sub_folders()
+@@storage_sub_folders
 end
 
 #
data/lib/remoteworker.rb
CHANGED
@@ -1,7 +1,7 @@
 module BlackStack
 
 class RemoteWorker
-attr_accessor :id, :process, :last_ping_time, :name, :active, :id_division, :assigned_process, :
+attr_accessor :id, :process, :last_ping_time, :name, :active, :id_division, :assigned_process, :id_client, :division_name, :ws_url, :ws_port, :division
 include BlackStack::BaseWorker
 end # Remote Worker
 
data/lib/worker.rb
CHANGED
@@ -11,147 +11,130 @@ module BlackStack
 BlackStack::Worker.dataset = BlackStack::Worker.dataset.disable_insert_output
 many_to_one :division, :class=>:'BlackStack::Division', :key=>:id_division
 many_to_one :user, :class=>:'BlackStack::User', :key=>:id_user
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-worker.name = worker['name'].to_s
-worker.last_ping_time = now() # esta fecha es actualiada por el mismo worker, para indicar que esta vivo y trabajando
-worker.id_division = worker['id_division']
-worker.process = worker['assigned_process']
-worker.assigned_process = worker['assigned_process']
-worker.id_object = worker['id_object']
-worker.division_name = worker['division_name']
-worker.save()
-else
-#puts "update" ?
-end
-
-DB.execute("UPDATE worker SET active=1 WHERE name='#{worker['name'].to_s}'")
-
-if (worker['id_division'] != nil)
-DB.execute("UPDATE worker SET id_division='#{worker['id_division'].to_s}' WHERE name='#{worker['name'].to_s}'")
-end
-
-if (worker['assigned_process'] != nil)
-DB.execute("UPDATE worker SET process='#{worker['assigned_process'].to_s}', assigned_process='#{worker['assigned_process'].to_s}' WHERE name='#{worker['name'].to_s}'")
-end
-
-if (worker['id_object'] != nil)
-DB.execute("UPDATE worker SET id_object='#{worker['id_object'].to_s}' WHERE name='#{worker['name'].to_s}'")
-end
-
-if (worker['division_name'] != nil)
-DB.execute("UPDATE worker SET division_name='#{worker['division_name'].to_s}' WHERE name='#{worker['name'].to_s}'")
-end
-
-if (worker['type']==nil || worker['type'].to_i==MyProcess::TYPE_LOCAL)
-DB.execute("UPDATE worker SET type=#{MyProcess::TYPE_LOCAL.to_s} WHERE name='#{worker['name'].to_s}'")
-else
-DB.execute("UPDATE worker SET type=#{MyProcess::TYPE_REMOTE.to_s} WHERE name='#{worker['name'].to_s}'")
-end
-end
-
-# release resources
-DB.disconnect
-GC.start
-}
-
-end
+many_to_one :client, :class=>:'BlackStack::Client', :key=>:id_client
+many_to_one :owner, :class=>:'BlackStack::Client', :key=>:id_client_owner
+many_to_one :host, :class=>:'BlackStack::LocalHost', :key=>:id_host
+many_to_one :current_job, :class=>:'BlackStack::WorkerJob', :key=>:id_workerjob
+many_to_one :lnuser, :class=>:'BlackStack::LnUser', :key=>:id_lnuser
+many_to_one :proxy, :class=>:'BlackStack::Proxy', :key=>:id_proxy
+
+# Usage seconds of all the workers assigned to the client.
+# Note that the same worker may has been assigned to different clients withing the same timeframe.
+# This method will sum the seconds used by this client only
+def self.client_usage_seconds(id_client, period='M', units=1)
+row = DB[
+"select datediff(ss, dateadd(#{period}#{period}, -#{units.to_s}, getdate()), getdate()) as total_seconds, isnull(sum(datediff(ss, j.job_start_time, j.job_end_time)), 0) as used_seconds " +
+"from workerjob j with (nolock) " +
+"where j.id_client = '#{id_client}' " +
+"and j.create_time > dateadd(#{period}#{period}, -#{units.to_s}, getdate()) " +
+"and j.job_start_time is not null " +
+"and j.job_end_time is not null "
+].first
+row[:used_seconds].to_f
 end
-
-#
-
-
-
-
-
-
-
-#
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-DB.execute(q)
-end
+
+# Average usage ratio of all the workers assigned to the client.
+# Note that the same worker may has been assigned to different clients withing the same timeframe.
+# This method will compute the seconds used by this client only, over the total timeframe.
+def self.client_usage_ratio(id_client, period='M', units=1)
+#
+row = DB[
+"select count(*) as total_workers " +
+"from worker w with (nolock) " +
+"where w.id_client = '#{id_client}' "
+].first
+t = row[:total_workers].to_f
+
+#
+row = DB[
+"select datediff(ss, dateadd(#{period}#{period}, -#{units.to_s}, getdate()), getdate()) as total_seconds, isnull(sum(datediff(ss, j.job_start_time, j.job_end_time)), 0) as used_seconds " +
+"from workerjob j with (nolock) " +
+"where j.id_client = '#{id_client}' " +
+"and j.create_time > dateadd(#{period}#{period}, -#{units.to_s}, getdate()) " +
+"and j.job_start_time is not null " +
+"and j.job_end_time is not null "
+].first
+
+#
+x = row[:used_seconds].to_f
+y = row[:total_seconds].to_f
+100.to_f * ( x / t ) / y
 end
-
-#
-
-
-
-
+
+# Usage ratio this worker by this client.
+# Note that the same worker may has been assigned to different clients withing the same timeframe.
+# This method will sum the seconds used by this client only.
+def usage_seconds(id_client, period='M', units=1)
+row = DB[
+"select datediff(ss, dateadd(#{period}#{period}, -#{units.to_s}, getdate()), getdate()) as total_seconds, isnull(sum(datediff(ss, j.job_start_time, j.job_end_time)), 0) as used_seconds " +
+"from workerjob j with (nolock) " +
+"where j.id_client = '#{id_client}' " +
+"and j.id_worker = '#{self.id}' " +
+"and j.create_time > dateadd(#{period}#{period}, -#{units.to_s}, getdate()) " +
+"and j.job_start_time is not null " +
+"and j.job_end_time is not null "
+].first
+row[:used_seconds].to_f
 end
-
-
+
+# Usage ratio this worker by this client.
+# Note that the same worker may has been assigned to different clients withing the same timeframe.
+# This method will compute the seconds used by this client only, over the total timeframe.
+def usage_ratio(id_client, period='M', units=1)
+row = DB[
+"select datediff(ss, dateadd(#{period}#{period}, -#{units.to_s}, getdate()), getdate()) as total_seconds, isnull(sum(datediff(ss, j.job_start_time, j.job_end_time)), 0) as used_seconds " +
+"from workerjob j with (nolock) " +
+"where j.id_client = '#{id_client}' " +
+"and j.id_worker = '#{self.id}' " +
+"and j.create_time > dateadd(#{period}#{period}, -#{units.to_s}, getdate()) " +
+"and j.job_start_time is not null " +
+"and j.job_end_time is not null "
+].first
+x = row[:used_seconds].to_f
+y = row[:total_seconds].to_f
+100.to_f * x / y
+end
+
 #
-def
-w = BlackStack::Worker.where(:name=>name).first
-if
+def self.create(h)
+w = BlackStack::Worker.where(:name=>h['name']).first
+if w.nil?
 w = BlackStack::Worker.new
+w.id = h['id']
 end
-w.
-w.
-w.
-w.
-w.
-w.
-w.
-w.
+w.name = h['name']
+w.process = h['process']
+w.last_ping_time = h['last_ping_time']
+w.assigned_process = h['assigned_process']
+w.id_client = h['id_client']
+w.id_division = h['id_division']
+w.division_name = h['division_name']
+w.public_ip_address = h['public_ip_address']
 w.save
 end
+
+#
+def to_hash
+h = {}
+h['id'] = self.id
+h['name'] = self.name
+h['process'] = self.process
+h['last_ping_time'] = self.last_ping_time
+h['assigned_process'] = self.assigned_process
+h['id_client'] = self.id_client
+h['id_division'] = self.id_division
+h['division_name'] = self.division_name
+h['public_ip_address'] = self.public_ip_address
+h
+end
 
 # Retorna true si este worker esta corriendo en nuestros propios servidores,
 # Retorna false si este worker esta correiendo en otro host, asumiendo que es el host del cliente.
 # Comparando la pulic_ip_address del worer con la lista en BlackStack::Pampa::set_farm_external_ip_addresses.
 def hosted?
-BlackStack::Pampa::
+BlackStack::Pampa::farm_external_ip_addresses.include?(self.public_ip_address)
 end # hosted?
-
-# Si es un worker hosteado en nuestos servidores (ver metodo hosted?),
-# => retorna la cantidad de dias que fa
-def expirationDesc
-s = "(unknown)"
-if self.hosted?
-if !self.expiration_time.nil?
-s = DB["SELECT DATEDIFF(mi, GETDATE(), w.expiration_time) AS n FROM worker w WHERE w.id='#{self.id}'"].first[:n].to_i.to_time_spent
-end
-else # no hosted
-s = "(self-hosted)"
-end
-s
-end
-
+
 # Retorna la cantidad de minutos desde que este worker envio una senial de vida.
 # Este metodo se usa para saber si un worker esta activo o no.
 def last_ping_minutes()
@@ -163,72 +146,11 @@ module BlackStack
 def active?
 self.last_ping_minutes < BlackStack::BaseWorker::KEEP_ACTIVE_MINUTES
 end
-
-# escribe en el archivo de log de este worker
-def log(s, level=1, is_error=false)
-logw(s, self.process, self.id, level, is_error)
-end
 
 # envia una senial de vida a la division
-# TODO: guardar fecha-hora del ultimo ping en un atributo privado, y evitar el acceso escesivo a la base de datos
 def ping()
 DB.execute("UPDATE worker SET last_ping_time=GETDATE() WHERE id='#{self.id}'")
 end
-
-# DEPRECATED
-def self.getActivesCount(processName)
-raise "Method needs some code inside."
-end
-
-# obtiene array de workers actives, filtrados por proceso y por tipo de worker.
-def self.getActives(assigned_process_name=nil, worker_name_filter=nil)
-a = Array.new
-q = ""
-if (assigned_process_name!=nil)
-q =
-"SELECT p.id AS [id] " +
-"FROM worker p WITH (NOLOCK INDEX(IX_peer__process__last_ping_time)) " +
-"WHERE last_ping_time>DATEADD(mi,-5,GETDATE()) " +
-"AND ISNULL(active,0)=1 " + # active indica si este worker fue asignado a esta division en la central
-"AND assigned_process='#{assigned_process_name}' "
-
-if worker_name_filter != nil
-q = q +
-"AND p.name LIKE '%#{worker_name_filter.to_s}%' "
-end
-
-q = q +
-"ORDER BY p.name "
-DB[q].all do |row|
-a << BlackStack::Worker.where(:id=>row[:id]).first
-end
-else
-q =
-"SELECT p.id AS [id] " +
-"FROM worker p WITH (NOLOCK INDEX(IX_peer__process__last_ping_time)) " +
-"WHERE last_ping_time>DATEADD(mi,-5,GETDATE()) " +
-"AND ISNULL(active,0)=1 "
-
-if worker_name_filter != nil
-q = q +
-"AND p.name LIKE '%#{worker_name_filter.to_s}%' "
-end
-
-q = q +
-"ORDER BY p.name "
-DB[q].all do |row|
-a << BlackStack::Worker.where(:id=>row[:id]).first
-end
-end
-
-return a
-end
-
-# obtiene cantidad de registros en cola para incrawl.lnsearchvariation
-def getPendingLnSearchVariationBlockInCrawlCount()
-return DB.from(:lnsearchvariationblock).where(:incrawl_reservation_id=>self.id, :incrawl_start_time=>nil).count
-end
-
 end # class Worker
 
 end # module BlackStack
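worker.rb drops the old maintenance and getActives code and adds per-client usage accounting plus `self.create(h)` / `to_hash` serialization. The sketch below is illustrative only and not shipped with the gem: the client id is invented, and `period='M', units=1` means "the last month" because the methods build `dateadd(MM, -1, getdate())` internally.

  id_client = '00000000-0000-0000-0000-000000000000' # hypothetical client id

  # seconds consumed by this client across all of its workers in the last month
  secs = BlackStack::Worker.client_usage_seconds(id_client, 'M', 1)

  # average usage of the client's workers over the same window, as a percentage
  ratio = BlackStack::Worker.client_usage_ratio(id_client, 'M', 1)

  # per-worker figures, plus the new hash serialization
  w = BlackStack::Worker.first
  puts w.usage_seconds(id_client)   # seconds this worker spent on this client's jobs
  puts w.usage_ratio(id_client)     # same figure as a percentage of the timeframe
  puts w.to_hash['division_name']   # to_hash mirrors the fields consumed by self.create(h)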
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: pampa_workers
 version: !ruby/object:Gem::Version
-version:
+version: 1.1.5
 platform: ruby
 authors:
 - Leandro Daniel Sardi
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2020-02-19 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
 name: websocket
@@ -91,47 +91,7 @@ dependencies:
 - !ruby/object:Gem::Version
 version: 4.28.0
 - !ruby/object:Gem::Dependency
-name:
-requirement: !ruby/object:Gem::Requirement
-requirements:
-- - "~>"
-- !ruby/object:Gem::Version
-version: 0.0.20
-- - ">="
-- !ruby/object:Gem::Version
-version: 0.0.20
-type: :runtime
-prerelease: false
-version_requirements: !ruby/object:Gem::Requirement
-requirements:
-- - "~>"
-- !ruby/object:Gem::Version
-version: 0.0.20
-- - ">="
-- !ruby/object:Gem::Version
-version: 0.0.20
-- !ruby/object:Gem::Dependency
-name: simple_cloud_logging
-requirement: !ruby/object:Gem::Requirement
-requirements:
-- - "~>"
-- !ruby/object:Gem::Version
-version: 1.1.16
-- - ">="
-- !ruby/object:Gem::Version
-version: 1.1.16
-type: :runtime
-prerelease: false
-version_requirements: !ruby/object:Gem::Requirement
-requirements:
-- - "~>"
-- !ruby/object:Gem::Version
-version: 1.1.16
-- - ">="
-- !ruby/object:Gem::Version
-version: 1.1.16
-- !ruby/object:Gem::Dependency
-name: simple_command_line_parser
+name: simple_host_monitoring
 requirement: !ruby/object:Gem::Requirement
 requirements:
 - - "~>"
@@ -150,26 +110,6 @@ dependencies:
 - - ">="
 - !ruby/object:Gem::Version
 version: 1.1.1
-- !ruby/object:Gem::Dependency
-name: simple_host_monitoring
-requirement: !ruby/object:Gem::Requirement
-requirements:
-- - "~>"
-- !ruby/object:Gem::Version
-version: 0.0.11
-- - ">="
-- !ruby/object:Gem::Version
-version: 0.0.11
-type: :runtime
-prerelease: false
-version_requirements: !ruby/object:Gem::Requirement
-requirements:
-- - "~>"
-- !ruby/object:Gem::Version
-version: 0.0.11
-- - ">="
-- !ruby/object:Gem::Version
-version: 0.0.11
 description: 'THIS GEM IS STILL IN DEVELOPMENT STAGE. Find documentation here: https://github.com/leandrosardi/pampa.'
 email: leandro.sardi@expandedventure.com
 executables: []