mkit 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/Gemfile +21 -0
- data/Gemfile.lock +137 -0
- data/LICENSE +21 -0
- data/README.md +126 -0
- data/Rakefile +54 -0
- data/bin/mkitc +31 -0
- data/bin/mkitd +55 -0
- data/config/database.yml +5 -0
- data/config/mkit_config.yml +15 -0
- data/config/mkitd_config.sh +5 -0
- data/db/migrate/001_setup.rb +105 -0
- data/db/migrate/002_mkit_jobs.rb +17 -0
- data/db/schema.rb +108 -0
- data/lib/mkit/app/controllers/mkitjobs_controller.rb +37 -0
- data/lib/mkit/app/controllers/pods_controller.rb +30 -0
- data/lib/mkit/app/controllers/services_controller.rb +87 -0
- data/lib/mkit/app/helpers/docker_helper.rb +75 -0
- data/lib/mkit/app/helpers/erb_helper.rb +18 -0
- data/lib/mkit/app/helpers/haproxy.rb +41 -0
- data/lib/mkit/app/helpers/interface_helper.rb +17 -0
- data/lib/mkit/app/helpers/services_helper.rb +54 -0
- data/lib/mkit/app/mkit_server.rb +8 -0
- data/lib/mkit/app/model/dns_host.rb +11 -0
- data/lib/mkit/app/model/lease.rb +26 -0
- data/lib/mkit/app/model/mkit_job.rb +48 -0
- data/lib/mkit/app/model/pod.rb +95 -0
- data/lib/mkit/app/model/pool.rb +60 -0
- data/lib/mkit/app/model/service.rb +266 -0
- data/lib/mkit/app/model/service_config.rb +16 -0
- data/lib/mkit/app/model/service_port.rb +30 -0
- data/lib/mkit/app/model/setting.rb +1 -0
- data/lib/mkit/app/model/volume.rb +53 -0
- data/lib/mkit/app/templates/docker/docker_run.sh.erb +1 -0
- data/lib/mkit/app/templates/haproxy/0000_defaults.cfg +23 -0
- data/lib/mkit/app/templates/haproxy/xapp_haproxy.cfg.erb +30 -0
- data/lib/mkit/cmd_runner.rb +27 -0
- data/lib/mkit/config/config.rb +18 -0
- data/lib/mkit/config/environment.rb +26 -0
- data/lib/mkit/config/initializers/001_hash.rb +11 -0
- data/lib/mkit/config/initializers/002_openstruct.rb +7 -0
- data/lib/mkit/config/load_default_configs.rb +29 -0
- data/lib/mkit/config/the_config.yml +3 -0
- data/lib/mkit/ctypes.rb +31 -0
- data/lib/mkit/docker_listener.rb +97 -0
- data/lib/mkit/exceptions.rb +30 -0
- data/lib/mkit/haproxy.rb +48 -0
- data/lib/mkit/job_manager.rb +53 -0
- data/lib/mkit/mkit_dns.rb +54 -0
- data/lib/mkit/mkit_interface.rb +31 -0
- data/lib/mkit/sagas/asaga.rb +11 -0
- data/lib/mkit/sagas/create_pod_saga.rb +28 -0
- data/lib/mkit/sagas/saga_manager.rb +10 -0
- data/lib/mkit/status.rb +47 -0
- data/lib/mkit/utils.rb +51 -0
- data/lib/mkit/version.rb +4 -0
- data/lib/mkit/workers/aworker.rb +11 -0
- data/lib/mkit/workers/haproxy_worker.rb +35 -0
- data/lib/mkit/workers/pod_worker.rb +39 -0
- data/lib/mkit/workers/service_worker.rb +27 -0
- data/lib/mkit/workers/worker_manager.rb +14 -0
- data/lib/mkit.rb +158 -0
- data/mkit.gemspec +40 -0
- data/mkitd +10 -0
- data/samples/apps/postgres.yml +22 -0
- data/samples/apps/rabbitmq.yml +19 -0
- data/samples/daemontools/log/run +44 -0
- data/samples/daemontools/run +42 -0
- data/samples/systemd/mkitd.service +12 -0
- metadata +393 -0
data/lib/mkit/app/model/pool.rb ADDED
@@ -0,0 +1,60 @@
+require 'mkit/app/model/lease'
+require 'mkit/status'
+require 'mkit/exceptions'
+
+class Pool < ActiveRecord::Base
+  has_many :lease, dependent: :destroy
+
+  def check_status
+    if status == MKIt::PoolStatus::EXAUSTED
+      raise PoolExaustedException.new
+    end
+  end
+
+  def next_lease_ip
+    self.check_status
+    ips = range.split('-')
+    next_ip = ips[0]
+    next_ip = next_ip.to_i
+    lease.select(:status == MKIt::PoolStatus::IN_USE || :status == MKIt::PoolStatus::RESERVED).each { |l|
+      leased_ip = l.ip.split('.')[3]
+      leased_ip = leased_ip.to_i
+      if leased_ip >= next_ip
+        next_ip = leased_ip+1
+      end
+    }
+    if next_ip > ips[1].to_i
+      self.status = MKIt::PoolStatus::EXAUSTED
+      self.save
+      raise PoolExaustedException.new
+    end
+
+    ip_add = self.ip.split('.')
+
+    "#{ip_add[0]}.#{ip_add[1]}.#{ip_add[2]}.#{next_ip}"
+  end
+
+  def request(service:, status:)
+    lease_ip = next_lease_ip
+    idx = lease_ip.split('.')[3]
+    new_lease = Lease.new(
+      pool: self,
+      service: service,
+      interface_name: "vmkit#{idx}",
+      interface_type: 'tun',
+      status: status,
+      ip: lease_ip
+    )
+    new_lease.save
+    new_lease
+  end
+
+  def request_for(service)
+    request(service: service, status: MKIt::PoolStatus::IN_USE)
+  end
+
+  def reserve_for(service)
+    request(service: service, status: MKIt::PoolStatus::RESERVED)
+  end
+end
+
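`Pool#next_lease_ip` scans the pool's existing leases and hands out the lowest free host octet inside `range`, marking the pool `EXAUSTED` (and raising `PoolExaustedException`) once the range runs out; `reserve_for`/`request_for` wrap that address into a saved `Lease`. A minimal usage sketch — the pool values mirror the defaults seeded by `load_default_configs` later in this diff, but the IP and the `some_service` record are illustrative assumptions:

```ruby
# Illustrative only: the IP and service below are assumptions, not values from this diff.
pool = Pool.new(name: MKIt::Utils.me, ip: '10.210.0.1', range: '10-200',
                netmask: '24', preferred: true)
pool.save

lease = pool.reserve_for(some_service)   # a Lease with status RESERVED
lease.ip               # => "10.210.0.10"  (first free octet in '10-200')
lease.interface_name   # => "vmkit10"
```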
data/lib/mkit/app/model/service.rb ADDED
@@ -0,0 +1,266 @@
+require 'mkit/app/model/volume'
+require 'mkit/app/model/service_port'
+require 'mkit/app/model/service_config'
+require 'mkit/app/model/pod'
+require 'mkit/app/helpers/erb_helper'
+require 'mkit/app/helpers/docker_helper'
+require 'mkit/mkit_interface'
+require 'mkit/status'
+require 'mkit/utils'
+require 'mkit/ctypes'
+require 'mkit/app/model/pool'
+require 'mkit/app/model/service'
+require 'mkit/app/model/pod'
+require 'mkit/app/model/dns_host'
+require 'mkit/app/helpers/erb_helper'
+require 'mkit/app/helpers/docker_helper'
+require 'mkit/app/helpers/haproxy'
+
+#
+class Service < ActiveRecord::Base
+  has_many :pod, dependent: :destroy
+  has_many :volume, dependent: :destroy
+  has_many :service_config, dependent: :destroy
+  has_many :service_port, dependent: :destroy
+
+  has_one :lease, dependent: :destroy
+  has_one :dns_host, dependent: :destroy
+
+  before_destroy :clean_up
+
+  validates :name, uniqueness: true
+  validates :name, presence: true
+
+  include MKIt::ERBHelper
+  include MKIt::DockerHelper
+
+  def self.create(yaml)
+    config = yaml["service"]
+    raise MKIt::ServiceAlreadyExists.new unless Service.find_by_name(config.name).nil?
+
+    srv = Service.new(
+      name: config.name,
+      version: 1,
+      image: config.image,
+      command: config.command,
+      status: MKIt::Status::CREATING
+    )
+
+    # docker network
+    if config.network.nil? || config.network.empty?
+      srv.pods_network="mkit"
+    else
+      srv.pods_network=config.network
+    end
+
+    # reserve pool ip
+    srv.lease = Pool.find_by_name(MKIt::Utils.me).reserve_for(srv)
+
+    srv.dns_host = DnsHost.new(
+      service: srv,
+      name: srv.name,
+      ip: srv.lease.ip
+    )
+
+    # create service network
+    srv.deploy_network
+
+    # configure
+    srv.configure(config)
+    #
+    srv.status = MKIt::Status::CREATED
+    srv.save
+    data = { service_id: srv.id, version: srv.version }
+    # create pod
+    (1..srv.min_replicas).each { |i|
+      MkitJob.publish(topic: :create_pod_saga, service_id: srv.id, data: data)
+    }
+    srv
+  end
+
+  def configure(config)
+    self.image = config.image if config.image != self.image
+    self.command = config.command if config.command != self.command
+
+    unless config.resources.nil?
+      self.max_replicas = config.resources.max_replicas unless config.resources.max_replicas.nil? || config.resources.max_replicas < 1
+      self.min_replicas = config.resources.min_replicas unless config.resources.min_replicas.nil? || config.resources.min_replicas < 1
+    else
+      self.min_replicas = 1
+      self.max_replicas = 1
+    end
+    self.max_replicas = self.min_replicas if self.min_replicas > self.max_replicas
+
+    # haproxy ports
+    self.service_port = []
+    config.ports&.each do |p|
+      port = ServicePort.create(service: self, config: p)
+      self.service_port << port
+    end
+
+    # volumes
+    self.volume = []
+    config.volumes&.each { |volume|
+      self.add_volume(volume)
+    }
+    # environment
+    self.service_config=[]
+    config.environment&.each_pair { |key,value|
+      self.add_service_config(key, value)
+    }
+    self.volume.each { | volume |
+      volume.deploy
+    }
+  end
+
+  def update!(yaml)
+    config = yaml["service"]
+    raise MKIt::ServiceNameMismatch.new unless config.name == self.name
+    self.version+=1
+    self.configure(config)
+
+    # start new pod, destroy old pod...
+    self.pod.each { |pod| MkitJob.publish(topic: :destroy_pod, pod_id: pod.id, data: {}) }
+    # create pod
+    data = { service_id: self.id, version: self.version }
+    (1..self.min_replicas).each { |i|
+      MkitJob.publish(topic: :create_pod_saga, service_id: self.id, data: data)
+    }
+    self.save
+  end
+
+  def create_pods_network
+    netw = inspect_network(self.pods_network)
+    create_network(self.pods_network) if netw.nil?
+  end
+
+  def deploy_network
+    # create service interface...
+    self.lease.confirm
+    self.lease.up
+    # ...and pods network
+    self.create_pods_network
+  end
+
+  def add_volume(volume_config)
+    v = Volume.create(self, volume_config)
+    self.volume << v
+    v
+  end
+
+  def add_service_config(key, value)
+    v = ServiceConfig.create(service: self, key: key, value: value)
+    self.service_config << v
+    v
+  end
+
+  def current_configs
+    self.service_config&.select{ |x| x.ctype == MKIt::CType::ENVIRONMENT.to_s && x.version == self.version}
+  end
+
+  def current_ports
+    self.service_port&.select{ |x| x.version == self.version}
+  end
+
+  def my_dns
+    MKIt::Interface.ip
+  end
+
+  def update_status!
+    combined_status = nil
+    self.pod.each { |pod|
+      child_status = pod.set_status_from_docker
+      if combined_status
+        case combined_status
+        when MKIt::Status::RUNNING
+          case child_status
+          when MKIt::Status::STOPPED || MKIt::Status::PENDING
+            combined_status = MKIt::Status::DEGRATED
+          end
+        when MKIt::Status::STOPPED
+          case child_status
+          when MKIt::Status::RUNNING || MKIt::Status::PENDING
+            combined_status = MKIt::Status::DEGRATED
+          end
+        when MKIt::Status::PENDING
+          case child_status
+          when MKIt::Status::RUNNING || MKIt::Status::STOPPED
+            combined_status = MKIt::Status::DEGRATED
+          end
+        end
+      else
+        combined_status = child_status
+      end
+    }
+    combined_status = MKIt::Status::CREATING unless combined_status
+    self.status = combined_status
+    self.save
+    self.status
+  end
+
+  #
+  # ha proxy configs & template
+  #
+  def public_ports
+    self.service_port.each.map{|p| p.external_port}.uniq
+  end
+
+  def ports_by_external(external_port)
+    self.service_port.where('external_port = ?', external_port)
+  end
+
+  def ports_mode_by_external(external_port)
+    ports = self.service_port.where('external_port = ?', external_port).first
+    ports.mode if ports
+  end
+
+  def update_proxy
+    MkitJob.publish(topic: :update_proxy_config, application_id: self.id, data: proxy_config)
+  end
+
+  def proxy_config
+    # config
+    haproxy = parse
+    my_addr = self.lease.ip.split('.')[3]
+    filename = "#{'%04i' % my_addr.to_i}_#{self.name}.cfg"
+    MKItLogger.debug("haproxy config file: #{filename}")
+    {filename: filename, data: haproxy}
+  end
+
+  def parse
+    parse_model(MKIt::Templates::HAPROXY).result(binding)
+  end
+
+  def clean_up
+    my_addr = self.lease.ip.split('.')[3]
+    filename = "#{'%04i' % my_addr.to_i}_#{self.name}.cfg"
+    MkitJob.publish(topic: :destroy_proxy_config, data: {filename: filename})
+  end
+
+  #
+  # ctrl
+  #
+  def start
+    self.pod.each { |p|
+      MkitJob.publish(topic: :start_pod, service_id: self.id, pod_id: p.id)
+    }
+  end
+
+  def stop
+    self.pod.each { |p|
+      MkitJob.publish(topic: :stop_pod, service_id: self.id, pod_id: p.id)
+    }
+  end
+
+  def as_json(options = {})
+    srv = super
+    a=[:pod, :volume, :service_config, :service_port]
+    a.each { | k |
+      srv[k] ||= []
+      self.send(k).each { |v|
+        srv[k] << v.as_json
+      }
+    }
+    srv
+  end
+end
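`Service.create` takes the parsed YAML and expects the `service` entry to respond to `name`, `image`, `command`, `network`, `resources`, `ports`, `volumes` and `environment`; elsewhere the gem converts hashes into OpenStruct-like objects with `Hash#to_o` (see `config/config.rb` below). A hedged sketch of an input it could accept — the key names are the ones `create`/`configure` read above, while the values and the `to_o` conversion are illustrative assumptions:

```ruby
# Hypothetical input for Service.create; only the field names are taken from the code above.
yaml = {
  'service' => {
    'name'        => 'my-db',
    'image'       => 'postgres:12',
    'command'     => nil,
    'network'     => 'mkit',                        # falls back to "mkit" when empty
    'resources'   => { 'min_replicas' => 1, 'max_replicas' => 2 },
    'ports'       => ['5532:5432:tcp:round_robin'], # src:dest:mode:load-balancing
    'volumes'     => ['docker://pg_data:/var/lib/postgresql/data'],
    'environment' => { 'POSTGRES_PASSWORD' => 'secret' }
  }.to_o  # Hash#to_o comes from 001_hash.rb, which is not shown in this excerpt
}
srv = Service.create(yaml)  # reserves a lease, configures, then publishes :create_pod_saga jobs
```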
data/lib/mkit/app/model/service_config.rb ADDED
@@ -0,0 +1,16 @@
+require 'mkit/ctypes'
+
+class ServiceConfig < ActiveRecord::Base
+  belongs_to :service
+
+  def self.create(service:, key:, value:, ctype: MKIt::CType::ENVIRONMENT)
+    ServiceConfig.new(
+      service: service,
+      key: key,
+      value: value,
+      version: service.version,
+      ctype: ctype
+    )
+  end
+end
+
data/lib/mkit/app/model/service_port.rb ADDED
@@ -0,0 +1,30 @@
+require 'mkit/app/model/service'
+
+class ServicePort < ActiveRecord::Base
+  belongs_to :service
+
+  def self.create(service:, config:)
+    sp = ServicePort.new(service: service, version: service.version)
+    sp.parse_config(config)
+    sp
+  end
+
+  # haproxy support for port range - leave src blank
+  # service:
+  #   ports:
+  #     # src:dest:tcp|http:load-balancing
+  #     - 5532:5432:tcp:round_robin
+  # model:
+  #   service_ports:
+  #     - external: 5432
+  #       internal: 5432
+  #       mode: tcp|http
+  #       load_bal:
+  def parse_config(config)
+    ports = config.split(':')
+    self.external_port = ports[0]
+    self.internal_port = ports[1]
+    self.mode = ports[2]
+    self.load_bal = ports[3]
+  end
+end
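The comment block above documents the `src:dest:mode:load-balancing` string that `parse_config` splits on `':'`. A quick illustration using the comment's own example value:

```ruby
# Built outside ServicePort.create purely to show the split.
sp = ServicePort.new
sp.parse_config('5532:5432:tcp:round_robin')
# external_port => 5532, internal_port => 5432, mode => "tcp", load_bal => "round_robin"
```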
data/lib/mkit/app/model/setting.rb ADDED
@@ -0,0 +1 @@
+class Setting < ActiveRecord::Base; end
data/lib/mkit/app/model/volume.rb ADDED
@@ -0,0 +1,53 @@
+require 'mkit/ctypes'
+require 'mkit/app/helpers/docker_helper'
+require 'fileutils'
+
+class Volume < ActiveRecord::Base
+  belongs_to :service
+  before_destroy :clean_up
+
+  def self.create(service, volume)
+    case volume
+    when /^docker:\/\//
+      ctype = MKIt::CType::DOCKER_STORAGE
+      paths = volume[9..].split(':')
+      # vname="#{service.name}.#{service.application.name}.#{paths[0]}"
+      vname = paths[0]
+    when /^\//
+      ctype = MKIt::CType::LOCAL_STORAGE
+      paths = volume.split(':')
+      vname = paths[0]
+    end
+    Volume.new(
+      service: service,
+      name: vname,
+      path: paths[1],
+      ctype: ctype
+    )
+  end
+
+  def deploy
+    create_volume
+  end
+
+  def create_volume
+    case self.ctype
+    when MKIt::CType::DOCKER_STORAGE
+      MKIt::DockerHelper.create_volume(self.name)
+    when MKIt::CType::LOCAL_STORAGE
+      # nop
+    end
+  end
+
+  def delete_volume
+    case self.ctype
+    when MKIt::CType::DOCKER_STORAGE
+      MKIt::DockerHelper.delete_volume(self.name)
+    end
+  end
+
+  def clean_up
+    # nop
+  end
+end
+
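`Volume.create` tells the two volume notations apart by prefix: `docker://name:path` becomes a named Docker volume (`DOCKER_STORAGE`, the `docker://` prefix is dropped via `volume[9..]`), while an absolute host path becomes `LOCAL_STORAGE`. A small sketch — the `srv` record and the paths are illustrative:

```ruby
# Named Docker volume
v1 = Volume.create(srv, 'docker://pg_data:/var/lib/postgresql/data')
# v1.name => "pg_data", v1.path => "/var/lib/postgresql/data", ctype DOCKER_STORAGE

# Host-path volume
v2 = Volume.create(srv, '/opt/mkit/data:/data')
# v2.name => "/opt/mkit/data", v2.path => "/data", ctype LOCAL_STORAGE
```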
data/lib/mkit/app/templates/docker/docker_run.sh.erb ADDED
@@ -0,0 +1 @@
+docker run -d --name <%=name%> <%service.service_config&.select{ |x| x.ctype == MKIt::CType::ENVIRONMENT.to_s }.each { |env|%><%=" -e #{env.key}=\"#{env.value}\""%><%}%> <%service.volume&.each { |vol|%><%=" -v \"#{vol.name}:#{vol.path}\""%><%}%> --network <%=service.pods_network%> --dns <%=service.my_dns%> <%=service.image%> <%=service.command unless service.command.nil?%>
data/lib/mkit/app/templates/haproxy/0000_defaults.cfg ADDED
@@ -0,0 +1,23 @@
+global
+    log 127.0.0.1 local2
+    maxconn 4096
+    user nobody
+    group nobody
+
+defaults
+    log global
+    mode http
+    option tcplog
+    retries 3
+    timeout http-request 10s
+    timeout queue 1m
+    timeout connect 10s
+    timeout client 1m
+    timeout server 1m
+    timeout http-keep-alive 10s
+    timeout check 10s
+    maxconn 3000
+    stats enable
+    stats auth someuser:somepassword
+    stats uri /haproxy_stats
+
data/lib/mkit/app/templates/haproxy/xapp_haproxy.cfg.erb ADDED
@@ -0,0 +1,30 @@
+#
+# MKIt generated file
+#
+<% public_ports.each { |external_port|%>
+#
+# start <%=name%>-<%=external_port%>
+#
+frontend <%=name%>-<%=external_port%>-front
+    bind <%=lease.ip%>:<%=external_port%>
+    mode <%=ports_mode_by_external(external_port)%>
+    #
+    use_backend <%=name%>-<%=external_port%>-back
+
+backend <%=name%>-<%=external_port%>-back
+    mode <%=ports_mode_by_external(external_port)%>
+    #balance leastconn
+    balance roundrobin
+<%if ports_mode_by_external(external_port) == 'http'%>
+    option httpclose
+    option forwardfor
+    cookie JSESSIONID prefix
+<%end%>
+<%ports_by_external(external_port).each { |port| %>
+<%port.service.pod.each { | pod | %>
+    server <%=pod.name%> <%=pod.ip%>:<%=port.internal_port%> <%if port.mode == 'http'%>cookie A<%end%> check<%}%><%}%>
+#
+# end of <%=name%>-<%=external_port%>
+#
+<%}%>
+
data/lib/mkit/cmd_runner.rb ADDED
@@ -0,0 +1,27 @@
+require 'pty'
+require 'mkit/exceptions'
+
+module MKIt
+  class CmdRunner
+    def self.run(cmd)
+      result=''
+      begin
+        shell = PTY.spawn( cmd )
+        begin
+          shell[0].each { |line| result << line.strip! }
+        rescue Errno::EIO
+          # nothing
+        ensure
+          shell[0].close
+        end
+        shell[1].close
+        Process.wait(shell[2])
+      rescue PTY::ChildExited
+        # nothing
+      end
+      raise CmdRunnerException.new("command '#{cmd[0..30]}...' returned an error response") if !$?.nil? && $?.exitstatus != 0
+      result
+    end
+  end
+end
+
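`CmdRunner.run` spawns the command on a pseudo-terminal, concatenates the stripped output lines into one string, and raises `CmdRunnerException` (defined in `mkit/exceptions.rb`, not shown in this excerpt) when the child exits non-zero. A usage sketch with an illustrative command:

```ruby
require 'mkit/cmd_runner'

out = MKIt::CmdRunner.run('ip addr show')  # illustrative command
puts out
# A failing command raises CmdRunnerException instead of returning output.
```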
data/lib/mkit/config/config.rb ADDED
@@ -0,0 +1,18 @@
+require 'yaml'
+#
+# MKIt::Config.load_yml!('samples/mkit.yml')
+# MKIt::Config.application.services
+# requires Hash.to_o
+module MKIt
+  module Config
+    extend self
+    def load_yml!(path)
+      @config = YAML.load(File.new(path).read).to_o
+    end
+    #
+    def method_missing(name,*args)
+      return @config.send(name,*args)
+      super.method_missing name
+    end
+  end
+end
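As its header comment shows, `MKIt::Config` loads a YAML file once and then forwards any method call to the loaded object through `method_missing`, so nested keys read as chained accessors. A sketch under that assumption — the file name and values are made up, but the `mkit.my_network.ip` path is the one `load_default_configs` reads below:

```ruby
# contents of an illustrative YAML file:
#   mkit:
#     my_network:
#       ip: 10.210.0.1
MKIt::Config.load_yml!('illustrative_mkit_config.yml')
MKIt::Config.mkit.my_network.ip   # => "10.210.0.1"
```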
data/lib/mkit/config/environment.rb ADDED
@@ -0,0 +1,26 @@
+require 'bundler/setup'
+require 'dry-container'
+require 'sinatra/activerecord'
+require 'rubydns'
+require 'sinatra'
+
+require_relative 'initializers/001_hash'
+require_relative 'initializers/002_openstruct'
+
+SOCKET_PATH = File.expand_path('/tmp/app.sock')
+
+# sinatra conf
+configure do
+  # set :public_folder, 'public'
+  # set :views, 'app/views'
+  set :server, :thin
+  # enable/disable the built-in web server
+  # set :run, :false
+  # server hostname or IP address
+  # set :bind, SOCKET_PATH, "localhost:4567"
+  # set :port, 4567
+  #enable :sessions
+  #set :session_secret, 'password_security'
+  set :default_content_type, :json
+end
+
data/lib/mkit/config/load_default_configs.rb ADDED
@@ -0,0 +1,29 @@
+require 'mkit/app/model/setting'
+require 'mkit/app/model/pool'
+require 'mkit/config/config'
+require 'fileutils'
+require 'mkit/utils'
+
+module MKIt
+  module Initializers
+
+    def self.load_my_configuration(config_dir: MKIt::Utils.config_dir)
+      MKIt::Utils.log.info "loading configurations from '#{config_dir}'..."
+      MKIt::Config.load_yml!("#{config_dir}/mkit_config.yml")
+    end
+
+    def self.load_default_configs
+      if Pool.find_by_name(MKIt::Utils.me).nil?
+        st = Pool.new(
+          name: MKIt::Utils.me,
+          ip: MKIt::Config.mkit.my_network.ip,
+          range: '10-200',
+          netmask: '24',
+          preferred: true
+        )
+        st.save
+      end
+    end
+  end
+end
+
data/lib/mkit/ctypes.rb ADDED
@@ -0,0 +1,31 @@
+#
+#
+module MKIt
+  class MKItCType
+    def initialize(status)
+      @status = status.to_s
+    end
+
+    def to_s
+      @status.downcase
+    end
+  end
+
+  module CType
+    ENVIRONMENT = MKIt::MKItCType.new(:environment)
+    DOCKER_STORAGE = MKIt::MKItCType.new(:docker)
+    LOCAL_STORAGE = MKIt::MKItCType.new(:local)
+
+    NETWORK_SPECIFIC = MKIt::MKItCType.new(:specific)
+    NETWORK_BRIDGE = MKIt::MKItCType.new(:bridge)
+    TUN_INTERFACE = MKIt::MKItCType.new(:tun)
+    TAP_INTERFACE = MKIt::MKItCType.new(:tap)
+  end
+
+  module Templates
+    DOCKER_RUN = 'docker/docker_run.sh'
+    DOCKER_BUILD = 'docker/docker_build.sh'
+    HAPROXY = 'haproxy/xapp_haproxy.cfg'
+    HAPROXY_DEFAULTS = 'haproxy/0000_defaults.cfg'
+  end
+end
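The `MKItCType` constants act as lightweight enum members whose `to_s` is the lower-cased symbol name, which is why the models compare stored strings against e.g. `MKIt::CType::ENVIRONMENT.to_s`. A brief illustration:

```ruby
MKIt::CType::ENVIRONMENT.to_s     # => "environment"
MKIt::CType::DOCKER_STORAGE.to_s  # => "docker"
# Service#current_configs filters records with x.ctype == MKIt::CType::ENVIRONMENT.to_s
```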