nex_client 0.17.0 → 0.18.0.pre1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/nex_client.rb +6 -0
- data/lib/nex_client/addon.rb +15 -0
- data/lib/nex_client/app.rb +4 -0
- data/lib/nex_client/base_resource.rb +15 -0
- data/lib/nex_client/cli.rb +35 -124
- data/lib/nex_client/commands.rb +5 -0
- data/lib/nex_client/commands/addons.rb +11 -31
- data/lib/nex_client/commands/apps.rb +8 -44
- data/lib/nex_client/commands/cube_instances.rb +0 -21
- data/lib/nex_client/commands/events.rb +60 -0
- data/lib/nex_client/commands/helpers.rb +0 -50
- data/lib/nex_client/commands/ip_whitelisting.rb +124 -0
- data/lib/nex_client/commands/logs.rb +58 -0
- data/lib/nex_client/commands/policies.rb +184 -0
- data/lib/nex_client/commands/waf.rb +134 -0
- data/lib/nex_client/cube_instance.rb +9 -0
- data/lib/nex_client/exec_cmd.rb +17 -0
- data/lib/nex_client/faraday_middleware.rb +8 -0
- data/lib/nex_client/faraday_middleware/handle_nex_api_errors.rb +39 -0
- data/lib/nex_client/version.rb +1 -1
- data/spec/nex_client/app_spec.rb +2 -0
- data/spec/nex_client/cube_instance_spec.rb +2 -0
- data/spec/nex_client/exec_cmd_spec.rb +14 -0
- data/spec/shared/base_resource.rb +38 -0
- data/spec/spec_helper.rb +8 -53
- data/spec/support/is_expected_block.rb +5 -0
- metadata +19 -5
data/lib/nex_client/commands/cube_instances.rb
@@ -42,22 +42,6 @@ module NexClient
         self.display_cubes(e)
       end
 
-      # Retrieve application logs from all containers
-      def self.logs(args,opts)
-        name = args.first
-        e = NexClient::CubeInstance.find(uuid: name).first
-
-        # Display error
-        unless e
-          error("Error! Could not find cube: #{name}")
-          return false
-        end
-
-        # Retrieve logs and display them
-        logs = e.logs(tail: opts.tail).first
-        self.display_logs(logs.log_ret)
-      end
-
       def self.snapshots(args,opts)
         name = args.first
         e = NexClient::CubeInstance.find(uuid: name).first
@@ -232,11 +216,6 @@ module NexClient
         "#{o.type.singularize}:#{o.name}"
       end
 
-      def self.display_logs(logs)
-        puts logs
-        puts "\n"
-      end
-
       def self.display_snapshots(snapshots)
         table = Terminal::Table.new title: SNAPSHOTS_TITLE, headings: SNAPSHOTS_HEADERS do |t|
           [snapshots].flatten.compact.each do |e|
data/lib/nex_client/commands/events.rb
@@ -0,0 +1,60 @@
+# frozen_string_literal: true
+
+module NexClient
+  module Commands
+    module Events
+      def self.configure(c, klass)
+        entity = klass.entity_name
+        entities = klass.entities_name
+
+        c.syntax = "nex-cli #{entities}:events APP_NAME [options]"
+        c.summary = "Gather system events for a given #{entity}"
+        c.description = "Gather system events for a given #{entity}"
+        c.example "display events for my#{entity}", "nex-cli #{entities}:events my#{entity}"
+        c.example "display 100 events for my#{entity}", "nex-cli #{entities}:events --tail 100 my#{entity}"
+        c.option '--tail NUMBER', String, 'number of events to retrieve (default: 50)'
+        c.option '--type TYPE', String, 'filter events on type (e.g. status, container)'
+        c.action do |args, options|
+          manage(args, options, klass)
+        end
+      end
+
+      # Retrieve resource events
+      def self.manage(args, opts, _klass)
+        filters = {}
+        filters[:'source.name'] = args.first
+        filters[:event] = opts.type if opts.type
+        tail_size = (opts.tail || 50).to_i
+
+        events = NexClient::Event.where(filters).order(id: :desc)
+        list = events.to_a
+        while list.count < tail_size
+          events = events.pages.next
+          break unless events
+          list |= events.to_a
+        end
+        list = list.first(tail_size)
+
+        display_events(list.to_a)
+      end
+
+      # Display a list of events
+      def self.display_events(events)
+        # Content
+        events.sort_by { |e| e.id.to_i }.each do |e|
+          username = e&.username || 'system'
+          session_id = e&.session_id || '-'
+          puts [
+            e.created_at,
+            e.event.ljust(12,' '),
+            username.ljust(15,' '),
+            session_id.ljust(6,' '),
+            e.level.ljust(6,' '),
+            e.message
+          ].join(" | ")
+        end
+        puts "\n"
+      end
+    end
+  end
+end
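The --tail handling in Events.manage above accumulates result pages until enough events have been collected, then trims the overshoot. A standalone sketch of that loop; FakePage is a hypothetical stand-in for the json_api_client-style paginated result set the real code gets from NexClient::Event.where(...):

# FakePage mimics the pagination chain used above: result.pages.next
FakePage = Struct.new(:items, :next_page) do
  def to_a
    items
  end

  def pages
    self
  end

  def next
    next_page
  end
end

def collect_tail(page, tail_size)
  list = page.to_a
  while list.count < tail_size
    page = page.pages.next
    break unless page
    list |= page.to_a   # set union keeps events unique across pages
  end
  list.first(tail_size) # trim any overshoot from the last page fetched
end

p2 = FakePage.new([3, 4, 5], nil)
p1 = FakePage.new([1, 2], p2)
p collect_tail(p1, 4) # => [1, 2, 3, 4]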
data/lib/nex_client/commands/helpers.rb
@@ -3,7 +3,6 @@
 module NexClient
   module Commands
     module Helpers
-      LOG_COLORS = [:cyan,:green,:red,:yellow,:magenta]
       VARS_TITLE = "Environment Variables".colorize(:blue)
       VARS_HEADERS = ['key','value'].map(&:upcase)
 
@@ -21,55 +20,6 @@ module NexClient
         puts msg.colorize(:red)
       end
 
-      def display_logs(logs)
-        color_index = 0
-        logs.each do |container_id,log_lines|
-          color_index = (color_index + 1) % LOG_COLORS.size
-          puts "\n"
-          puts "Node: #{container_id}".colorize(LOG_COLORS[color_index])
-          puts "-"*50
-          puts log_lines.join("\n")
-        end
-        puts "\n"
-      end
-
-      # Retrieve resource events
-      def events(args,opts)
-        filters = {}
-        filters[:'source.name'] = args.first
-        filters[:event] = opts.type if opts.type
-        tail_size = (opts.tail || 50).to_i
-
-        events = NexClient::Event.where(filters).order(id: :desc)
-        list = events.to_a
-        while list.count < tail_size
-          events = events.pages.next
-          break unless events
-          list |= events.to_a
-        end
-        list = list.first(tail_size)
-
-        self.display_events(list.to_a)
-      end
-
-      # Display a list of events
-      def display_events(events)
-        # Content
-        events.sort_by { |e| e.id.to_i }.each do |e|
-          username = e&.username || 'system'
-          session_id = e&.session_id || '-'
-          puts [
-            e.created_at,
-            e.event.ljust(12,' '),
-            username.ljust(15,' '),
-            session_id.ljust(6,' '),
-            e.level.ljust(6,' '),
-            e.message
-          ].join(" | ")
-        end
-        puts "\n"
-      end
-
       def with_cert_identity
         # Fetch user
         me = NexClient::Me.find.first
data/lib/nex_client/commands/ip_whitelisting.rb
@@ -0,0 +1,124 @@
+# frozen_string_literal: true
+
+module NexClient
+  module Commands
+    module IpWhitelisting
+      extend Helpers
+
+      IP_RULES_TITLE = "IP Rules".colorize(:cyan)
+      IP_RULES_HEADERS = ['Pattern', 'Action', 'CIDRS'].map(&:upcase)
+
+      def self.configure(c)
+        c.syntax = 'nex-cli apps:ip-rules APP_NAME [options]'
+        c.summary = 'Manage Application Endpoint IP whitelisting'
+        c.description = <<~HEREDOC
+          Manage Application Endpoint IP whitelisting
+
+          Specify a JSON (.json) or YAML (.yml) file describing the security rules to apply/ignore. If no file is specified you will be prompted to
+          paste rules in JSON format in the terminal.
+
+          Security rules are a list of regex patterns allowing or denying access to a specific endpoint based on an IP range (CIDR) in the following format:
+          { "pattern": "<regex_matching_a_path>", "cidrs": ["<ip_range_1>","<ip_range_2>"], "action": "<allow|deny>" }
+
+          Rules must be matched:
+          ----------------------
+          Rules must be matched (path + cidr) to be applied. Therefore you may want to have a "deny all" rule as the last rule if your application is private.
+
+          Rule order matters:
+          -------------------
+          Rules are matched in order. The first rule matched for a given [path, cidr] will be applied. A "deny all" rule should always be last.
+
+          Regex Patterns:
+          ---------------
+          Regex patterns must be Lua-compatible because these patterns ultimately get parsed by the routing infrastructure (nginx/lua). You can test your Lua
+          pattern in the Lua online console to confirm it does what you want.
+          1) Go to: https://www.lua.org/cgi-bin/demo
+          2) Run the following in the console (example): return string.match("/api/v1/ping","^/api/v1")
+
+          Ruleset Example:
+          ----------------
+          Make your application only accessible from specific IP addresses but leave the /ping endpoint public:
+          [
+            # Allow ALL IP addresses to access the ping endpoint
+            { "pattern": "^/ping", "cidrs": ["0.0.0.0/0"], "action": "allow" },
+
+            # Allow specific IP addresses and IP ranges to access your application
+            { "pattern": ".*", "cidrs": ["55.54.52.53/32", "80.70.60.0/24"], "action": "allow" },
+
+            # "Deny all" rule - all other IPs will be rejected on all endpoints
+            { "pattern": ".*", "cidrs": ["0.0.0.0/0"], "action": "deny" }
+          ]
+
+        HEREDOC
+        c.example 'update IP rules via command line prompt', 'nex-cli apps:ip-rules myapp --ruleset'
+        c.example 'update IP rules via file input', 'nex-cli apps:ip-rules myapp --ruleset /tmp/myruleset.json'
+        c.option '--ruleset [PATH]', String, 'specify web application IP rules using a JSON or YAML file (prompt will appear otherwise). [restart required]'
+        c.option '--clear-ruleset', String, 'remove all IP rules'
+
+        c.action do |args, options|
+          manage(args, options)
+        end
+      end
+
+      def self.manage(args, opts)
+        name = args.first
+        app = NexClient::App.find(name: name).first
+
+        # Display error
+        unless app
+          error("Error! Could not find app: #{name}")
+          return false
+        end
+
+        a = update(app, opts)
+        show(a)
+      end
+
+      def self.update(app, opts)
+        # Deep duplicate
+        app_opts = Marshal.load(Marshal.dump(app.opts || {}))
+
+        # Clear all IP rules
+        if opts.clear_ruleset
+          app_opts.delete('ip_whitelisting_rules')
+        end
+
+        # Add IP rules
+        if opts.ruleset.present?
+          ip_rules = begin
+            if opts.ruleset.is_a?(String)
+              hash_from_file(opts.ruleset)
+            else
+              val = ask("Copy/paste your IP rules below in JSON format:") { |q| q.gather = "" }
+              JSON.parse(val.join(""))
+            end
+          end
+
+          app_opts['ip_whitelisting_rules'] = ip_rules
+        end
+
+        # Update IP rules if they changed
+        if app.opts != app_opts
+          app.update_attributes({ opts: app_opts })
+          success("Successfully updated IP rules. Please restart your app to apply these changes...")
+        end
+
+        # Return updated app
+        app
+      end
+
+      def self.show(app)
+        ruleset = (app.opts['ip_whitelisting_rules'] || [])
+
+        table = Terminal::Table.new title: IP_RULES_TITLE, headings: IP_RULES_HEADERS do |t|
+          ruleset.each do |rule|
+            t.add_row([rule['pattern'], rule['action'], rule['cidrs']&.join(' , ')])
+          end
+        end
+        puts table
+        puts "\n"
+      end
+    end
+  end
+end
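The semantics described in the help text above - the first rule matching both path and CIDR wins, and unmatched requests fall through (hence the recommended trailing "deny all" rule) - can be illustrated in Ruby. This is only an illustrative sketch, not the platform's enforcement code (which runs in nginx/lua); Ruby's Regexp and IPAddr stand in for Lua patterns and CIDR matching:

require 'ipaddr'

# Ruleset from the help text: private app with a public /ping endpoint.
RULES = [
  { 'pattern' => '^/ping', 'cidrs' => ['0.0.0.0/0'],      'action' => 'allow' },
  { 'pattern' => '.*',     'cidrs' => ['55.54.52.53/32'], 'action' => 'allow' },
  { 'pattern' => '.*',     'cidrs' => ['0.0.0.0/0'],      'action' => 'deny'  }
].freeze

def action_for(path, ip, rules)
  rules.each do |rule|
    next unless path.match?(Regexp.new(rule['pattern']))
    next unless rule['cidrs'].any? { |cidr| IPAddr.new(cidr).include?(ip) }
    return rule['action'] # first rule matching both path and CIDR wins
  end
  'allow' # unmatched requests are not acted on, hence the explicit "deny all" rule
end

puts action_for('/ping',       '8.8.8.8',     RULES) # => allow
puts action_for('/api/secret', '8.8.8.8',     RULES) # => deny
puts action_for('/api/secret', '55.54.52.53', RULES) # => allow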
data/lib/nex_client/commands/logs.rb
@@ -0,0 +1,58 @@
+# frozen_string_literal: true
+
+module NexClient
+  module Commands
+    module Logs
+      LOG_COLORS = [:cyan, :green, :red, :yellow, :magenta].freeze
+
+      def self.configure(c, klass)
+        entity = klass.entity_name
+        entities = klass.entities_name
+
+        c.syntax = "nex-cli #{entities}:logs APP_NAME [options]"
+        c.summary = "Gather #{entity} logs"
+        c.description = "Gather container logs for a given #{entity}"
+        c.example "gather logs for my#{entity}", "nex-cli #{entities}:logs my#{entity}"
+        c.example "gather logs for my#{entity} with a tail of 50", "nex-cli #{entities}:logs --tail 50 my#{entity}"
+        c.option '--tail NUMBER', String, 'number of lines to retrieve for each container'
+        c.action do |args, options|
+          manage(args, options, klass)
+        end
+      end
+
+      # Retrieve resource logs
+      def self.manage(args, opts, klass)
+        name = args.first
+        cluster = klass.find(klass.main_key => name).first
+
+        # Display error
+        unless cluster
+          error("Error! Could not find #{klass.entity_name}: #{name}")
+          return false
+        end
+
+        # Retrieve logs and display them
+        logs = cluster.logs(tail: opts.tail).first
+        self.display_logs(logs.log_ret)
+      end
+
+      def self.display_logs(logs)
+        color_index = 0
+
+        if logs.is_a?(Hash)
+          logs.each do |container_id, log_lines|
+            color_index = (color_index + 1) % LOG_COLORS.size
+            puts "\n"
+            puts "Node: #{container_id}".colorize(LOG_COLORS[color_index])
+            puts "-" * 50
+            puts log_lines.join("\n")
+          end
+        else
+          puts logs
+        end
+
+        puts "\n"
+      end
+    end
+  end
+end
data/lib/nex_client/commands/policies.rb
@@ -0,0 +1,184 @@
+# frozen_string_literal: true
+
+module NexClient
+  module Commands
+    module Policies
+      REGION_BALANCING_TITLE = "Region Balancing".colorize(:cyan)
+      REGION_BALANCING_HEADERS = ['Region','Rule'].map(&:upcase)
+
+      CONSTRAINTS_TITLE = "Constraints".colorize(:red)
+      CONSTRAINTS_HEADERS = ['Type','Attribute','Value'].map(&:upcase)
+
+      def self.configure(c, klass)
+        entity = klass.entity_name
+        entities = klass.entities_name
+
+        c.syntax = "nex-cli #{entities}:policies #{entity.upcase}_NAME [options]"
+        c.summary = "Display and manage #{entity} region-balancing policies and deployment constraints"
+        c.description = <<~HEREDOC
+          Constraints:
+          ------------
+          Use: --constraints CONSTRAINTS
+          Use: --clear-constraints
+          Specify compute or storage attributes to constrain the deployment of #{entity} containers to specific racks.
+
+          For example, if you wish to restrict the deployment of #{entity} containers to servers using a specific security group,
+          you can run the following command:
+          E.g.:
+          nex-cli #{entities}:policies my#{entity} --constraints compute.network_security_group=my-security-group
+
+          You can define multiple constraints - in which case the rules will be additive (logical AND). If you wish to rebuild the constraints
+          from scratch you can clear the constraints (--clear-constraints) then re-add constraints (--constraints)
+          E.g.:
+          nex-cli #{entities}:policies my#{entity} --clear-constraints
+          nex-cli #{entities}:policies my#{entity} --constraints compute.machine_type=m4.2xlarge,compute.network_security_group=my-security-group
+
+          If the orchestrator is unable to find a rack matching the defined constraints then the launch of new containers will fail. An error
+          will appear in the #{entities}:events logs telling you so.
+
+          NOTE for storage constraints:
+          - only applicable if your application has persistence enabled
+          - storage constraints are only applied when provisioning the first container as storage racks are permanently assigned.
+
+          The list of attributes that can be used is (examples are based on AWS):
+          - id - e.g. 653
+          - network_zone - e.g. ap-southeast-1
+          - machine_image - e.g. ami-123
+          - machine_type - e.g. m4.2xlarge
+          - network_security_group - e.g. my-compute-security-group
+          - private_ip_address - e.g. 10.1.1.1
+          - vpc_id - e.g. vpc-234
+          - network_subnet_id - e.g. subnet-123
+          - platform_provider - e.g. aws (provider name depends on orchestrator configuration)
+
+          Server regions are not available through rack constraints. If you wish to lock your application to a certain region,
+          use region-balancing policies.
+
+          Region-Balancing:
+          -----------------
+          Use: --region-balancing REGIONS
+          Use: --clear-region-balancing
+          Specify how your containers should be distributed across the available Nex!™ regions. This is only relevant when the Nex!™ platform has been deployed
+          in multiple regions.
+
+          Options are the following:
+          - evenly distribute across all regions: --region-balancing=all
+          - evenly distribute across specified regions: --region-balancing="ap-southeast-1,us-west-2"
+          - distribute with relative weights: --region-balancing="ap-southeast-1=1,us-west-2=2"
+          - remove all region-balancing constraints: --clear-region-balancing
+        HEREDOC
+
+        c.example 'display all region policies and constraints', "nex-cli #{entities}:policies my#{entity}"
+        c.example 'balance containers equally between regions', "nex-cli #{entities}:policies my#{entity} --region-balancing=\"ap-southeast-1,us-west-2\""
+        c.example 'deploy more containers in one region', "nex-cli #{entities}:policies my#{entity} --region-balancing=\"ap-southeast-1=1,us-west-2=2\""
+        c.example 'scale containers across all regions', "nex-cli #{entities}:policies my#{entity} --region-balancing=all"
+
+        c.option '--region-balancing REGIONS', String, "specify how the #{entity} should be proportionally distributed geographically. E.g. \"all\" or \"ap-southeast-1,us-west-2\" or \"ap-southeast-1=1,us-west-2=2\". [restart required]"
+        c.option '--clear-region-balancing', String, 'remove region balancing policy and revert to default region'
+        c.option '--constraints CONSTRAINT1=value,CONSTRAINT2=value,...', Array, 'restrict deployment of containers to servers having specific attributes [restart required]'
+        c.option '--clear-constraints', String, 'remove all constraints'
+
+        c.action do |args, options|
+          manage(args, options, klass)
+        end
+      end
+
+      def self.manage(args, opts, klass)
+        name = args.first
+        cluster = klass.find(name: name).first
+
+        # Display error
+        unless cluster
+          error("Error! Could not find #{klass.entity_name}: #{name}")
+          return false
+        end
+
+        c = update(cluster, opts)
+        show(c)
+      end
+
+      def self.show(cluster)
+        display_region_balancing(cluster)
+        display_constraints(cluster)
+      end
+
+      def self.update(cluster, opts)
+        # Deep duplicate
+        cluster_opts = Marshal.load(Marshal.dump(cluster.opts || {}))
+
+        # Clear all constraints
+        if opts.clear_constraints
+          cluster_opts.delete('constraints')
+        end
+
+        # Clear region balancing policies
+        if opts.clear_region_balancing
+          cluster_opts.delete('region_balancing')
+        end
+
+        # Add list of constraints
+        if opts.constraints.present?
+          cluster_opts['constraints'] ||= {}
+
+          # Transform compute.server_type=foo
+          # into
+          # { 'compute' => { 'server_type' => 'foo' } }
+          Array(opts.constraints).each do |constraint|
+            key,val = constraint.split('=')
+            server_type,rule = key.split('.')
+            cluster_opts['constraints'][server_type] ||= {}
+            cluster_opts['constraints'][server_type][rule] = val
+          end
+        end
+
+        # Add region balancing policy
+        if opts.region_balancing.present?
+          cluster_opts['region_balancing'] = opts.region_balancing
+        end
+
+        # Update policies
+        cluster.update_attributes({ opts: cluster_opts }) if cluster.opts != cluster_opts
+        cluster
+      end
+
+      def self.display_region_balancing(cluster)
+        policies = cluster.opts.dig('region_balancing')
+        default_region = cluster.preferred_region
+
+        table = Terminal::Table.new title: REGION_BALANCING_TITLE, headings: REGION_BALANCING_HEADERS do |t|
+          if policies.blank?
+            t.add_row(["#{default_region} (default)", "All containers will be deployed in #{default_region} (preferred_region)"])
+          elsif policies == 'all'
+            t.add_row(['All regions', 'Containers will be spread evenly across all available regions'])
+          else
+            policies.split(',').each do |policy|
+              region,multiplier = policy.split('=')
+              t.add_row([region, "Containers will be deployed in #{region} with a ratio #{multiplier}:1"])
+            end
+          end
+        end
+        puts table
+        puts "\n"
+      end
+
+      def self.display_constraints(cluster)
+        constraints = cluster.opts&.dig('constraints')
+
+        table = Terminal::Table.new title: CONSTRAINTS_TITLE, headings: CONSTRAINTS_HEADERS do |t|
+          if constraints.blank?
+            t.add_row(["None (default)", "None", "Containers will use all available servers in selected regions"])
+          else
+            constraints.each do |server_type,rules|
+              rules.each do |rule,val|
+                t.add_row([server_type, rule, val])
+              end
+            end
+          end
+        end
+
+        puts table
+        puts "\n"
+      end
+    end
+  end
+end
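For reference, the --constraints transformation in Policies.update above, restated as a standalone function with a quick check (parse_constraints is an illustrative name, not part of the gem):

# Turns dotted key=value pairs into the nested hash stored under
# cluster_opts['constraints'], mirroring the loop in Policies.update:
#   "compute.machine_type=m4.2xlarge"
#   => { 'compute' => { 'machine_type' => 'm4.2xlarge' } }
def parse_constraints(pairs)
  pairs.each_with_object({}) do |constraint, acc|
    key, val = constraint.split('=')
    server_type, rule = key.split('.')
    (acc[server_type] ||= {})[rule] = val
  end
end

p parse_constraints(%w[compute.machine_type=m4.2xlarge compute.network_security_group=my-security-group])
# => {"compute"=>{"machine_type"=>"m4.2xlarge", "network_security_group"=>"my-security-group"}}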