vmpooler 3.7.0 → 3.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: b87824aca1844a9ce818ee4ed73ed50421b66ef55d7196ad58f43697ba4d1519
4
- data.tar.gz: c6abab19d5230d045be2197ecae237ab22b82c3e8ebf83b0e45db964b25cb034
3
+ metadata.gz: 82a54b52698f8e24f5677370ab21664e97ec9412812a47447ac8d4619c5a23c4
4
+ data.tar.gz: 98a148c9febf6d9dae630b382b5ffc0a6da5bff7d1bdca113c9da79d9ab022da
5
5
  SHA512:
6
- metadata.gz: 86d7c75a40b018031a955e43397a50e4f12c63a8e9548348f9c7849aa5b78fb1fd673554beff098a5520502154c14f4d2edcdb150a55da3b57d746fef632301d
7
- data.tar.gz: 0ac6b7105b4318954459c40efc10c16857536b94b0a8a1c8de4705000da974b1c24455ca48778e671a278ea21c04a64c9c0de0f2060c98b8b62526e2be488e82
6
+ metadata.gz: 4dba5aa2774558fc34d405f06b3307471671f4074da2e058b5c670eb35099472a1779436fb84c83f270b652413cdbcb97121782f3184e232b7871034ad6e0cfb
7
+ data.tar.gz: 47ba9d7f37f75ebfe5f48f5d99afb4beda77d0d11351173f77fb7f88026710ea72e76d1b1cb5512e2b86851b955e443b6471c0e68b7ca58630b767b47e4edfc0
@@ -0,0 +1,130 @@
1
# frozen_string_literal: true

module Vmpooler
  # Adaptive timeout that adjusts based on observed connection performance
  # to optimize between responsiveness and reliability.
  #
  # Tracks recent connection durations and adjusts the timeout to
  # p95 + 50% buffer; failures shrink the timeout by 20% so subsequent
  # attempts fail faster during an outage.
  class AdaptiveTimeout
    attr_reader :current_timeout

    # Initialize adaptive timeout
    #
    # @param name [String] Name for logging (e.g., "vsphere_connections")
    # @param logger [Object] Logger responding to #log(level, message)
    # @param metrics [Object] Metrics sink responding to #gauge(name, value)
    # @param min [Integer] Minimum timeout in seconds
    # @param max [Integer] Maximum timeout in seconds
    # @param initial [Integer] Initial timeout in seconds (clamped to min..max)
    # @param max_samples [Integer] Number of recent samples to track
    def initialize(name:, logger:, metrics:, min: 5, max: 60, initial: 30, max_samples: 100)
      @name = name
      @logger = logger
      @metrics = metrics
      @min_timeout = min
      @max_timeout = max
      # Remember the configured starting point so #reset restores it rather
      # than a hard-coded value.
      @initial_timeout = initial.clamp(min, max)
      @current_timeout = @initial_timeout
      @recent_durations = []
      @max_samples = max_samples
      @mutex = Mutex.new
    end

    # Get current timeout value (thread-safe)
    # @return [Integer] Current timeout in seconds
    def timeout
      @mutex.synchronize { @current_timeout }
    end

    # Record a successful operation duration
    # @param duration [Float] Duration in seconds
    def record_success(duration)
      @mutex.synchronize do
        @recent_durations << duration
        @recent_durations.shift if @recent_durations.size > @max_samples

        # Only adapt once we have enough samples for a meaningful p95
        adjust_timeout if @recent_durations.size >= 10
      end
    end

    # Record a failure (timeout or error)
    # Reduces current timeout by 20% (never below the minimum) so that
    # subsequent attempts fail faster.
    def record_failure
      @mutex.synchronize do
        old_timeout = @current_timeout
        @current_timeout = [(@current_timeout * 0.8).round, @min_timeout].max

        if old_timeout != @current_timeout
          @logger.log('d', "[*] [adaptive_timeout] '#{@name}' reduced timeout #{old_timeout}s → #{@current_timeout}s after failure")
          @metrics.gauge("adaptive_timeout.current.#{@name}", @current_timeout)
        end
      end
    end

    # Reset to the configured initial timeout and clear collected samples
    # (useful after recovery from an outage).
    def reset
      @mutex.synchronize do
        @recent_durations.clear
        old_timeout = @current_timeout
        @current_timeout = @initial_timeout

        @logger.log('d', "[*] [adaptive_timeout] '#{@name}' reset timeout #{old_timeout}s → #{@current_timeout}s")
        @metrics.gauge("adaptive_timeout.current.#{@name}", @current_timeout)
      end
    end

    # Get statistics about recent durations
    # @return [Hash] { samples: 0 } when empty, otherwise :samples, :min,
    #   :max, :avg, :p50, :p95, :p99 and :current_timeout
    def stats
      @mutex.synchronize do
        return { samples: 0 } if @recent_durations.empty?

        sorted = @recent_durations.sort
        {
          samples: sorted.size,
          min: sorted.first.round(2),
          max: sorted.last.round(2),
          avg: (sorted.sum / sorted.size.to_f).round(2),
          p50: percentile(sorted, 0.50).round(2),
          p95: percentile(sorted, 0.95).round(2),
          p99: percentile(sorted, 0.99).round(2),
          current_timeout: @current_timeout
        }
      end
    end

    private

    # Recompute @current_timeout as p95 + 50% buffer, bounded by min/max.
    # Must be called with @mutex held.
    def adjust_timeout
      return if @recent_durations.empty?

      sorted = @recent_durations.sort
      p95_duration = percentile(sorted, 0.95)

      # Set timeout to p95 + 50% buffer, bounded by min/max
      new_timeout = (p95_duration * 1.5).round.clamp(@min_timeout, @max_timeout)

      # Only adjust if the change is significant (> 5 seconds) to avoid churn
      return unless (new_timeout - @current_timeout).abs > 5

      old_timeout = @current_timeout
      @current_timeout = new_timeout

      @logger.log('d', "[*] [adaptive_timeout] '#{@name}' adjusted timeout #{old_timeout}s → #{@current_timeout}s (p95: #{p95_duration.round(2)}s)")
      @metrics.gauge("adaptive_timeout.current.#{@name}", @current_timeout)
      @metrics.gauge("adaptive_timeout.p95.#{@name}", p95_duration)
    end

    # Nearest-rank percentile of a pre-sorted array; 0 for an empty array.
    def percentile(sorted_array, percentile)
      return 0 if sorted_array.empty?

      index = (sorted_array.size * percentile).ceil - 1
      sorted_array[index.clamp(0, sorted_array.size - 1)]
    end
  end
end
@@ -1,10 +1,13 @@
1
1
  # frozen_string_literal: true
2
2
 
3
+ require 'vmpooler/api/input_validator'
4
+
3
5
  module Vmpooler
4
6
 
5
7
  class API
6
8
 
7
9
  module Helpers
10
+ include InputValidator
8
11
 
9
12
  def tracer
10
13
  @tracer ||= OpenTelemetry.tracer_provider.tracer('api', Vmpooler::VERSION)
@@ -299,17 +302,35 @@ module Vmpooler
299
302
  total: 0
300
303
  }
301
304
 
302
- queue[:requested] = get_total_across_pools_redis_scard(pools, 'vmpooler__provisioning__request', backend) + get_total_across_pools_redis_scard(pools, 'vmpooler__provisioning__processing', backend) + get_total_across_pools_redis_scard(pools, 'vmpooler__odcreate__task', backend)
303
-
304
- queue[:pending] = get_total_across_pools_redis_scard(pools, 'vmpooler__pending__', backend)
305
- queue[:ready] = get_total_across_pools_redis_scard(pools, 'vmpooler__ready__', backend)
306
- queue[:running] = get_total_across_pools_redis_scard(pools, 'vmpooler__running__', backend)
307
- queue[:completed] = get_total_across_pools_redis_scard(pools, 'vmpooler__completed__', backend)
305
+ # Use a single pipeline to fetch all queue counts at once for better performance
306
+ results = backend.pipelined do |pipeline|
307
+ # Order matters - we'll use indices to extract values
308
+ pools.each do |pool|
309
+ pipeline.scard("vmpooler__provisioning__request#{pool['name']}") # 0..n-1
310
+ pipeline.scard("vmpooler__provisioning__processing#{pool['name']}") # n..2n-1
311
+ pipeline.scard("vmpooler__odcreate__task#{pool['name']}") # 2n..3n-1
312
+ pipeline.scard("vmpooler__pending__#{pool['name']}") # 3n..4n-1
313
+ pipeline.scard("vmpooler__ready__#{pool['name']}") # 4n..5n-1
314
+ pipeline.scard("vmpooler__running__#{pool['name']}") # 5n..6n-1
315
+ pipeline.scard("vmpooler__completed__#{pool['name']}") # 6n..7n-1
316
+ end
317
+ pipeline.get('vmpooler__tasks__clone') # 7n
318
+ pipeline.get('vmpooler__tasks__ondemandclone') # 7n+1
319
+ end
308
320
 
309
- queue[:cloning] = backend.get('vmpooler__tasks__clone').to_i + backend.get('vmpooler__tasks__ondemandclone').to_i
310
- queue[:booting] = queue[:pending].to_i - queue[:cloning].to_i
311
- queue[:booting] = 0 if queue[:booting] < 0
312
- queue[:total] = queue[:requested] + queue[:pending].to_i + queue[:ready].to_i + queue[:running].to_i + queue[:completed].to_i
321
+ n = pools.length
322
+ # Safely extract results with default to empty array if slice returns nil
323
+ queue[:requested] = (results[0...n] || []).sum(&:to_i) +
324
+ (results[n...(2 * n)] || []).sum(&:to_i) +
325
+ (results[(2 * n)...(3 * n)] || []).sum(&:to_i)
326
+ queue[:pending] = (results[(3 * n)...(4 * n)] || []).sum(&:to_i)
327
+ queue[:ready] = (results[(4 * n)...(5 * n)] || []).sum(&:to_i)
328
+ queue[:running] = (results[(5 * n)...(6 * n)] || []).sum(&:to_i)
329
+ queue[:completed] = (results[(6 * n)...(7 * n)] || []).sum(&:to_i)
330
+ queue[:cloning] = (results[7 * n] || 0).to_i + (results[7 * n + 1] || 0).to_i
331
+ queue[:booting] = queue[:pending].to_i - queue[:cloning].to_i
332
+ queue[:booting] = 0 if queue[:booting] < 0
333
+ queue[:total] = queue[:requested] + queue[:pending].to_i + queue[:ready].to_i + queue[:running].to_i + queue[:completed].to_i
313
334
 
314
335
  queue
315
336
  end
@@ -0,0 +1,159 @@
1
# frozen_string_literal: true

module Vmpooler
  class API
    # Input validation helpers to enhance security.
    #
    # Validators return +true+ (or the coerced Integer for the integer
    # validators) on success, and an error Hash of the form
    # <tt>{ 'ok' => false, 'error' => message }</tt> on failure; use
    # #validation_error? to distinguish the two.
    module InputValidator
      # Maximum lengths to prevent abuse
      MAX_HOSTNAME_LENGTH = 253
      MAX_TAG_KEY_LENGTH = 50
      MAX_TAG_VALUE_LENGTH = 255
      MAX_REASON_LENGTH = 500
      MAX_POOL_NAME_LENGTH = 100
      MAX_TOKEN_LENGTH = 64
      # Maximum JSON request body size (bytes) and nesting depth
      MAX_BODY_BYTES = 10_240
      MAX_JSON_DEPTH = 5

      # Valid patterns. \A/\z anchor the whole string; ^/$ would only
      # anchor per-line and could be bypassed with embedded newlines.
      HOSTNAME_PATTERN = /\A[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?(\.[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?)*\z/i.freeze
      POOL_NAME_PATTERN = /\A[a-zA-Z0-9_-]+\z/.freeze
      TAG_KEY_PATTERN = /\A[a-zA-Z0-9_\-.]+\z/.freeze
      TOKEN_PATTERN = /\A[a-zA-Z0-9\-_]+\z/.freeze
      INTEGER_PATTERN = /\A\d+\z/.freeze

      class ValidationError < StandardError; end

      # Validate hostname format (RFC-1035-style labels) and length
      def validate_hostname(hostname)
        return error_response('Hostname is required') if hostname.nil? || hostname.empty?
        return error_response('Hostname too long') if hostname.length > MAX_HOSTNAME_LENGTH
        return error_response('Invalid hostname format') unless hostname.match?(HOSTNAME_PATTERN)

        true
      end

      # Validate pool/template name
      def validate_pool_name(pool_name)
        return error_response('Pool name is required') if pool_name.nil? || pool_name.empty?
        return error_response('Pool name too long') if pool_name.length > MAX_POOL_NAME_LENGTH
        return error_response('Invalid pool name format') unless pool_name.match?(POOL_NAME_PATTERN)

        true
      end

      # Validate tag key and (optional) value
      def validate_tag(key, value)
        return error_response('Tag key is required') if key.nil? || key.empty?
        return error_response('Tag key too long') if key.length > MAX_TAG_KEY_LENGTH
        return error_response('Invalid tag key format') unless key.match?(TAG_KEY_PATTERN)

        if value
          return error_response('Tag value too long') if value.length > MAX_TAG_VALUE_LENGTH

          # Reject rather than silently strip characters outside the
          # allow-list, to prevent injection attacks
          sanitized_value = value.gsub(/[^\w\s\-.@:\/]/, '')
          return error_response('Tag value contains invalid characters') if sanitized_value != value
        end

        true
      end

      # Validate token format
      def validate_token_format(token)
        return error_response('Token is required') if token.nil? || token.empty?
        return error_response('Token too long') if token.length > MAX_TOKEN_LENGTH
        return error_response('Invalid token format') unless token.match?(TOKEN_PATTERN)

        true
      end

      # Validate an integer parameter, optionally bounded.
      # @return [Integer, Hash] coerced value on success, error Hash otherwise
      def validate_integer(value, name = 'value', min: nil, max: nil)
        return error_response("#{name} is required") if value.nil?

        value_str = value.to_s
        return error_response("#{name} must be a valid integer") unless value_str.match?(INTEGER_PATTERN)

        int_value = value.to_i
        return error_response("#{name} must be at least #{min}") if min && int_value < min
        return error_response("#{name} must be at most #{max}") if max && int_value > max

        int_value
      end

      # Validate VM request count (1..100)
      def validate_vm_count(count)
        validate_integer(count, 'VM count', min: 1, max: 100)
      end

      # Validate disk size in GB (1..2048)
      def validate_disk_size(size)
        validate_integer(size, 'Disk size', min: 1, max: 2048)
      end

      # Validate lifetime (TTL) in hours (1..168, i.e. max 1 week)
      def validate_lifetime(lifetime)
        validate_integer(lifetime, 'Lifetime', min: 1, max: 168)
      end

      # Validate optional reason text; rejects HTML/quote characters
      # to prevent XSS/injection
      def validate_reason(reason)
        return true if reason.nil? || reason.empty?
        return error_response('Reason too long') if reason.length > MAX_REASON_LENGTH

        sanitized = reason.gsub(/[<>"']/, '')
        return error_response('Reason contains invalid characters') if sanitized != reason

        true
      end

      # Parse and sanity-check a JSON request body.
      # @return [Hash] parsed body ({} for a blank body), or an error Hash
      def sanitize_json_body(body)
        return {} if body.nil? || body.empty?

        # Reject oversized payloads BEFORE parsing — checking size after
        # JSON.parse would defeat the DoS protection
        return error_response('Request body too large') if body.length > MAX_BODY_BYTES

        begin
          parsed = JSON.parse(body)
          return error_response('Request body must be a JSON object') unless parsed.is_a?(Hash)
          # Limit nesting depth to prevent DoS via deeply-nested structures
          return error_response('Request body too complex') if json_depth(parsed) > MAX_JSON_DEPTH

          parsed
        rescue JSON::ParserError => e
          error_response("Invalid JSON: #{e.message}")
        end
      end

      # Check if a validation result is an error Hash
      def validation_error?(result)
        result.is_a?(Hash) && result['ok'] == false
      end

      private

      # Uniform error shape shared with the rest of the API
      def error_response(message)
        { 'ok' => false, 'error' => message }
      end

      # Recursively compute nesting depth of a parsed JSON structure;
      # scalars are depth 0, an empty container is depth 1.
      def json_depth(obj, depth = 0)
        return depth unless obj.is_a?(Hash) || obj.is_a?(Array)
        return depth + 1 if obj.empty?

        children = obj.is_a?(Hash) ? obj.values : obj
        depth + 1 + children.map { |v| json_depth(v, 0) }.max
      end
    end
  end
end
@@ -0,0 +1,116 @@
1
# frozen_string_literal: true

module Vmpooler
  class API
    # Rack middleware that protects the API against abuse by tracking
    # request counts per client (auth token or source IP) in Redis.
    #
    # Fails open: if Redis is unavailable, requests are allowed through.
    class RateLimiter
      DEFAULT_LIMITS = {
        global_per_ip: { limit: 100, period: 60 }, # 100 requests per minute per IP
        authenticated: { limit: 500, period: 60 }, # 500 requests per minute with token
        vm_creation: { limit: 20, period: 60 },    # 20 VM creations per minute
        vm_deletion: { limit: 50, period: 60 }     # 50 VM deletions per minute
      }.freeze

      # @param app [#call] downstream Rack application
      # @param redis [Object] Redis client
      # @param config [Hash] supports :rate_limits (per-type overrides) and
      #   :rate_limiting_enabled (default true)
      def initialize(app, redis, config = {})
        @app = app
        @redis = redis
        @config = DEFAULT_LIMITS.merge(config[:rate_limits] || {})
        @enabled = config.fetch(:rate_limiting_enabled, true)
      end

      # Rack entry point: reject with 429 when over the limit, otherwise
      # count the request and pass it downstream.
      def call(env)
        return @app.call(env) unless @enabled

        request = Rack::Request.new(env)
        client_id = identify_client(request)
        endpoint_type = classify_endpoint(request)

        return rate_limit_response(client_id, endpoint_type) if rate_limit_exceeded?(client_id, endpoint_type, request)

        increment_request_count(client_id, endpoint_type)

        @app.call(env)
      end

      private

      # Identify the client: prefer token-based identification for
      # authenticated requests, fall back to the source IP.
      def identify_client(request)
        token = request.env['HTTP_X_AUTH_TOKEN']
        return "token:#{token}" if token && !token.empty?

        ip = request.ip || request.env['REMOTE_ADDR'] || 'unknown'
        "ip:#{ip}"
      end

      # Map a request to the rate-limit bucket it should be counted in.
      def classify_endpoint(request)
        path = request.path
        method = request.request_method

        return :vm_creation if method == 'POST' && path.include?('/vm')
        return :vm_deletion if method == 'DELETE' && path.include?('/vm')
        return :authenticated if request.env['HTTP_X_AUTH_TOKEN']

        :global_per_ip
      end

      # True when the client has already used up its quota for this bucket.
      def rate_limit_exceeded?(client_id, endpoint_type, _request)
        current_count = @redis.get(limit_key(endpoint_type, client_id)).to_i
        current_count >= limit_for(endpoint_type)[:limit]
      rescue StandardError => e
        # If Redis fails, allow the request through (fail open)
        warn "Rate limiter Redis error: #{e.message}"
        false
      end

      # Count this request and (re)arm the window expiry.
      def increment_request_count(client_id, endpoint_type)
        limit_config = limit_for(endpoint_type)
        key = limit_key(endpoint_type, client_id)

        @redis.pipelined do |pipeline|
          pipeline.incr(key)
          pipeline.expire(key, limit_config[:period])
        end
      rescue StandardError => e
        # Log error but don't fail the request
        warn "Rate limiter increment error: #{e.message}"
      end

      # Build the 429 response with standard rate-limit headers.
      def rate_limit_response(client_id, endpoint_type)
        limit_config = limit_for(endpoint_type)

        begin
          ttl = @redis.ttl(limit_key(endpoint_type, client_id))
        rescue StandardError
          ttl = limit_config[:period]
        end
        # Redis TTL returns -1 (key has no expiry) or -2 (key missing);
        # never send clients a negative Retry-After
        ttl = limit_config[:period] if ttl.nil? || ttl.negative?

        headers = {
          'Content-Type' => 'application/json',
          'X-RateLimit-Limit' => limit_config[:limit].to_s,
          'X-RateLimit-Remaining' => '0',
          'X-RateLimit-Reset' => (Time.now.to_i + ttl).to_s,
          'Retry-After' => ttl.to_s
        }

        body = JSON.pretty_generate({
          'ok' => false,
          'error' => 'Rate limit exceeded',
          'limit' => limit_config[:limit],
          'period' => limit_config[:period],
          'retry_after' => ttl
        })

        [429, headers, [body]]
      end

      # Resolve the limit config for a bucket, defaulting to per-IP.
      def limit_for(endpoint_type)
        @config[endpoint_type] || @config[:global_per_ip]
      end

      # Redis key for a given bucket and client.
      def limit_key(endpoint_type, client_id)
        "vmpooler__ratelimit__#{endpoint_type}__#{client_id}"
      end
    end
  end
end