rack-ai 0.2.0 → 0.4.0

This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
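Version 0.4.0 ships a new Sinatra example application (the first hunk below) and reworks `Rack::AI::Configuration`: the nested `classification`, `moderation`, `caching`, and `routing` sections become lazily built `OpenStruct` objects, and the feature handlers gain defensive accessors that fall back to sane defaults when a config section is missing. As orientation, here is a minimal sketch of mounting the middleware from a bare `config.ru`, assuming only the option names that appear in the bundled example:

```ruby
# config.ru — a minimal sketch; option names are taken from the example below.
require 'rack/ai'

use Rack::AI::Middleware,
    provider: :openai,
    api_key: ENV['OPENAI_API_KEY'],
    features: [:classification, :security],
    fail_safe: true # per the option name, fail open if the provider errors

run ->(env) { [200, { 'content-type' => 'text/plain' }, ['ok']] }
```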
@@ -0,0 +1,431 @@
+ # frozen_string_literal: true
+
+ # Sinatra Microservice Example with Rack::AI
+ # This example demonstrates how to build a secure microservice using Sinatra and
+ # Rack::AI, with AI-powered classification, security checks, rate limiting,
+ # anomaly detection, and logging.
+
+ require 'sinatra/base'
+ require 'json'
+ require 'securerandom' # used by the 500 handler below
+ require 'rack/ai'
+
+ class SecureMicroservice < Sinatra::Base
+   # Recorded at boot so /status can report a real uptime.
+   STARTED_AT = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+
+   # Configure Rack::AI middleware with security-focused settings
+   use Rack::AI::Middleware,
+       provider: :openai,
+       api_key: ENV['OPENAI_API_KEY'] || 'demo-key',
+       features: [:classification, :security, :rate_limiting, :anomaly_detection, :logging],
+       fail_safe: true,
+       async_processing: false, # synchronous for microservice reliability
+       sanitize_logs: true,
+       explain_decisions: true,
+
+       # Strict security settings for a microservice
+       classification: {
+         confidence_threshold: 0.7,
+         categories: [:human, :bot, :spam, :suspicious, :malicious]
+       },
+
+       security: {
+         injection_detection: true,
+         anomaly_threshold: 0.6,
+         block_suspicious: true
+       },
+
+       # Aggressive rate limiting for API protection
+       rate_limiting: {
+         window_size: 300,    # 5 minutes
+         max_requests: 100,
+         block_duration: 900, # 15 minutes
+         cleanup_interval: 60
+       },
+
+       # Sensitive anomaly detection
+       anomaly_detection: {
+         baseline_window: 3600,  # 1 hour
+         anomaly_threshold: 1.5, # lower threshold for a microservice
+         min_requests: 5,
+         learning_rate: 0.2
+       }
+
+   # Make the `logger` helper available in filters and routes
+   enable :logging
+
+   # Disable Rack::Protection's json_csrf check for this JSON API
+   set :protection, except: [:json_csrf]
+
+   # Helper methods for AI analysis
+   helpers do
+     def ai_results
+       @ai_results ||= request.env['rack.ai']
+     end
+
+     def ai_classification
+       ai_results&.dig(:results, :classification)
+     end
+
+     def ai_security
+       ai_results&.dig(:results, :security)
+     end
+
+     def ai_rate_limiting
+       ai_results&.dig(:results, :rate_limiting)
+     end
+
+     def ai_anomaly
+       ai_results&.dig(:results, :anomaly_detection)
+     end
+
+     def request_blocked?
+       ai_rate_limiting&.dig(:blocked) ||
+         ai_security&.dig(:threat_level) == :high ||
+         ai_classification&.dig(:classification) == :malicious
+     end
+
+     def suspicious_request?
+       ai_classification&.dig(:classification) == :suspicious ||
+         ai_security&.dig(:threat_level) == :medium ||
+         ai_anomaly&.dig(:risk_score)&.>(0.7)
+     end
+
+     def log_request_analysis
+       return unless ai_results
+
+       logger.info "AI Analysis: #{ai_results[:results].keys.join(', ')}"
+
+       if request_blocked?
+         logger.warn "Request blocked: #{request.path_info}"
+       elsif suspicious_request?
+         logger.warn "Suspicious request: #{request.path_info}"
+       end
+     end
+
+     def security_headers
+       {
+         'X-AI-Classification' => ai_classification&.dig(:classification)&.to_s,
+         'X-AI-Security-Level' => ai_security&.dig(:threat_level)&.to_s,
+         'X-AI-Risk-Score' => ai_anomaly&.dig(:risk_score)&.to_s,
+         'X-Content-Type-Options' => 'nosniff',
+         'X-Frame-Options' => 'DENY',
+         'X-XSS-Protection' => '1; mode=block'
+       }.compact
+     end
+   end
+
+   # Before filter for security checks
+   before do
+     content_type :json
+     log_request_analysis
+
+     # Block malicious requests immediately
+     if request_blocked?
+       halt 403, security_headers, {
+         error: 'Request blocked by AI security system',
+         reason: determine_block_reason,
+         timestamp: Time.now.iso8601
+       }.to_json
+     end
+
+     # Add security headers to all responses
+     headers security_headers
+   end
+
+   # Health check endpoint (reports whether the AI middleware ran)
+   get '/health' do
+     {
+       status: 'healthy',
+       service: 'secure-microservice',
+       timestamp: Time.now.iso8601,
+       ai_middleware: ai_results ? 'active' : 'inactive'
+     }.to_json
+   end
+
+   # Service status with AI metrics
+   get '/status' do
+     {
+       service: {
+         name: 'secure-microservice',
+         version: '1.0.0',
+         uptime: (Process.clock_gettime(Process::CLOCK_MONOTONIC) - STARTED_AT).round, # seconds
+         status: 'operational'
+       },
+       ai: {
+         active: !ai_results.nil?,
+         provider: ai_results&.dig(:provider),
+         features: ai_results&.dig(:results)&.keys || [],
+         processing_time: ai_results&.dig(:processing_time),
+         classification: ai_classification,
+         security_level: ai_security&.dig(:threat_level),
+         rate_limit_status: ai_rate_limiting,
+         anomaly_score: ai_anomaly&.dig(:risk_score)
+       },
+       timestamp: Time.now.iso8601
+     }.to_json
+   end
+
+   # User authentication endpoint
+   post '/auth/login' do
+     request.body.rewind
+     data = begin
+       JSON.parse(request.body.read)
+     rescue JSON::ParserError
+       {}
+     end
+
+     # AI analysis helps detect credential-stuffing attacks
+     if ai_anomaly&.dig(:risk_score)&.>(0.8)
+       logger.warn "Potential credential stuffing attack detected"
+
+       halt 429, {
+         error: 'Too many authentication attempts',
+         retry_after: 300,
+         timestamp: Time.now.iso8601
+       }.to_json
+     end
+
+     # Simulate authentication logic
+     username = data['username']
+     password = data['password']
+
+     if username && password && username.length > 3 && password.length > 6
+       token = "secure_token_#{Time.now.to_i}_#{rand(1000)}"
+
+       {
+         success: true,
+         token: token,
+         expires_at: (Time.now + 3600).iso8601,
+         security_analysis: {
+           classification: ai_classification&.dig(:classification),
+           risk_score: ai_anomaly&.dig(:risk_score) || 0.0
+         }
+       }.to_json
+     else
+       halt 400, {
+         error: 'Invalid credentials format',
+         requirements: {
+           username: 'minimum 4 characters',
+           password: 'minimum 7 characters'
+         }
+       }.to_json
+     end
+   end
+
+   # Data processing endpoint with content validation
+   post '/data/process' do
+     request.body.rewind
+     raw_data = request.body.read
+
+     # Check for suspicious content patterns
+     if ai_security&.dig(:injection_detection, :sql_injection_detected)
+       logger.error "SQL injection attempt detected"
+       halt 400, {
+         error: 'Malicious content detected',
+         type: 'sql_injection',
+         timestamp: Time.now.iso8601
+       }.to_json
+     end
+
+     if ai_security&.dig(:injection_detection, :xss_detected)
+       logger.error "XSS attempt detected"
+       halt 400, {
+         error: 'Malicious content detected',
+         type: 'xss_injection',
+         timestamp: Time.now.iso8601
+       }.to_json
+     end
+
+     begin
+       data = JSON.parse(raw_data)
+     rescue JSON::ParserError
+       halt 400, {
+         error: 'Invalid JSON format',
+         timestamp: Time.now.iso8601
+       }.to_json
+     end
+
+     # Process data (simulation)
+     processed_data = {
+       original_size: raw_data.bytesize,
+       processed_at: Time.now.iso8601,
+       items_count: data.is_a?(Array) ? data.length : 1,
+       security_scan: {
+         threats_detected: ai_security&.dig(:injection_detection, :threats) || [],
+         risk_level: ai_security&.dig(:threat_level) || :low,
+         classification: ai_classification&.dig(:classification) || :unknown
+       }
+     }
+
+     {
+       success: true,
+       result: processed_data,
+       ai_analysis: {
+         processing_safe: ai_security&.dig(:threat_level) != :high,
+         confidence: ai_classification&.dig(:confidence) || 0.0
+       }
+     }.to_json
+   end
+
+   # Analytics endpoint with anomaly detection
+   get '/analytics/requests' do
+     # Simulate analytics data
+     analytics = {
+       total_requests: 1250,
+       requests_by_classification: {
+         human: 1000,
+         bot: 150,
+         suspicious: 80,
+         spam: 15,
+         malicious: 5
+       },
+       security_events: {
+         blocked_requests: 25,
+         sql_injection_attempts: 8,
+         xss_attempts: 12,
+         rate_limit_violations: 45
+       },
+       anomaly_detection: {
+         anomalies_detected: 18,
+         current_baseline: ai_anomaly&.dig(:baseline_metrics) || {},
+         risk_distribution: {
+           low: 1180,
+           medium: 55,
+           high: 15
+         }
+       },
+       performance: {
+         avg_response_time: 125,
+         ai_processing_time: ai_results&.dig(:processing_time) || 0,
+         cache_hit_rate: 0.72
+       },
+       timestamp: Time.now.iso8601
+     }
+
+     # Add current request analysis
+     if ai_results
+       analytics[:current_request] = {
+         classification: ai_classification,
+         security: ai_security,
+         anomaly_score: ai_anomaly&.dig(:risk_score)
+       }
+     end
+
+     analytics.to_json
+   end
+
+   # File upload endpoint with security scanning
+   post '/upload' do
+     unless params[:file] && params[:file][:tempfile]
+       halt 400, {
+         error: 'No file provided',
+         timestamp: Time.now.iso8601
+       }.to_json
+     end
+
+     file = params[:file]
+     filename = file[:filename]
+     content = file[:tempfile].read
+
+     # Simple pattern checks alongside the AI analysis
+     suspicious_patterns = []
+     suspicious_patterns << 'javascript_code' if content.include?('<script')
+     suspicious_patterns << 'sql_statements' if content.match?(/\b(SELECT|INSERT|UPDATE|DELETE|DROP)\b/i)
+
+     if ai_security&.dig(:threat_level) == :high
+       logger.error "High-risk file upload blocked: #{filename}"
+       halt 403, {
+         error: 'File upload blocked by security system',
+         filename: filename,
+         threats: ai_security&.dig(:injection_detection, :threats) || [],
+         timestamp: Time.now.iso8601
+       }.to_json
+     end
+
+     # Simulate file processing
+     file_info = {
+       filename: filename,
+       size: content.bytesize,
+       content_type: file[:type],
+       uploaded_at: Time.now.iso8601,
+       security_scan: {
+         suspicious_patterns: suspicious_patterns,
+         threat_level: ai_security&.dig(:threat_level) || :low,
+         safe_to_process: suspicious_patterns.empty? && ai_security&.dig(:threat_level) != :high
+       },
+       ai_classification: ai_classification&.dig(:classification)
+     }
+
+     {
+       success: true,
+       file: file_info,
+       message: suspicious_patterns.empty? ? 'File uploaded successfully' : 'File uploaded with warnings'
+     }.to_json
+   end
+
+   # Error handlers
+   error 404 do
+     {
+       error: 'Endpoint not found',
+       path: request.path_info,
+       method: request.request_method,
+       timestamp: Time.now.iso8601,
+       ai_classification: ai_classification&.dig(:classification)
+     }.to_json
+   end
+
+   error 500 do
+     logger.error "Internal server error: #{env['sinatra.error']}"
+
+     {
+       error: 'Internal server error',
+       timestamp: Time.now.iso8601,
+       request_id: env['HTTP_X_REQUEST_ID'] || SecureRandom.hex(8)
+     }.to_json
+   end
+
+   private
+
+   # Used by the before filter to explain a 403.
+   def determine_block_reason
+     reasons = []
+     reasons << "rate_limit_exceeded" if ai_rate_limiting&.dig(:blocked)
+     reasons << "high_security_threat" if ai_security&.dig(:threat_level) == :high
+     reasons << "malicious_classification" if ai_classification&.dig(:classification) == :malicious
+     reasons.join(', ')
+   end
+ end
+
+ # CLI runner
+ if __FILE__ == $PROGRAM_NAME
+   require 'rack'
+
+   puts "🔒 Secure Microservice with Rack::AI"
+   puts "🛡️ Features: Classification, Security, Rate Limiting, Anomaly Detection, Logging"
+   puts "🔗 Available endpoints:"
+   puts "  GET  /health             - Health check"
+   puts "  GET  /status             - Service and AI status"
+   puts "  POST /auth/login         - User authentication with anomaly detection"
+   puts "  POST /data/process       - Data processing with injection detection"
+   puts "  GET  /analytics/requests - Request analytics and security metrics"
+   puts "  POST /upload             - File upload with security scanning"
+   puts ""
+   puts "🔧 Configuration:"
+   puts "  - Set OPENAI_API_KEY for full AI functionality"
+   puts "  - Aggressive security settings for microservice protection"
+   puts "  - Real-time threat detection and blocking"
+   puts ""
+   puts "🧪 Testing commands:"
+   puts "  curl http://localhost:4567/health"
+   puts "  curl http://localhost:4567/status"
+   puts "  curl -X POST http://localhost:4567/auth/login -d '{\"username\":\"test\",\"password\":\"password123\"}' -H 'Content-Type: application/json'"
+   puts "  curl -X POST http://localhost:4567/data/process -d '[{\"id\":1,\"data\":\"test\"}]' -H 'Content-Type: application/json'"
+   puts ""
+
+   # Note: Rack::Handler moved to the rackup gem in Rack 3, and webrick is no
+   # longer bundled with Ruby 3+; this runner assumes Rack 2 with webrick available.
+   Rack::Handler::WEBrick.run(SecureMicroservice, Port: 4567, Host: '0.0.0.0')
+ end
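Everything the example's helpers read comes from a single env key, `rack.ai`, that the middleware populates. Any downstream Rack app can consume it the same way; a minimal sketch, assuming the results layout used by the helpers above:

```ruby
require 'json'

# Reads the analysis hash that Rack::AI stores in env['rack.ai'].
class AuditApp
  def call(env)
    ai      = env['rack.ai'] || {}
    verdict = ai.dig(:results, :classification, :classification) # e.g. :human
    threat  = ai.dig(:results, :security, :threat_level)         # e.g. :low
    [200, { 'content-type' => 'application/json' },
     [{ verdict: verdict, threat: threat }.to_json]]
  end
end
```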
@@ -1,7 +1,8 @@
  # frozen_string_literal: true
 
- require "dry/configurable"
- require "dry/validation"
+ require "dry-configurable"
+ require "dry-validation"
+ require "ostruct"
 
  module Rack
    module AI
@@ -105,45 +106,38 @@ module Rack
        @metrics_enabled = true
        @explain_decisions = false
 
-       # Initialize nested configurations
-       @classification = {
-         confidence_threshold: 0.8,
-         categories: [:spam, :bot, :human, :suspicious]
-       }
-
-       @moderation = {
-         toxicity_threshold: 0.7,
-         check_response: false,
-         block_on_violation: true
-       }
-
-       @caching = {
-         predictive_enabled: true,
-         prefetch_threshold: 0.9,
-         redis_url: "redis://localhost:6379"
-       }
-
-       @routing = {
-         smart_routing_enabled: true,
-         suspicious_route: "/captcha",
-         bot_route: "/api/bot"
-       }
+       # Nested configuration objects with OpenStruct-like behavior
      end
 
      def classification
-       @classification
+       @classification ||= OpenStruct.new(
+         confidence_threshold: 0.8,
+         categories: [:spam, :bot, :human, :suspicious]
+       )
      end
 
      def moderation
-       @moderation
+       @moderation ||= OpenStruct.new(
+         toxicity_threshold: 0.7,
+         check_response: false,
+         block_on_violation: true
+       )
      end
 
      def caching
-       @caching
+       @caching ||= OpenStruct.new(
+         predictive_enabled: true,
+         prefetch_threshold: 0.9,
+         redis_url: "redis://localhost:6379"
+       )
      end
 
      def routing
-       @routing
+       @routing ||= OpenStruct.new(
+         smart_routing_enabled: true,
+         suspicious_route: "/captcha",
+         bot_route: "/api/bot"
+       )
      end
 
      # For compatibility with middleware
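The nested sections are now created on first access and memoized, so the defaults live next to the reader and callers can still override individual values in place. A standalone sketch of the pattern (plain Ruby, not the gem's actual class):

```ruby
require 'ostruct'

class ExampleConfig
  # Build the section lazily on first access and memoize it,
  # as the diff above does for classification/moderation/caching/routing.
  def moderation
    @moderation ||= OpenStruct.new(
      toxicity_threshold: 0.7,
      check_response: false,
      block_on_violation: true
    )
  end
end

config = ExampleConfig.new
config.moderation.toxicity_threshold       # => 0.7 (default)
config.moderation.toxicity_threshold = 0.9 # per-instance override sticks
```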
@@ -186,11 +180,11 @@ module Rack
 
      def to_h
        {
-         provider: @provider,
-         api_key: @api_key,
-         api_url: @api_url,
-         timeout: @timeout,
-         retries: @retries,
+         provider: provider,
+         api_key: api_key,
+         api_url: api_url,
+         timeout: timeout,
+         retries: retries,
          features: @features,
          fail_safe: @fail_safe,
          async_processing: @async_processing,
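`to_h` now reads through the public accessors instead of the instance variables, so values produced lazily (or overridden in a subclass) show up in the serialized hash. A contrived illustration of the difference:

```ruby
class Base
  def timeout
    30 # reader supplies the default; no ivar is ever set
  end

  def to_h
    { timeout: timeout } # calls the reader, not @timeout
  end
end

class Tuned < Base
  def timeout
    5
  end
end

Base.new.to_h  # => {:timeout=>30}
Tuned.new.to_h # => {:timeout=>5}  (an ivar-based to_h would return {:timeout=>nil})
```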
@@ -212,16 +206,13 @@ module Rack
      end
 
      def provider_config
-       case @provider
-       when :openai
-         { api_key: @api_key, api_url: @api_url, timeout: @timeout, retries: @retries }
-       when :huggingface
-         { api_key: @api_key, api_url: @api_url, timeout: @timeout, retries: @retries }
-       when :local
-         { api_url: @api_url, timeout: @timeout, retries: @retries }
-       else
-         {}
-       end
+       {
+         provider: provider,
+         api_key: api_key,
+         api_url: api_url,
+         timeout: timeout,
+         retries: retries
+       }
      end
    end
  end
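`provider_config` no longer special-cases each provider: every provider now receives the same five keys, including `provider:` itself, so the caller can do the dispatching. A runnable sketch of such a caller (the provider classes are hypothetical stand-ins, not the gem's API):

```ruby
# Hypothetical stand-ins for provider adapters.
RemoteProvider = Struct.new(:opts)
LocalProvider  = Struct.new(:opts)

def build_provider(opts)
  case opts[:provider]
  when :openai, :huggingface then RemoteProvider.new(opts)
  when :local                then LocalProvider.new(opts)
  else raise ArgumentError, "unknown provider: #{opts[:provider].inspect}"
  end
end

# Shape matches the hash returned by Configuration#provider_config above.
build_provider(provider: :openai, api_key: 'k', api_url: nil, timeout: 30, retries: 3)
# => #<struct RemoteProvider opts={...}>
```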
@@ -48,9 +48,9 @@ module Rack
        when :spam
          :block
        when :suspicious
-         @config.routing.smart_routing_enabled ? :route : :allow
+         smart_routing_enabled? ? :route : :allow
        when :bot
-         @config.routing.smart_routing_enabled ? :route : :allow
+         smart_routing_enabled? ? :route : :allow
        when :human
          :allow
        else
@@ -58,6 +58,12 @@ module Rack
        end
      end
 
+     def smart_routing_enabled?
+       @config.respond_to?(:routing) &&
+         @config.routing.respond_to?(:smart_routing_enabled) &&
+         @config.routing.smart_routing_enabled
+     end
+
      def generate_request_id(env)
        "#{Time.now.to_i}-#{env.object_id}"
      end
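The new `smart_routing_enabled?` helper probes the config with `respond_to?` at each step, so classification works whether it receives the full configuration, a partial stub, or an object with no `routing` section at all. A runnable illustration of the guard under those three shapes:

```ruby
require 'ostruct'

def smart_routing_enabled?(config)
  config.respond_to?(:routing) &&
    config.routing.respond_to?(:smart_routing_enabled) &&
    config.routing.smart_routing_enabled
end

full = OpenStruct.new(routing: OpenStruct.new(smart_routing_enabled: true))
stub = OpenStruct.new(routing: OpenStruct.new) # routing present, flag unset
bare = Object.new                              # no routing at all

smart_routing_enabled?(full) # => true
smart_routing_enabled?(stub) # => false (OpenStruct only responds to set keys)
smart_routing_enabled?(bare) # => false, instead of raising NoMethodError
```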
@@ -17,7 +17,9 @@ module Rack
      end
 
      def process_response?
-       @config.moderation.check_response
+       @config.respond_to?(:moderation) &&
+         @config.moderation.respond_to?(:check_response) &&
+         @config.moderation.check_response
      end
 
      def process_request(env)
@@ -27,11 +29,11 @@ module Rack
        result = @provider.moderate_content(content.join(" "))
 
        # Apply toxicity threshold
-       threshold = @config.moderation.toxicity_threshold
+       threshold = get_toxicity_threshold
        max_score = result[:category_scores]&.values&.max || 0.0
 
        result[:action] = if result[:flagged] && max_score > threshold
-                           @config.moderation.block_on_violation ? :block : :flag
+                           should_block_on_violation? ? :block : :flag
                          else
                            :allow
                          end
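With the guarded reads in place, the moderation verdict reduces to a small decision table: content is actioned only when the provider flags it and its highest category score exceeds the threshold, and `block_on_violation` chooses between `:block` and `:flag`. The same logic, isolated:

```ruby
# Mirrors the action selection in the hunk above, with its defaults.
def moderation_action(flagged:, max_score:, threshold: 0.7, block_on_violation: true)
  return :allow unless flagged && max_score > threshold

  block_on_violation ? :block : :flag
end

moderation_action(flagged: true,  max_score: 0.95)                            # => :block
moderation_action(flagged: true,  max_score: 0.95, block_on_violation: false) # => :flag
moderation_action(flagged: true,  max_score: 0.40)                            # => :allow (below threshold)
moderation_action(flagged: false, max_score: 0.95)                            # => :allow (not flagged)
```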
@@ -87,6 +89,18 @@ module Rack
        content.compact.reject(&:empty?)
      end
 
+     def get_toxicity_threshold
+       return 0.7 unless @config.respond_to?(:moderation)
+       return 0.7 unless @config.moderation.respond_to?(:toxicity_threshold)
+       @config.moderation.toxicity_threshold
+     end
+
+     def should_block_on_violation?
+       return true unless @config.respond_to?(:moderation)
+       return true unless @config.moderation.respond_to?(:block_on_violation)
+       @config.moderation.block_on_violation
+     end
+
      def extract_content_from_response(body)
        return "" unless body.respond_to?(:each)
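Note that the hard-coded fallbacks here (`0.7` and `true`) mirror the `OpenStruct` defaults added to `Configuration#moderation`, so a handler built with a bare or partial config behaves like one with the stock configuration. The guarded-read-with-default pattern generalizes; a minimal sketch:

```ruby
require 'ostruct'

# Fetch config.section.key, falling back to a default at every step —
# the same shape as get_toxicity_threshold / should_block_on_violation?.
def config_fetch(config, section, key, default)
  return default unless config.respond_to?(section)

  section_obj = config.public_send(section)
  return default unless section_obj.respond_to?(key)

  section_obj.public_send(key)
end

cfg = OpenStruct.new(moderation: OpenStruct.new(toxicity_threshold: 0.9))
config_fetch(cfg, :moderation, :toxicity_threshold, 0.7)  # => 0.9
config_fetch(cfg, :moderation, :block_on_violation, true) # => true (fallback)
config_fetch(Object.new, :moderation, :toxicity_threshold, 0.7) # => 0.7
```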