rack-ai 0.1.0

This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
@@ -0,0 +1,259 @@
+ # frozen_string_literal: true
+
+ require "faraday"
+ require "faraday/retry"
+ require "json"
+
+ module Rack
+   module AI
+     module Providers
+       class HuggingFace < Base
+         API_BASE_URL = "https://api-inference.huggingface.co"
+
+         def initialize(config)
+           super
+           @client = build_client
+         end
+
+         def classify_request(request_data)
+           # Use a text classification model for request analysis
+           text = build_request_text(request_data)
+
+           response = @client.post("models/microsoft/DialoGPT-medium") do |req|
+             req.body = {
+               inputs: "Classify this web request as human, bot, spam, or suspicious: #{text}",
+               parameters: {
+                 max_length: 50,
+                 temperature: 0.1
+               }
+             }.to_json
+           end
+
+           handle_api_error(response) unless response.success?
+
+           result = JSON.parse(response.body)
+           generated_text = result.first&.dig("generated_text") || ""
+
+           # Parse classification from generated text
+           classification = extract_classification(generated_text)
+
+           {
+             classification: classification,
+             confidence: 0.8, # HuggingFace doesn't always provide confidence scores
+             reasoning: generated_text,
+             provider: :huggingface
+           }
+         end
+
+         def moderate_content(content, options = {})
+           # Use toxicity detection model
+           response = @client.post("models/unitary/toxic-bert") do |req|
+             req.body = {
+               inputs: content
+             }.to_json
+           end
+
+           handle_api_error(response) unless response.success?
+
+           result = JSON.parse(response.body)
+
+           # HuggingFace toxicity models return classification results
+           toxic_score = result.first&.find { |r| r["label"] == "TOXIC" }&.dig("score") || 0.0
+
+           {
+             flagged: toxic_score > 0.5,
+             categories: { toxicity: toxic_score > 0.5 },
+             category_scores: { toxicity: toxic_score },
+             provider: :huggingface
+           }
+         end
+
+         def analyze_patterns(data)
+           # Use a general text analysis approach
+           text = "Analyze these web traffic patterns: #{data.to_json}"
+
+           response = @client.post("models/facebook/bart-large-cnn") do |req|
+             req.body = {
+               inputs: text,
+               parameters: {
+                 max_length: 200,
+                 min_length: 50
+               }
+             }.to_json
+           end
+
+           handle_api_error(response) unless response.success?
+
+           result = JSON.parse(response.body)
+           summary = result.first&.dig("summary_text") || ""
+
+           {
+             anomalies: extract_anomalies(summary),
+             trends: extract_trends(summary),
+             recommendations: extract_recommendations(summary),
+             confidence: 0.7,
+             provider: :huggingface
+           }
+         end
+
+         def detect_anomalies(request_data)
+           # Use anomaly detection approach
+           text = build_security_analysis_text(request_data)
+
+           response = @client.post("models/microsoft/DialoGPT-medium") do |req|
+             req.body = {
+               inputs: "Security analysis: #{text}. Threat level (low/medium/high):",
+               parameters: {
+                 max_length: 100,
+                 temperature: 0.1
+               }
+             }.to_json
+           end
+
+           handle_api_error(response) unless response.success?
+
+           result = JSON.parse(response.body)
+           analysis = result.first&.dig("generated_text") || ""
+
+           {
+             threat_level: extract_threat_level(analysis),
+             anomalies: extract_security_anomalies(analysis),
+             confidence: 0.7,
+             provider: :huggingface
+           }
+         end
+
+         def enhance_content(content, enhancement_type)
+           model = case enhancement_type
+                   when :seo
+                     "models/facebook/bart-large-cnn"
+                   when :readability
+                     "models/facebook/bart-large"
+                   else
+                     "models/t5-base"
+                   end
+
+           prompt = build_enhancement_prompt(content, enhancement_type)
+
+           response = @client.post(model) do |req|
+             req.body = {
+               inputs: prompt,
+               parameters: {
+                 max_length: 500,
+                 temperature: 0.3
+               }
+             }.to_json
+           end
+
+           handle_api_error(response) unless response.success?
+
+           result = JSON.parse(response.body)
+           enhanced = result.first&.dig("generated_text") || content
+
+           {
+             original_content: content,
+             enhanced_content: enhanced,
+             enhancement_type: enhancement_type,
+             provider: :huggingface
+           }
+         end
+
+         def ping
+           response = @client.get("models")
+           response.success?
+         end
+
+         protected
+
+         def requires_api_url?
+           false
+         end
+
+         private
+
+         def build_client
+           Faraday.new(
+             url: @config[:api_url] || API_BASE_URL,
+             headers: {
+               "Authorization" => "Bearer #{@config[:api_key]}",
+               "Content-Type" => "application/json"
+             }
+           ) do |f|
+             f.request :retry, max: @config[:retries] || 3, interval: 1.0
+             f.adapter Faraday.default_adapter
+           end
+         end
+
+         def build_request_text(request_data)
+           "#{request_data[:method]} #{request_data[:path]} from #{request_data[:user_agent]} at #{request_data[:remote_ip]}"
+         end
+
+         def build_security_analysis_text(request_data)
+           "Request: #{request_data[:method]} #{request_data[:path]}?#{request_data[:query_string]} Headers: #{request_data[:headers].keys.join(',')}"
+         end
+
+         def extract_classification(text)
+           text_lower = text.downcase
+           return :spam if text_lower.include?("spam")
+           return :bot if text_lower.include?("bot")
+           return :suspicious if text_lower.include?("suspicious")
+           return :human if text_lower.include?("human")
+           :unknown
+         end
+
+         def extract_threat_level(text)
+           text_lower = text.downcase
+           return :high if text_lower.include?("high")
+           return :medium if text_lower.include?("medium")
+           :low
+         end
+
+         def extract_anomalies(text)
+           # Simple keyword extraction for anomalies
+           anomalies = []
+           anomalies << "unusual_traffic" if text.include?("unusual") || text.include?("anomal")
+           anomalies << "high_frequency" if text.include?("frequent") || text.include?("rapid")
+           anomalies << "suspicious_patterns" if text.include?("suspicious") || text.include?("pattern")
+           anomalies
+         end
+
+         def extract_trends(text)
+           trends = []
+           trends << "increasing_traffic" if text.include?("increas") || text.include?("grow")
+           trends << "decreasing_traffic" if text.include?("decreas") || text.include?("drop")
+           trends << "stable_patterns" if text.include?("stable") || text.include?("consistent")
+           trends
+         end
+
+         def extract_recommendations(text)
+           recommendations = []
+           recommendations << "monitor_closely" if text.include?("monitor") || text.include?("watch")
+           recommendations << "increase_security" if text.include?("security") || text.include?("protect")
+           recommendations << "optimize_performance" if text.include?("optim") || text.include?("performance")
+           recommendations
+         end
+
+         def extract_security_anomalies(text)
+           anomalies = []
+           anomalies << "sql_injection_attempt" if text.include?("sql") || text.include?("injection")
+           anomalies << "xss_attempt" if text.include?("xss") || text.include?("script")
+           anomalies << "unusual_user_agent" if text.include?("user-agent") || text.include?("bot")
+           anomalies
+         end
+
+         def build_enhancement_prompt(content, enhancement_type)
+           case enhancement_type
+           when :seo
+             "Optimize for SEO: #{content}"
+           when :readability
+             "Improve readability: #{content}"
+           when :accessibility
+             "Enhance accessibility: #{content}"
+           else
+             "Improve: #{content}"
+           end
+         end
+       end
+     end
+   end
+ end
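
For orientation, here is a minimal sketch of how the HuggingFace provider above might be exercised on its own. It assumes the Base superclass (not part of this diff) simply stores the config hash it receives, since build_client reads @config[:api_key], @config[:api_url] and @config[:retries]; the require path is likewise an assumption.

    # Hypothetical standalone usage; Base and the middleware wiring are outside this diff.
    require "rack/ai"  # assumed entry point that loads the providers

    provider = Rack::AI::Providers::HuggingFace.new(
      api_key: ENV["HUGGINGFACE_API_KEY"],  # sent as a Bearer token by build_client
      retries: 3                            # passed to the Faraday :retry middleware
    )

    moderation = provider.moderate_content("some user-submitted text")
    moderation[:flagged]          # true when unitary/toxic-bert scores TOXIC above 0.5
    moderation[:category_scores]  # e.g. { toxicity: 0.12 }

Note that classify_request and detect_anomalies post to a conversational model (DialoGPT-medium) and then scan the generated text for keywords, so their :confidence values are fixed heuristics (0.8 and 0.7) rather than scores returned by the API.
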
@@ -0,0 +1,153 @@
+ # frozen_string_literal: true
+
+ require "faraday"
+ require "faraday/retry"
+ require "json"
+
+ module Rack
+   module AI
+     module Providers
+       class Local < Base
+         DEFAULT_PORT = 8080
+
+         def initialize(config)
+           super
+           @client = build_client
+         end
+
+         def classify_request(request_data)
+           response = @client.post("classify") do |req|
+             req.body = {
+               request_data: request_data,
+               task: "classification"
+             }.to_json
+           end
+
+           handle_api_error(response) unless response.success?
+
+           result = JSON.parse(response.body)
+
+           {
+             classification: result["classification"]&.to_sym || :unknown,
+             confidence: result["confidence"] || 0.0,
+             reasoning: result["reasoning"],
+             provider: :local
+           }
+         end
+
+         def moderate_content(content, options = {})
+           response = @client.post("moderate") do |req|
+             req.body = {
+               content: content,
+               options: options
+             }.to_json
+           end
+
+           handle_api_error(response) unless response.success?
+
+           result = JSON.parse(response.body)
+
+           {
+             flagged: result["flagged"] || false,
+             categories: result["categories"] || {},
+             category_scores: result["category_scores"] || {},
+             provider: :local
+           }
+         end
+
+         def analyze_patterns(data)
+           response = @client.post("analyze") do |req|
+             req.body = {
+               data: data,
+               analysis_type: "patterns"
+             }.to_json
+           end
+
+           handle_api_error(response) unless response.success?
+
+           result = JSON.parse(response.body)
+
+           {
+             anomalies: result["anomalies"] || [],
+             trends: result["trends"] || [],
+             recommendations: result["recommendations"] || [],
+             confidence: result["confidence"] || 0.0,
+             provider: :local
+           }
+         end
+
+         def detect_anomalies(request_data)
+           response = @client.post("detect") do |req|
+             req.body = {
+               request_data: request_data,
+               detection_type: "anomalies"
+             }.to_json
+           end
+
+           handle_api_error(response) unless response.success?
+
+           result = JSON.parse(response.body)
+
+           {
+             threat_level: result["threat_level"]&.to_sym || :low,
+             anomalies: result["anomalies"] || [],
+             confidence: result["confidence"] || 0.0,
+             provider: :local
+           }
+         end
+
+         def enhance_content(content, enhancement_type)
+           response = @client.post("enhance") do |req|
+             req.body = {
+               content: content,
+               enhancement_type: enhancement_type
+             }.to_json
+           end
+
+           handle_api_error(response) unless response.success?
+
+           result = JSON.parse(response.body)
+
+           {
+             original_content: content,
+             enhanced_content: result["enhanced_content"] || content,
+             enhancement_type: enhancement_type,
+             provider: :local
+           }
+         end
+
+         def ping
+           response = @client.get("health")
+           response.success?
+         end
+
+         protected
+
+         def requires_api_key?
+           false
+         end
+
+         def requires_api_url?
+           true
+         end
+
+         private
+
+         def build_client
+           base_url = @config[:api_url] || "http://localhost:#{DEFAULT_PORT}"
+
+           Faraday.new(
+             url: base_url,
+             headers: {
+               "Content-Type" => "application/json",
+               "User-Agent" => "rack-ai/#{Rack::AI::VERSION}"
+             }
+           ) do |f|
+             f.request :retry, max: @config[:retries] || 2, interval: 0.5
+             f.adapter Faraday.default_adapter
+           end
+         end
+       end
+     end
+   end
+ end
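
The Local provider above talks to a self-hosted inference service over plain HTTP: it needs an api_url (requires_api_url? is true) but no API key, and expects JSON endpoints named classify, moderate, analyze, detect, enhance and health. A minimal sketch, again assuming Base stores the config hash that build_client reads:

    # Hypothetical usage against a self-hosted service on the default port.
    provider = Rack::AI::Providers::Local.new(api_url: "http://localhost:8080")

    if provider.ping  # GET /health
      verdict = provider.classify_request(
        method: "GET",
        path: "/admin",
        user_agent: "curl/8.5.0",
        remote_ip: "203.0.113.7"
      )
      # The service is expected to reply with JSON such as
      # {"classification": "bot", "confidence": 0.9, "reasoning": "..."};
      # missing fields fall back to :unknown / 0.0.
      verdict[:classification]
    end
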
@@ -0,0 +1,246 @@
+ # frozen_string_literal: true
+
+ require "faraday"
+ require "faraday/retry"
+ require "json"
+
+ module Rack
+   module AI
+     module Providers
+       class OpenAI < Base
+         API_BASE_URL = "https://api.openai.com/v1"
+
+         def initialize(config)
+           super
+           @client = build_client
+         end
+
+         def classify_request(data)
+           prompt = build_classification_prompt(data)
+
+           response = @client.post("chat/completions") do |req|
+             req.body = {
+               model: "gpt-3.5-turbo",
+               messages: [
+                 {
+                   role: "system",
+                   content: "You are a web request classifier. Analyze the request and classify it as one of: human, bot, spam, suspicious. Respond with JSON containing 'classification' and 'confidence' (0-1)."
+                 },
+                 {
+                   role: "user",
+                   content: prompt
+                 }
+               ],
+               temperature: 0.1,
+               max_tokens: 100
+             }.to_json
+           end
+
+           handle_api_error(response) unless response.success?
+
+           result = JSON.parse(response.body)
+           content = JSON.parse(result.dig("choices", 0, "message", "content") || "{}")
+
+           {
+             classification: content["classification"]&.to_sym || :unknown,
+             confidence: content["confidence"] || 0.0,
+             reasoning: content["reasoning"],
+             provider: :openai
+           }
+         end
+
+         def moderate_content(content, options = {})
+           response = @client.post("moderations") do |req|
+             req.body = {
+               input: content,
+               model: "text-moderation-latest"
+             }.to_json
+           end
+
+           handle_api_error(response) unless response.success?
+
+           result = JSON.parse(response.body)
+           moderation_result = result["results"]&.first || {}
+
+           {
+             flagged: moderation_result["flagged"] || false,
+             categories: moderation_result["categories"] || {},
+             category_scores: moderation_result["category_scores"] || {},
+             provider: :openai
+           }
+         end
+
+         def analyze_patterns(data)
+           prompt = build_pattern_analysis_prompt(data)
+
+           response = @client.post("chat/completions") do |req|
+             req.body = {
+               model: "gpt-3.5-turbo",
+               messages: [
+                 {
+                   role: "system",
+                   content: "You are a traffic pattern analyst. Analyze the provided request patterns and identify anomalies, trends, and recommendations. Respond with JSON."
+                 },
+                 {
+                   role: "user",
+                   content: prompt
+                 }
+               ],
+               temperature: 0.2,
+               max_tokens: 500
+             }.to_json
+           end
+
+           handle_api_error(response) unless response.success?
+
+           result = JSON.parse(response.body)
+           content = JSON.parse(result.dig("choices", 0, "message", "content") || "{}")
+
+           {
+             anomalies: content["anomalies"] || [],
+             trends: content["trends"] || [],
+             recommendations: content["recommendations"] || [],
+             confidence: content["confidence"] || 0.0,
+             provider: :openai
+           }
+         end
+
+         def detect_anomalies(request_data)
+           prompt = build_anomaly_detection_prompt(request_data)
+
+           response = @client.post("chat/completions") do |req|
+             req.body = {
+               model: "gpt-3.5-turbo",
+               messages: [
+                 {
+                   role: "system",
+                   content: "You are a security analyst. Detect potential security threats, anomalies, or suspicious patterns in web requests. Respond with JSON containing 'threat_level' (low/medium/high), 'anomalies', and 'confidence'."
+                 },
+                 {
+                   role: "user",
+                   content: prompt
+                 }
+               ],
+               temperature: 0.1,
+               max_tokens: 200
+             }.to_json
+           end
+
+           handle_api_error(response) unless response.success?
+
+           result = JSON.parse(response.body)
+           content = JSON.parse(result.dig("choices", 0, "message", "content") || "{}")
+
+           {
+             threat_level: content["threat_level"]&.to_sym || :low,
+             anomalies: content["anomalies"] || [],
+             confidence: content["confidence"] || 0.0,
+             provider: :openai
+           }
+         end
+
+         def enhance_content(content, enhancement_type)
+           prompt = build_enhancement_prompt(content, enhancement_type)
+
+           response = @client.post("chat/completions") do |req|
+             req.body = {
+               model: "gpt-3.5-turbo",
+               messages: [
+                 {
+                   role: "system",
+                   content: "You are a content enhancement assistant. Improve the provided content based on the specified enhancement type."
+                 },
+                 {
+                   role: "user",
+                   content: prompt
+                 }
+               ],
+               temperature: 0.3,
+               max_tokens: 1000
+             }.to_json
+           end
+
+           handle_api_error(response) unless response.success?
+
+           result = JSON.parse(response.body)
+           enhanced_content = result.dig("choices", 0, "message", "content")
+
+           {
+             original_content: content,
+             enhanced_content: enhanced_content,
+             enhancement_type: enhancement_type,
+             provider: :openai
+           }
+         end
+
+         def ping
+           response = @client.get("models")
+           response.success?
+         end
+
+         private
+
+         def build_client
+           Faraday.new(
+             url: @config[:api_url] || API_BASE_URL,
+             headers: {
+               "Authorization" => "Bearer #{@config[:api_key]}",
+               "Content-Type" => "application/json"
+             }
+           ) do |f|
+             f.request :retry, max: @config[:retries] || 3, interval: 0.5
+             f.adapter Faraday.default_adapter
+           end
+         end
+
+         def build_classification_prompt(request_data)
+           <<~PROMPT
+             Analyze this web request:
+
+             Method: #{request_data[:method]}
+             Path: #{request_data[:path]}
+             Query: #{request_data[:query_string]}
+             User-Agent: #{request_data[:user_agent]}
+             IP: #{request_data[:remote_ip]}
+             Headers: #{request_data[:headers].to_json}
+
+             Classify this request and provide your reasoning.
+           PROMPT
+         end
+
+         def build_pattern_analysis_prompt(data)
+           <<~PROMPT
+             Analyze these traffic patterns:
+
+             #{data.to_json}
+
+             Identify anomalies, trends, and provide actionable recommendations.
+           PROMPT
+         end
+
+         def build_anomaly_detection_prompt(request_data)
+           <<~PROMPT
+             Analyze this request for security threats:
+
+             #{request_data.to_json}
+
+             Look for: SQL injection, XSS, CSRF, unusual patterns, bot behavior, etc.
+           PROMPT
+         end
+
+         def build_enhancement_prompt(content, enhancement_type)
+           case enhancement_type
+           when :seo
+             "Optimize this content for SEO while maintaining its meaning:\n\n#{content}"
+           when :readability
+             "Improve the readability and clarity of this content:\n\n#{content}"
+           when :accessibility
+             "Enhance this content for better accessibility:\n\n#{content}"
+           else
+             "Improve this content:\n\n#{content}"
+           end
+         end
+       end
+     end
+   end
+ end
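
Finally, a sketch of the OpenAI provider above. The chat-based methods ask gpt-3.5-turbo to answer in JSON and parse the first choice's message content, so fields the model omits fall back to defaults (:unknown, :low, 0.0). The config hash shape is again an assumption based on what build_client reads.

    # Hypothetical usage; Base is assumed to store the config hash read by build_client.
    provider = Rack::AI::Providers::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])

    report = provider.detect_anomalies(
      method: "GET",
      path: "/search",
      query_string: "q=1' OR '1'='1",
      headers: { "User-Agent" => "sqlmap/1.7" }
    )

    report[:threat_level]  # e.g. :high, or :low if the reply lacks a threat_level field
    report[:anomalies]     # e.g. ["possible SQL injection in query string"]

    moderation = provider.moderate_content("some user-submitted text")
    moderation[:flagged]   # taken directly from the OpenAI moderations endpoint
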