pwn 0.5.418 → 0.5.421
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.rubocop.yml +1 -1
- data/Gemfile +7 -7
- data/README.md +3 -3
- data/bin/pwn_sast +5 -5
- data/etc/pwn.yaml.EXAMPLE +3 -1
- data/lib/pwn/ai/grok.rb +14 -5
- data/lib/pwn/ai/ollama.rb +13 -13
- data/lib/pwn/ai/open_ai.rb +64 -5
- data/lib/pwn/plugins/burp_suite.rb +150 -2
- data/lib/pwn/plugins/repl.rb +109 -80
- data/lib/pwn/plugins/vault.rb +3 -0
- data/lib/pwn/plugins/zaproxy.rb +11 -18
- data/lib/pwn/reports/sast.rb +8 -6
- data/lib/pwn/version.rb +1 -1
- data/third_party/pwn_rdoc.jsonl +2 -2
- metadata +15 -15
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f3e5df4c5f4c7eab586af5e5024ae1df3c00f2c885e44636e436ae501fce3530
+  data.tar.gz: 369de672a50ea1efcf94b84290a51698808efa5042d978bcd66061f4e4ba9ec2
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a6fef6398ffebead215a4bdafd99691a8a14be0048944e76aa9c6e85f0964749724fbb9d9d8116a57157ef62baebd522ed1eced64d8bf801ed279efd3d92d213
+  data.tar.gz: 9a8bd528fb55e9ada542ed214697df963c18b12a8d2b831b50ca6c1326f8229478abf3a884db281e30d0234b96c741f2215021e50dba85f3bf3b937de76e84d8
data/.rubocop.yml
CHANGED
data/Gemfile
CHANGED
@@ -11,7 +11,7 @@ gemspec
 # In some circumstances custom flags are passed to gems in order
 # to build appropriately. Defer to ./reinstall_pwn_gemset.sh
 # to review these custom flags (e.g. pg, serialport, etc).
-gem 'activesupport', '8.0.
+gem 'activesupport', '8.0.3'
 gem 'anemone', '0.7.2'
 gem 'authy', '3.0.1'
 gem 'aws-sdk', '3.3.0'
@@ -33,7 +33,7 @@ gem 'ffi', '1.17.2'
 gem 'fftw3', '0.3'
 gem 'gdb', '1.0.0'
 gem 'gem-wrappers', '1.4.0'
-gem 'geocoder', '1.8.
+gem 'geocoder', '1.8.6'
 gem 'gist', '6.0.0'
 gem 'gruff', '0.29.0'
 # gem 'hidapi', '0.1.9'
@@ -41,14 +41,14 @@ gem 'htmlentities', '4.3.4'
 gem 'ipaddress', '0.8.3'
 gem 'jenkins_api_client2', '1.9.0'
 gem 'js-beautify', '0.1.8'
-gem 'json', '2.
+gem 'json', '2.15.0'
 gem 'jsonpath', '1.1.5'
 gem 'json_schemer', '2.4.0'
 gem 'jwt', '3.1.2'
 gem 'libusb', '0.7.2'
 gem 'luhn', '1.0.2'
 gem 'mail', '2.8.1'
-gem 'meshtastic', '0.0.
+gem 'meshtastic', '0.0.126'
 gem 'metasm', '1.0.5'
 gem 'mongo', '2.21.3'
 gem 'msfrpc-client', '1.1.2'
@@ -82,7 +82,7 @@ gem 'rmagick', '6.1.4'
 gem 'rqrcode', '3.1.0'
 gem 'rspec', '3.13.1'
 gem 'rtesseract', '3.1.4'
-gem 'rubocop', '1.
+gem 'rubocop', '1.81.1'
 gem 'rubocop-rake', '0.7.1'
 gem 'rubocop-rspec', '3.7.0'
 gem 'ruby-audio', '1.6.1'
@@ -93,10 +93,10 @@ gem 'savon', '2.15.1'
 gem 'selenium-devtools', '0.139.0'
 # gem 'serialport', '1.3.2'
 # gem 'sinatra', '4.0.0'
-gem 'slack-ruby-client', '
+gem 'slack-ruby-client', '3.0.0'
 gem 'socksify', '1.8.1'
 gem 'spreadsheet', '1.3.4'
-gem 'sqlite3', '2.7.
+gem 'sqlite3', '2.7.4'
 gem 'thin', '2.0.1'
 gem 'tty-prompt', '0.23.1'
 gem 'tty-spinner', '0.9.3'
data/README.md
CHANGED
@@ -37,7 +37,7 @@ $ cd /opt/pwn
 $ ./install.sh
 $ ./install.sh ruby-gem
 $ pwn
-pwn[v0.5.
+pwn[v0.5.421]:001 >>> PWN.help
 ```
 
 [](https://youtu.be/G7iLUY4FzsI)
@@ -52,7 +52,7 @@ $ rvm use ruby-3.4.4@pwn
 $ gem uninstall --all --executables pwn
 $ gem install --verbose pwn
 $ pwn
-pwn[v0.5.
+pwn[v0.5.421]:001 >>> PWN.help
 ```
 
 If you're using a multi-user install of RVM do:
@@ -62,7 +62,7 @@ $ rvm use ruby-3.4.4@pwn
 $ rvmsudo gem uninstall --all --executables pwn
 $ rvmsudo gem install --verbose pwn
 $ pwn
-pwn[v0.5.
+pwn[v0.5.421]:001 >>> PWN.help
 ```
 
 PWN periodically upgrades to the latest version of Ruby which is reflected in `/opt/pwn/.ruby-version`. The easiest way to upgrade to the latest version of Ruby from a previous PWN installation is to run the following script:
data/bin/pwn_sast
CHANGED
@@ -39,8 +39,8 @@ OptionParser.new do |options|
 opts[:ai_engine] = a
 end
 
-options.on('-
-opts[:
+options.on('-bURI', '--ai-base-uri=URI', '<Optional AI Base API URI (Only Required for "ollama" AI Engine. Supported by other LLMs when hosted privately)>') do |b|
+opts[:ai_base_uri] = b
 end
 
 options.on('-mMODEL', '--ai-model=MODEL', '<Optional AI Model to Use for Respective AI Engine (e.g., grok-4-0709, grok-3-mini-fast, gpt5-chat-latest, chargpt-4o-latest, llama-3.1, etc.)>') do |m|
@@ -101,8 +101,8 @@ begin
 valid_ai_engines = %i[grok ollama openai]
 raise "ERROR: Invalid AI Engine. Valid options are: #{valid_ai_engines.join(', ')}" unless valid_ai_engines.include?(ai_engine)
 
-
-raise 'ERROR:
+ai_base_uri = opts[:ai_base_uri]
+raise 'ERROR: --base-uri Parameter for Ollama AI engine is required.' if ai_engine == :ollama && ai_base_uri.nil?
 
 ai_model = opts[:ai_model]
 raise 'ERROR: AI Model is required for AI engine ollama.' if ai_engine == :ollama && ai_model.nil?
@@ -206,7 +206,7 @@ begin
 ai_engine: ai_engine,
 ai_model: ai_model,
 ai_key: ai_key,
-
+ai_base_uri: ai_base_uri,
 ai_system_role_content: ai_system_role_content,
 ai_temp: ai_temp
 )
data/etc/pwn.yaml.EXAMPLE
CHANGED
@@ -28,19 +28,21 @@ hunter:
   api_key: 'hunter.how API Key'
 
 grok:
+  base_uri: 'optional - Base URI for Grok - Use private base OR defaults to https://api.x.ai/v1'
   key: 'required - OpenAI API Key'
   model: 'optional - Grok model to use'
   system_role_content: 'You are an ethically hacking OpenAI agent.'
   temp: 'optional - OpenAI temperature'
 
 openai:
+  base_uri: 'optional - Base URI for OpenAI - Use private base OR defaults to https://api.openai.com/v1'
   key: 'required - OpenAI API Key'
   model: 'optional - OpenAI model to use'
   system_role_content: 'You are an ethically hacking OpenAI agent.'
   temp: 'optional - OpenAI temperature'
 
 ollama:
-
+  base_uri: 'required - Base URI for Open WebUI - e.g. https://ollama.local'
   key: 'required - Open WebUI API Key Under Settings >> Account >> JWT Token'
   model: 'required - Ollama model to use'
   system_role_content: 'You are an ethically hacking Ollama agent.'
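The new `base_uri` keys in this example config map directly onto the updated `PWN::AI` module signatures in the diffs below. As a minimal sketch only (the file path and the plain `YAML.load_file` call are assumptions for illustration; the gem has its own mechanisms for locating and decrypting `pwn.yaml`), the `ollama` block could be wired into the updated `get_models` call like this:

```ruby
# Minimal sketch (not from the gem): read a plaintext copy of the example
# config and pass the new ollama.base_uri key into the updated API.
# The path below is hypothetical; adjust to wherever your pwn.yaml lives.
require 'yaml'
require 'pwn'

cfg = YAML.load_file('/opt/pwn/etc/pwn.yaml')
ollama_cfg = cfg['ollama']

response = PWN::AI::Ollama.get_models(
  base_uri: ollama_cfg['base_uri'], # e.g. https://ollama.local (Open WebUI)
  token: ollama_cfg['key']          # Open WebUI JWT token
)
puts response
```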
data/lib/pwn/ai/grok.rb
CHANGED
@@ -15,6 +15,7 @@ module PWN
 # grok_ai_rest_call(
 # token: 'required - grok_ai bearer token',
 # http_method: 'optional HTTP method (defaults to GET)
+# base_uri: 'optional base grok api URI (defaults to https://api.x.ai/v1)',
 # rest_call: 'required rest call to make per the schema',
 # params: 'optional params passed in the URI or HTTP Headers',
 # http_body: 'optional HTTP body sent in HTTP methods that support it e.g. POST',
@@ -29,6 +30,8 @@ module PWN
 else
 opts[:http_method].to_s.scrub.to_sym
 end
+
+base_uri = opts[:base_uri] ||= 'https://api.x.ai/v1'
 rest_call = opts[:rest_call].to_s.scrub
 params = opts[:params]
 headers = {
@@ -44,8 +47,6 @@ module PWN
 
 spinner = opts[:spinner] || false
 
-base_grok_api_uri = 'https://api.x.ai/v1'
-
 browser_obj = PWN::Plugins::TransparentBrowser.open(browser_type: :rest)
 rest_client = browser_obj[:browser]::Request
 
@@ -59,7 +60,7 @@ module PWN
 headers[:params] = params
 response = rest_client.execute(
 method: http_method,
-url: "#{
+url: "#{base_uri}/#{rest_call}",
 headers: headers,
 verify_ssl: false,
 timeout: timeout
@@ -71,7 +72,7 @@ module PWN
 
 response = rest_client.execute(
 method: http_method,
-url: "#{
+url: "#{base_uri}/#{rest_call}",
 headers: headers,
 payload: http_body,
 verify_ssl: false,
@@ -80,7 +81,7 @@ module PWN
 else
 response = rest_client.execute(
 method: http_method,
-url: "#{
+url: "#{base_uri}/#{rest_call}",
 headers: headers,
 payload: http_body.to_json,
 verify_ssl: false,
@@ -107,13 +108,16 @@ module PWN
 
 # Supported Method Parameters::
 # response = PWN::AI::Grok.get_models(
+# base_uri: 'optional - base grok api URI (defaults to https://api.x.ai/v1)',
 # token: 'required - Bearer token'
 # )
 
 public_class_method def self.get_models(opts = {})
+base_uri = opts[:base_uri]
 token = opts[:token]
 
 response = grok_rest_call(
+base_uri: base_uri,
 token: token,
 rest_call: 'models'
 )
@@ -125,6 +129,7 @@ module PWN
 
 # Supported Method Parameters::
 # response = PWN::AI::Grok.chat(
+# base_uri: 'optional - base grok api URI (defaults to https://api.x.ai/v1)',
 # token: 'required - Bearer token',
 # request: 'required - message to ChatGPT'
 # model: 'optional - model to use for text generation (defaults to grok-4-0709)',
@@ -137,6 +142,7 @@ module PWN
 # )
 
 public_class_method def self.chat(opts = {})
+base_uri = opts[:base_uri]
 token = opts[:token]
 request = opts[:request]
 
@@ -188,6 +194,7 @@ module PWN
 spinner = opts[:spinner]
 
 response = grok_rest_call(
+base_uri: base_uri,
 http_method: :post,
 token: token,
 rest_call: rest_call,
@@ -230,10 +237,12 @@ module PWN
 public_class_method def self.help
 puts "USAGE:
 response = #{self}.get_models(
+base_uri: 'optional - base grok api URI (defaults to https://api.x.ai/v1)',
 token: 'required - Bearer token'
 )
 
 response = #{self}.chat(
+base_uri: 'optional - base grok api URI (defaults to https://api.x.ai/v1)',
 token: 'required - Bearer token',
 request: 'required - message to ChatGPT',
 model: 'optional - model to use for text generation (defaults to grok-4-0709)',
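Taken together, these changes make the Grok API base URI overridable per call instead of hard-coded. A minimal usage sketch, assuming a hypothetical private endpoint and environment variable for the bearer token (only the keyword names come from the help text above):

```ruby
# Minimal sketch (assumed values): base_uri is optional and overrides the
# default https://api.x.ai/v1 when Grok is fronted by a private gateway.
require 'pwn'

response = PWN::AI::Grok.chat(
  base_uri: 'https://grok.private.example', # hypothetical; omit to use https://api.x.ai/v1
  token: ENV.fetch('GROK_API_KEY'),         # hypothetical env var holding the bearer token
  request: 'Summarize the OWASP Top 10 in one sentence each.'
)
puts response
```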
data/lib/pwn/ai/ollama.rb
CHANGED
@@ -14,7 +14,7 @@ module PWN
 module Ollama
 # Supported Method Parameters::
 # ollama_rest_call(
-#
+# base_uri: 'required - base URI for the Ollama API',
 # token: 'required - ollama bearer token',
 # http_method: 'optional HTTP method (defaults to GET)
 # rest_call: 'required rest call to make per the schema',
@@ -25,7 +25,7 @@ module PWN
 # )
 
 private_class_method def self.ollama_rest_call(opts = {})
-
+base_uri = opts[:base_uri]
 token = opts[:token]
 http_method = if opts[:http_method].nil?
 :get
@@ -61,7 +61,7 @@ module PWN
 headers[:params] = params
 response = rest_client.execute(
 method: http_method,
-url: "#{
+url: "#{base_uri}/#{rest_call}",
 headers: headers,
 verify_ssl: false,
 timeout: timeout
@@ -73,7 +73,7 @@ module PWN
 
 response = rest_client.execute(
 method: http_method,
-url: "#{
+url: "#{base_uri}/#{rest_call}",
 headers: headers,
 payload: http_body,
 verify_ssl: false,
@@ -82,7 +82,7 @@ module PWN
 else
 response = rest_client.execute(
 method: http_method,
-url: "#{
+url: "#{base_uri}/#{rest_call}",
 headers: headers,
 payload: http_body.to_json,
 verify_ssl: false,
@@ -109,16 +109,16 @@ module PWN
 
 # Supported Method Parameters::
 # response = PWN::AI::Ollama.get_models(
-#
+# base_uri: 'required - base URI for the Ollama API',
 # token: 'required - Bearer token'
 # )
 
 public_class_method def self.get_models(opts = {})
-
+base_uri = opts[:base_uri]
 token = opts[:token]
 
 response = ollama_rest_call(
-
+base_uri: base_uri,
 token: token,
 rest_call: 'ollama/api/tags'
 )
@@ -130,7 +130,7 @@ module PWN
 
 # Supported Method Parameters::
 # response = PWN::AI::Ollama.chat(
-#
+# base_uri: 'required - base URI for the Ollama API',
 # token: 'required - Bearer token',
 # request: 'required - message to ChatGPT'
 # model: 'optional - model to use for text generation (defaults to gpt-3.5-turbo-0613)',
@@ -143,7 +143,7 @@ module PWN
 # )
 
 public_class_method def self.chat(opts = {})
-
+base_uri = opts[:base_uri]
 token = opts[:token]
 request = opts[:request]
 
@@ -195,7 +195,7 @@ module PWN
 spinner = opts[:spinner]
 
 response = ollama_rest_call(
-
+base_uri: base_uri,
 http_method: :post,
 token: token,
 rest_call: rest_call,
@@ -238,12 +238,12 @@ module PWN
 public_class_method def self.help
 puts "USAGE:
 response = #{self}.get_models(
-
+base_uri: 'required - base URI for the Ollama API',
 token: 'required - Bearer token'
 )
 
 response = #{self}.chat(
-
+base_uri: 'required - base URI for the Ollama API',
 token: 'required - Bearer token',
 request: 'required - message to ChatGPT',
 model: 'optional - model to use for text generation (defaults to llama2:latest)',
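For Ollama the base URI is now an explicit required parameter rather than a value derived inside the module. A minimal usage sketch, assuming a hypothetical Open WebUI host, JWT environment variable, and model tag (the keyword names come from the help text above):

```ruby
# Minimal sketch (assumed host, token source, and model name): base_uri and
# token are required for Ollama; the model must exist on the Open WebUI /
# Ollama instance that base_uri points at.
require 'pwn'

response = PWN::AI::Ollama.chat(
  base_uri: 'https://ollama.local', # Open WebUI base URI (required)
  token: ENV.fetch('OLLAMA_JWT'),   # Settings >> Account >> JWT Token (hypothetical env var)
  request: 'Explain SSRF in two sentences.',
  model: 'llama3.1'                 # hypothetical model tag pulled on that server
)
puts response
```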