pwn 0.5.420 → 0.5.421
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Gemfile +1 -1
- data/README.md +3 -3
- data/bin/pwn_sast +5 -5
- data/etc/pwn.yaml.EXAMPLE +3 -1
- data/lib/pwn/ai/grok.rb +14 -5
- data/lib/pwn/ai/ollama.rb +13 -13
- data/lib/pwn/ai/open_ai.rb +64 -5
- data/lib/pwn/plugins/repl.rb +109 -80
- data/lib/pwn/plugins/vault.rb +3 -0
- data/lib/pwn/reports/sast.rb +6 -4
- data/lib/pwn/version.rb +1 -1
- data/third_party/pwn_rdoc.jsonl +2 -2
- metadata +3 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f3e5df4c5f4c7eab586af5e5024ae1df3c00f2c885e44636e436ae501fce3530
+  data.tar.gz: 369de672a50ea1efcf94b84290a51698808efa5042d978bcd66061f4e4ba9ec2
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a6fef6398ffebead215a4bdafd99691a8a14be0048944e76aa9c6e85f0964749724fbb9d9d8116a57157ef62baebd522ed1eced64d8bf801ed279efd3d92d213
+  data.tar.gz: 9a8bd528fb55e9ada542ed214697df963c18b12a8d2b831b50ca6c1326f8229478abf3a884db281e30d0234b96c741f2215021e50dba85f3bf3b937de76e84d8
data/Gemfile
CHANGED
data/README.md
CHANGED
@@ -37,7 +37,7 @@ $ cd /opt/pwn
 $ ./install.sh
 $ ./install.sh ruby-gem
 $ pwn
-pwn[v0.5.420]:001 >>> PWN.help
+pwn[v0.5.421]:001 >>> PWN.help
 ```

 [](https://youtu.be/G7iLUY4FzsI)
@@ -52,7 +52,7 @@ $ rvm use ruby-3.4.4@pwn
 $ gem uninstall --all --executables pwn
 $ gem install --verbose pwn
 $ pwn
-pwn[v0.5.420]:001 >>> PWN.help
+pwn[v0.5.421]:001 >>> PWN.help
 ```

 If you're using a multi-user install of RVM do:
@@ -62,7 +62,7 @@ $ rvm use ruby-3.4.4@pwn
 $ rvmsudo gem uninstall --all --executables pwn
 $ rvmsudo gem install --verbose pwn
 $ pwn
-pwn[v0.5.420]:001 >>> PWN.help
+pwn[v0.5.421]:001 >>> PWN.help
 ```

 PWN periodically upgrades to the latest version of Ruby which is reflected in `/opt/pwn/.ruby-version`. The easiest way to upgrade to the latest version of Ruby from a previous PWN installation is to run the following script:
data/bin/pwn_sast
CHANGED
@@ -39,8 +39,8 @@ OptionParser.new do |options|
     opts[:ai_engine] = a
   end

-  options.on('-
-    opts[:
+  options.on('-bURI', '--ai-base-uri=URI', '<Optional AI Base API URI (Only Required for "ollama" AI Engine. Supported by other LLMs when hosted privately)>') do |b|
+    opts[:ai_base_uri] = b
   end

   options.on('-mMODEL', '--ai-model=MODEL', '<Optional AI Model to Use for Respective AI Engine (e.g., grok-4-0709, grok-3-mini-fast, gpt5-chat-latest, chargpt-4o-latest, llama-3.1, etc.)>') do |m|
@@ -101,8 +101,8 @@ begin
   valid_ai_engines = %i[grok ollama openai]
   raise "ERROR: Invalid AI Engine. Valid options are: #{valid_ai_engines.join(', ')}" unless valid_ai_engines.include?(ai_engine)

-
-  raise 'ERROR:
+  ai_base_uri = opts[:ai_base_uri]
+  raise 'ERROR: --base-uri Parameter for Ollama AI engine is required.' if ai_engine == :ollama && ai_base_uri.nil?

   ai_model = opts[:ai_model]
   raise 'ERROR: AI Model is required for AI engine ollama.' if ai_engine == :ollama && ai_model.nil?
@@ -206,7 +206,7 @@ begin
     ai_engine: ai_engine,
     ai_model: ai_model,
     ai_key: ai_key,
-
+    ai_base_uri: ai_base_uri,
     ai_system_role_content: ai_system_role_content,
     ai_temp: ai_temp
   )
data/etc/pwn.yaml.EXAMPLE
CHANGED
@@ -28,19 +28,21 @@ hunter:
   api_key: 'hunter.how API Key'

 grok:
+  base_uri: 'optional - Base URI for Grok - Use private base OR defaults to https://api.x.ai/v1'
   key: 'required - OpenAI API Key'
   model: 'optional - Grok model to use'
   system_role_content: 'You are an ethically hacking OpenAI agent.'
   temp: 'optional - OpenAI temperature'

 openai:
+  base_uri: 'optional - Base URI for OpenAI - Use private base OR defaults to https://api.openai.com/v1'
   key: 'required - OpenAI API Key'
   model: 'optional - OpenAI model to use'
   system_role_content: 'You are an ethically hacking OpenAI agent.'
   temp: 'optional - OpenAI temperature'

 ollama:
-
+  base_uri: 'required - Base URI for Open WebUI - e.g. https://ollama.local'
   key: 'required - Open WebUI API Key Under Settings >> Account >> JWT Token'
   model: 'required - Ollama model to use'
   system_role_content: 'You are an ethically hacking Ollama agent.'
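The three new base_uri keys line up with the per-engine blocks that PWN::Plugins::REPL.load_config reads (see the repl.rb diff below), but they can also be consumed directly from a script. A minimal sketch, assuming an unencrypted pwn.yaml at a placeholder path:

    # Load the per-engine settings from pwn.yaml and reuse them outside the REPL.
    require 'yaml'
    require 'pwn'

    config = YAML.load_file('/opt/pwn/etc/pwn.yaml', symbolize_names: true) # placeholder path
    ollama = config[:ollama]

    response = PWN::AI::Ollama.chat(
      base_uri: ollama[:base_uri], # e.g. https://ollama.local
      token: ollama[:key],
      model: ollama[:model],
      request: 'ping'
    )
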
data/lib/pwn/ai/grok.rb
CHANGED
@@ -15,6 +15,7 @@ module PWN
       # grok_ai_rest_call(
       #   token: 'required - grok_ai bearer token',
       #   http_method: 'optional HTTP method (defaults to GET)
+      #   base_uri: 'optional base grok api URI (defaults to https://api.x.ai/v1)',
       #   rest_call: 'required rest call to make per the schema',
       #   params: 'optional params passed in the URI or HTTP Headers',
       #   http_body: 'optional HTTP body sent in HTTP methods that support it e.g. POST',
@@ -29,6 +30,8 @@ module PWN
        else
          opts[:http_method].to_s.scrub.to_sym
        end
+
+        base_uri = opts[:base_uri] ||= 'https://api.x.ai/v1'
        rest_call = opts[:rest_call].to_s.scrub
        params = opts[:params]
        headers = {
@@ -44,8 +47,6 @@ module PWN

        spinner = opts[:spinner] || false

-        base_grok_api_uri = 'https://api.x.ai/v1'
-
        browser_obj = PWN::Plugins::TransparentBrowser.open(browser_type: :rest)
        rest_client = browser_obj[:browser]::Request

@@ -59,7 +60,7 @@ module PWN
          headers[:params] = params
          response = rest_client.execute(
            method: http_method,
-            url: "#{
+            url: "#{base_uri}/#{rest_call}",
            headers: headers,
            verify_ssl: false,
            timeout: timeout
@@ -71,7 +72,7 @@ module PWN

          response = rest_client.execute(
            method: http_method,
-            url: "#{
+            url: "#{base_uri}/#{rest_call}",
            headers: headers,
            payload: http_body,
            verify_ssl: false,
@@ -80,7 +81,7 @@ module PWN
        else
          response = rest_client.execute(
            method: http_method,
-            url: "#{
+            url: "#{base_uri}/#{rest_call}",
            headers: headers,
            payload: http_body.to_json,
            verify_ssl: false,
@@ -107,13 +108,16 @@ module PWN

      # Supported Method Parameters::
      # response = PWN::AI::Grok.get_models(
+      #   base_uri: 'optional - base grok api URI (defaults to https://api.x.ai/v1)',
      #   token: 'required - Bearer token'
      # )

      public_class_method def self.get_models(opts = {})
+        base_uri = opts[:base_uri]
        token = opts[:token]

        response = grok_rest_call(
+          base_uri: base_uri,
          token: token,
          rest_call: 'models'
        )
@@ -125,6 +129,7 @@ module PWN

      # Supported Method Parameters::
      # response = PWN::AI::Grok.chat(
+      #   base_uri: 'optional - base grok api URI (defaults to https://api.x.ai/v1)',
      #   token: 'required - Bearer token',
      #   request: 'required - message to ChatGPT'
      #   model: 'optional - model to use for text generation (defaults to grok-4-0709)',
@@ -137,6 +142,7 @@ module PWN
      # )

      public_class_method def self.chat(opts = {})
+        base_uri = opts[:base_uri]
        token = opts[:token]
        request = opts[:request]

@@ -188,6 +194,7 @@ module PWN
        spinner = opts[:spinner]

        response = grok_rest_call(
+          base_uri: base_uri,
          http_method: :post,
          token: token,
          rest_call: rest_call,
@@ -230,10 +237,12 @@ module PWN
      public_class_method def self.help
        puts "USAGE:
          response = #{self}.get_models(
+            base_uri: 'optional - base grok api URI (defaults to https://api.x.ai/v1)',
            token: 'required - Bearer token'
          )

          response = #{self}.chat(
+            base_uri: 'optional - base grok api URI (defaults to https://api.x.ai/v1)',
            token: 'required - Bearer token',
            request: 'required - message to ChatGPT',
            model: 'optional - model to use for text generation (defaults to grok-4-0709)',
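Net effect of the grok.rb changes: every PWN::AI::Grok call can now be pointed at a privately hosted, API-compatible endpoint via base_uri, while omitting the parameter keeps the previous behavior (https://api.x.ai/v1). A minimal sketch based on the parameter documentation above; the endpoint and environment variable are placeholders:

    require 'pwn'

    # base_uri is optional; leaving it out falls back to https://api.x.ai/v1
    response = PWN::AI::Grok.chat(
      base_uri: 'https://grok.private.example/v1', # hypothetical private deployment
      token: ENV.fetch('GROK_API_KEY'),            # placeholder bearer token
      request: 'Summarize the base_uri changes in pwn 0.5.421.'
    )
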
data/lib/pwn/ai/ollama.rb
CHANGED
@@ -14,7 +14,7 @@ module PWN
    module Ollama
      # Supported Method Parameters::
      # ollama_rest_call(
-      #
+      #   base_uri: 'required - base URI for the Ollama API',
      #   token: 'required - ollama bearer token',
      #   http_method: 'optional HTTP method (defaults to GET)
      #   rest_call: 'required rest call to make per the schema',
@@ -25,7 +25,7 @@ module PWN
      # )

      private_class_method def self.ollama_rest_call(opts = {})
-
+        base_uri = opts[:base_uri]
        token = opts[:token]
        http_method = if opts[:http_method].nil?
                        :get
@@ -61,7 +61,7 @@ module PWN
          headers[:params] = params
          response = rest_client.execute(
            method: http_method,
-            url: "#{
+            url: "#{base_uri}/#{rest_call}",
            headers: headers,
            verify_ssl: false,
            timeout: timeout
@@ -73,7 +73,7 @@ module PWN

          response = rest_client.execute(
            method: http_method,
-            url: "#{
+            url: "#{base_uri}/#{rest_call}",
            headers: headers,
            payload: http_body,
            verify_ssl: false,
@@ -82,7 +82,7 @@ module PWN
        else
          response = rest_client.execute(
            method: http_method,
-            url: "#{
+            url: "#{base_uri}/#{rest_call}",
            headers: headers,
            payload: http_body.to_json,
            verify_ssl: false,
@@ -109,16 +109,16 @@ module PWN

      # Supported Method Parameters::
      # response = PWN::AI::Ollama.get_models(
-      #
+      #   base_uri: 'required - base URI for the Ollama API',
      #   token: 'required - Bearer token'
      # )

      public_class_method def self.get_models(opts = {})
-
+        base_uri = opts[:base_uri]
        token = opts[:token]

        response = ollama_rest_call(
-
+          base_uri: base_uri,
          token: token,
          rest_call: 'ollama/api/tags'
        )
@@ -130,7 +130,7 @@ module PWN

      # Supported Method Parameters::
      # response = PWN::AI::Ollama.chat(
-      #
+      #   base_uri: 'required - base URI for the Ollama API',
      #   token: 'required - Bearer token',
      #   request: 'required - message to ChatGPT'
      #   model: 'optional - model to use for text generation (defaults to gpt-3.5-turbo-0613)',
@@ -143,7 +143,7 @@ module PWN
      # )

      public_class_method def self.chat(opts = {})
-
+        base_uri = opts[:base_uri]
        token = opts[:token]
        request = opts[:request]

@@ -195,7 +195,7 @@ module PWN
        spinner = opts[:spinner]

        response = ollama_rest_call(
-
+          base_uri: base_uri,
          http_method: :post,
          token: token,
          rest_call: rest_call,
@@ -238,12 +238,12 @@ module PWN
      public_class_method def self.help
        puts "USAGE:
          response = #{self}.get_models(
-
+            base_uri: 'required - base URI for the Ollama API',
            token: 'required - Bearer token'
          )

          response = #{self}.chat(
-
+            base_uri: 'required - base URI for the Ollama API',
            token: 'required - Bearer token',
            request: 'required - message to ChatGPT',
            model: 'optional - model to use for text generation (defaults to llama2:latest)',
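For PWN::AI::Ollama the parameter is not optional: base_uri replaces the previous FQDN-style argument and must point at the Open WebUI endpoint, as the updated help text notes. A sketch with placeholder host, token, and model:

    require 'pwn'

    response = PWN::AI::Ollama.chat(
      base_uri: 'https://ollama.local',    # required - Open WebUI base URI (placeholder)
      token: ENV.fetch('OPEN_WEBUI_JWT'),  # placeholder - Settings >> Account >> JWT Token
      model: 'llama3.1:latest',            # placeholder model name
      request: 'Hello from PWN 0.5.421'
    )
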
data/lib/pwn/ai/open_ai.rb
CHANGED
@@ -16,6 +16,7 @@ module PWN
      # open_ai_rest_call(
      #   token: 'required - open_ai bearer token',
      #   http_method: 'optional HTTP method (defaults to GET)
+      #   base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
      #   rest_call: 'required rest call to make per the schema',
      #   params: 'optional params passed in the URI or HTTP Headers',
      #   http_body: 'optional HTTP body sent in HTTP methods that support it e.g. POST',
@@ -30,6 +31,8 @@ module PWN
        else
          opts[:http_method].to_s.scrub.to_sym
        end
+
+        base_uri = opts[:base_uri] ||= 'https://api.openai.com/v1'
        rest_call = opts[:rest_call].to_s.scrub
        params = opts[:params]
        headers = {
@@ -45,8 +48,6 @@ module PWN

        spinner = opts[:spinner] || false

-        base_open_ai_api_uri = 'https://api.openai.com/v1'
-
        browser_obj = PWN::Plugins::TransparentBrowser.open(browser_type: :rest)
        rest_client = browser_obj[:browser]::Request

@@ -60,7 +61,7 @@ module PWN
          headers[:params] = params
          response = rest_client.execute(
            method: http_method,
-            url: "#{
+            url: "#{base_uri}/#{rest_call}",
            headers: headers,
            verify_ssl: false,
            timeout: timeout
@@ -72,7 +73,7 @@ module PWN

          response = rest_client.execute(
            method: http_method,
-            url: "#{
+            url: "#{base_uri}/#{rest_call}",
            headers: headers,
            payload: http_body,
            verify_ssl: false,
@@ -81,7 +82,7 @@ module PWN
        else
          response = rest_client.execute(
            method: http_method,
-            url: "#{
+            url: "#{base_uri}/#{rest_call}",
            headers: headers,
            payload: http_body.to_json,
            verify_ssl: false,
@@ -108,15 +109,18 @@ module PWN

      # Supported Method Parameters::
      # response = PWN::AI::OpenAI.get_models(
+      #   base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
      #   token: 'required - Bearer token',
      #   timeout: 'optional timeout in seconds (defaults to 180)'
      # )

      public_class_method def self.get_models(opts = {})
+        base_uri = opts[:base_uri]
        token = opts[:token]
        timeout = opts[:timeout]

        response = open_ai_rest_call(
+          base_uri: base_uri,
          token: token,
          rest_call: 'models'
        )
@@ -128,6 +132,7 @@ module PWN

      # Supported Method Parameters::
      # response = PWN::AI::OpenAI.chat(
+      #   base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
      #   token: 'required - Bearer token',
      #   request: 'required - message to ChatGPT'
      #   model: 'optional - model to use for text generation (defaults to gpt-5-chat-latest)',
@@ -140,6 +145,7 @@ module PWN
      # )

      public_class_method def self.chat(opts = {})
+        base_uri = opts[:base_uri]
        token = opts[:token]
        request = opts[:request]

@@ -224,6 +230,7 @@ module PWN
        spinner = opts[:spinner]

        response = open_ai_rest_call(
+          base_uri: base_uri,
          http_method: :post,
          token: token,
          rest_call: rest_call,
@@ -282,6 +289,7 @@ module PWN

      # Supported Method Parameters::
      # response = PWN::AI::OpenAI.img_gen(
+      #   base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
      #   token: 'required - Bearer token',
      #   request: 'required - message to ChatGPT',
      #   n: 'optional - number of images to generate (defaults to 1)',
@@ -290,6 +298,7 @@ module PWN
      # )

      public_class_method def self.img_gen(opts = {})
+        base_uri = opts[:base_uri]
        token = opts[:token]
        request = opts[:request]
        n = opts[:n]
@@ -307,6 +316,7 @@ module PWN
        }

        response = open_ai_rest_call(
+          base_uri: base_uri,
          http_method: :post,
          token: token,
          rest_call: rest_call,
@@ -321,6 +331,7 @@ module PWN

      # Supported Method Parameters::
      # response = PWN::AI::OpenAI.vision(
+      #   base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
      #   token: 'required - Bearer token',
      #   img_path: 'required - path or URI of image to analyze',
      #   request: 'optional - message to ChatGPT (defaults to, "what is in this image?")',
@@ -332,6 +343,7 @@ module PWN
      # )

      public_class_method def self.vision(opts = {})
+        base_uri = opts[:base_uri]
        token = opts[:token]
        img_path = opts[:img_path]

@@ -399,6 +411,7 @@ module PWN
        timeout = opts[:timeout]

        response = open_ai_rest_call(
+          base_uri: base_uri,
          http_method: :post,
          token: token,
          rest_call: rest_call,
@@ -429,6 +442,7 @@ module PWN

      # Supported Method Parameters::
      # response = PWN::AI::OpenAI.create_fine_tune(
+      #   base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
      #   token: 'required - Bearer token',
      #   training_file: 'required - JSONL that contains OpenAI training data'
      #   validation_file: 'optional - JSONL that contains OpenAI validation data'
@@ -445,6 +459,7 @@ module PWN
      # )

      public_class_method def self.create_fine_tune(opts = {})
+        base_uri = opts[:base_uri]
        token = opts[:token]
        training_file = opts[:training_file]
        validation_file = opts[:validation_file]
@@ -462,6 +477,7 @@ module PWN
        timeout = opts[:timeout]

        response = upload_file(
+          base_uri: base_uri,
          token: token,
          file: training_file
        )
@@ -469,6 +485,7 @@ module PWN

        if validation_file
          response = upload_file(
+            base_uri: base_uri,
            token: token,
            file: validation_file
          )
@@ -492,6 +509,7 @@ module PWN
        http_body[:suffix] = suffix if suffix

        response = open_ai_rest_call(
+          base_uri: base_uri,
          http_method: :post,
          token: token,
          rest_call: 'fine_tuning/jobs',
@@ -506,15 +524,18 @@ module PWN

      # Supported Method Parameters::
      # response = PWN::AI::OpenAI.list_fine_tunes(
+      #   base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
      #   token: 'required - Bearer token',
      #   timeout: 'optional - timeout in seconds (defaults to 180)'
      # )

      public_class_method def self.list_fine_tunes(opts = {})
+        base_uri = opts[:base_uri]
        token = opts[:token]
        timeout = opts[:timeout]

        response = open_ai_rest_call(
+          base_uri: base_uri,
          token: token,
          rest_call: 'fine_tuning/jobs',
          timeout: timeout
@@ -527,12 +548,14 @@ module PWN

      # Supported Method Parameters::
      # response = PWN::AI::OpenAI.get_fine_tune_status(
+      #   base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
      #   token: 'required - Bearer token',
      #   fine_tune_id: 'required - respective :id value returned from #list_fine_tunes',
      #   timeout: 'optional - timeout in seconds (defaults to 180)'
      # )

      public_class_method def self.get_fine_tune_status(opts = {})
+        base_uri = opts[:base_uri]
        token = opts[:token]
        fine_tune_id = opts[:fine_tune_id]
        timeout = opts[:timeout]
@@ -540,6 +563,7 @@ module PWN
        rest_call = "fine_tuning/jobs/#{fine_tune_id}"

        response = open_ai_rest_call(
+          base_uri: base_uri,
          token: token,
          rest_call: rest_call,
          timeout: timeout
@@ -552,12 +576,14 @@ module PWN

      # Supported Method Parameters::
      # response = PWN::AI::OpenAI.cancel_fine_tune(
+      #   base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
      #   token: 'required - Bearer token',
      #   fine_tune_id: 'required - respective :id value returned from #list_fine_tunes',
      #   timeout: 'optional - timeout in seconds (defaults to 180)'
      # )

      public_class_method def self.cancel_fine_tune(opts = {})
+        base_uri = opts[:base_uri]
        token = opts[:token]
        fine_tune_id = opts[:fine_tune_id]
        timeout = opts[:timeout]
@@ -565,6 +591,7 @@ module PWN
        rest_call = "fine_tuning/jobs/#{fine_tune_id}/cancel"

        response = open_ai_rest_call(
+          base_uri: base_uri,
          http_method: :post,
          token: token,
          rest_call: rest_call,
@@ -578,12 +605,14 @@ module PWN

      # Supported Method Parameters::
      # response = PWN::AI::OpenAI.get_fine_tune_events(
+      #   base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
      #   token: 'required - Bearer token',
      #   fine_tune_id: 'required - respective :id value returned from #list_fine_tunes',
      #   timeout: 'optional - timeout in seconds (defaults to 180)'
      # )

      public_class_method def self.get_fine_tune_events(opts = {})
+        base_uri = opts[:base_uri]
        token = opts[:token]
        fine_tune_id = opts[:fine_tune_id]
        timeout = opts[:timeout]
@@ -591,6 +620,7 @@ module PWN
        rest_call = "fine_tuning/jobs/#{fine_tune_id}/events"

        response = open_ai_rest_call(
+          base_uri: base_uri,
          token: token,
          rest_call: rest_call,
          timeout: timeout
@@ -603,12 +633,14 @@ module PWN

      # Supported Method Parameters::
      # response = PWN::AI::OpenAI.delete_fine_tune_model(
+      #   base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
      #   token: 'required - Bearer token',
      #   model: 'required - model to delete',
      #   timeout: 'optional - timeout in seconds (defaults to 180)'
      # )

      public_class_method def self.delete_fine_tune_model(opts = {})
+        base_uri = opts[:base_uri]
        token = opts[:token]
        model = opts[:model]
        timeout = opts[:timeout]
@@ -616,6 +648,7 @@ module PWN
        rest_call = "models/#{model}"

        response = open_ai_rest_call(
+          base_uri: base_uri,
          http_method: :delete,
          token: token,
          rest_call: rest_call,
@@ -629,15 +662,18 @@ module PWN

      # Supported Method Parameters::
      # response = PWN::AI::OpenAI.list_files(
+      #   base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
      #   token: 'required - Bearer token',
      #   timeout: 'optional - timeout in seconds (defaults to 180)'
      # )

      public_class_method def self.list_files(opts = {})
+        base_uri = opts[:base_uri]
        token = opts[:token]
        timeout = opts[:timeout]

        response = open_ai_rest_call(
+          base_uri: base_uri,
          token: token,
          rest_call: 'files',
          timeout: timeout
@@ -650,6 +686,7 @@ module PWN

      # Supported Method Parameters::
      # response = PWN::AI::OpenAI.upload_file(
+      #   base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
      #   token: 'required - Bearer token',
      #   file: 'required - file to upload',
      #   purpose: 'optional - intended purpose of the uploaded documents (defaults to fine-tune',
@@ -657,6 +694,7 @@ module PWN
      # )

      public_class_method def self.upload_file(opts = {})
+        base_uri = opts[:base_uri]
        token = opts[:token]
        file = opts[:file]
        raise "ERROR: #{file} not found." unless File.exist?(file)
@@ -672,6 +710,7 @@ module PWN
        }

        response = open_ai_rest_call(
+          base_uri: base_uri,
          http_method: :post,
          token: token,
          rest_call: 'files',
@@ -686,12 +725,14 @@ module PWN

      # Supported Method Parameters::
      # response = PWN::AI::OpenAI.delete_file(
+      #   base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
      #   token: 'required - Bearer token',
      #   file: 'required - file to delete',
      #   timeout: 'optional - timeout in seconds (defaults to 180)'
      # )

      public_class_method def self.delete_file(opts = {})
+        base_uri = opts[:base_uri]
        token = opts[:token]
        file = opts[:file]
        timeout = opts[:timeout]
@@ -702,6 +743,7 @@ module PWN
        rest_call = "files/#{file_id}"

        response = open_ai_rest_call(
+          base_uri: base_uri,
          http_method: :delete,
          token: token,
          rest_call: rest_call,
@@ -715,12 +757,14 @@ module PWN

      # Supported Method Parameters::
      # response = PWN::AI::OpenAI.get_file(
+      #   base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
      #   token: 'required - Bearer token',
      #   file: 'required - file to delete',
      #   timeout: 'optional - timeout in seconds (defaults to 180)'
      # )

      public_class_method def self.get_file(opts = {})
+        base_uri = opts[:base_uri]
        token = opts[:token]
        file = opts[:file]
        raise "ERROR: #{file} not found." unless File.exist?(file)
@@ -733,6 +777,7 @@ module PWN
        rest_call = "files/#{file_id}"

        response = open_ai_rest_call(
+          base_uri: base_uri,
          token: token,
          rest_call: rest_call,
          timeout: timeout
@@ -756,11 +801,13 @@ module PWN
      public_class_method def self.help
        puts "USAGE:
          response = #{self}.get_models(
+            base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
            token: 'required - Bearer token',
            timeout: 'optional - timeout in seconds (defaults to 180)'
          )

          response = #{self}.chat(
+            base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
            token: 'required - Bearer token',
            request: 'required - message to ChatGPT',
            model: 'optional - model to use for text generation (defaults to gpt-5-chat-latest)',
@@ -773,6 +820,7 @@ module PWN
          )

          response = #{self}.img_gen(
+            base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
            token: 'required - Bearer token',
            request: 'required - message to ChatGPT',
            n: 'optional - number of images to generate (defaults to 1)',
@@ -781,6 +829,7 @@ module PWN
          )

          response = #{self}.vision(
+            base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
            token: 'required - Bearer token',
            img_path: 'required - path or URI of image to analyze',
            request: 'optional - message to ChatGPT (defaults to, \"what is in this image?\")',
@@ -792,6 +841,7 @@ module PWN
          )

          response = #{self}.create_fine_tune(
+            base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
            token: 'required - Bearer token',
            training_file: 'required - JSONL that contains OpenAI training data'
            validation_file: 'optional - JSONL that contains OpenAI validation data'
@@ -808,52 +858,61 @@ module PWN
          )

          response = #{self}.list_fine_tunes(
+            base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
            token: 'required - Bearer token',
            timeout: 'optional - timeout in seconds (defaults to 180)'
          )

          response = #{self}.get_fine_tune_status(
+            base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
            token: 'required - Bearer token',
            fine_tune_id: 'required - respective :id value returned from #list_fine_tunes',
            timeout: 'optional - timeout in seconds (defaults to 180)'
          )

          response = #{self}.cancel_fine_tune(
+            base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
            token: 'required - Bearer token',
            fine_tune_id: 'required - respective :id value returned from #list_fine_tunes',
            timeout: 'optional - timeout in seconds (defaults to 180)'
          )

          response = #{self}.get_fine_tune_events(
+            base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
            token: 'required - Bearer token',
            fine_tune_id: 'required - respective :id value returned from #list_fine_tunes',
            timeout: 'optional - timeout in seconds (defaults to 180)'
          )

          response = #{self}.delete_fine_tune_model(
+            base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
            token: 'required - Bearer token',
            model: 'required - model to delete',
            timeout: 'optional - timeout in seconds (defaults to 180)'
          )

          response = #{self}.list_files(
+            base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
            token: 'required - Bearer token',
            timeout: 'optional - timeout in seconds (defaults to 180)'
          )

          response = #{self}.upload_file(
+            base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
            token: 'required - Bearer token',
            file: 'required - file to upload',
            timeout: 'optional - timeout in seconds (defaults to 180)'
          )

          response = #{self}.delete_file(
+            base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
            token: 'required - Bearer token',
            file: 'required - file to delete',
            timeout: 'optional - timeout in seconds (defaults to 180)'
          )

          response = #{self}.get_file(
+            base_uri: 'optional - base OpenAI API URI (defaults to https://api.openai.com/v1)',
            token: 'required - Bearer token',
            file: 'required - file to delete',
            timeout: 'optional - timeout in seconds (defaults to 180)'
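The open_ai.rb changes follow the same pattern across every public method (get_models, chat, img_gen, vision, and the fine-tune and file helpers): each now threads an optional base_uri through open_ai_rest_call, defaulting to https://api.openai.com/v1, which is what makes OpenAI-compatible gateways usable. A hedged sketch; the gateway URI and key are placeholders:

    require 'pwn'

    # Omitting base_uri preserves the pre-0.5.421 behavior (public OpenAI API).
    response = PWN::AI::OpenAI.chat(
      base_uri: 'https://llm-gateway.internal.example/v1', # hypothetical OpenAI-compatible proxy
      token: ENV.fetch('OPENAI_API_KEY'),                  # placeholder bearer token
      request: 'Explain what changed in this release.'
    )
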
data/lib/pwn/plugins/repl.rb
CHANGED
@@ -8,6 +8,96 @@ module PWN
  module Plugins
    # This module contains methods related to the pwn REPL Driver.
    module REPL
+      # Supported Method Parameters::
+      # PWN::Plugins::REPL.load_config(
+      #   pi: 'required - Pry Instance object',
+      #   yaml_config_path: 'required - full path to pwn.yaml file',
+      #   decryption_file: 'optional - full path to decryption YAML file'
+      # )
+      public_class_method def self.load_config(opts = {})
+        yaml_config_path = opts[:yaml_config_path]
+
+        return false unless yaml_config_path
+
+        pi = opts[:pi] ||= Pry
+        raise "ERROR: #{yaml_config_path} does not exist." unless File.exist?(yaml_config_path)
+
+        is_encrypted = PWN::Plugins::Vault.file_encrypted?(file: yaml_config_path)
+
+        if is_encrypted
+          # TODO: Implement "something you know, something you have, && something you are?"
+          decryption_file = opts[:decryption_file] ||= "#{Dir.home}/pwn.decryptor.yaml"
+          raise "ERROR: #{decryption_file} does not exist." unless File.exist?(decryption_file)
+
+          yaml_decryptor = YAML.load_file(decryption_file, symbolize_names: true)
+
+          key = opts[:key] ||= yaml_decryptor[:key] ||= ENV.fetch('PWN_DECRYPTOR_KEY')
+          key = PWN::Plugins::AuthenticationHelper.mask_password(prompt: 'Decryption Key') if key.nil?
+
+          iv = opts[:iv] ||= yaml_decryptor[:iv] ||= ENV.fetch('PWN_DECRYPTOR_IV')
+          iv = PWN::Plugins::AuthenticationHelper.mask_password(prompt: 'Decryption IV') if iv.nil?
+
+          yaml_config = PWN::Plugins::Vault.dump(
+            file: yaml_config_path,
+            key: key,
+            iv: iv
+          )
+        else
+          yaml_config = YAML.load_file(yaml_config_path, symbolize_names: true)
+        end
+        pi.config.p = yaml_config
+        Pry.config.p = yaml_config
+
+        valid_ai_engines = %i[
+          grok
+          openai
+          ollama
+        ]
+        ai_engine = yaml_config[:ai_engine].to_s.downcase.to_sym
+
+        raise "ERROR: Unsupported AI Engine: #{ai_engine} in #{yaml_config_path}. Supported AI Engines:\n#{valid_ai_engines.inspect}" unless valid_ai_engines.include?(ai_engine)
+
+        pi.config.pwn_ai_engine = ai_engine
+        Pry.config.pwn_ai_engine = ai_engine
+
+        pi.config.pwn_ai_base_uri = pi.config.p[ai_engine][:base_uri]
+        Pry.config.pwn_ai_base_uri = pi.config.pwn_ai_base_uri
+
+        pi.config.pwn_ai_key = pi.config.p[ai_engine][:key]
+        Pry.config.pwn_ai_key = pi.config.pwn_ai_key
+
+        pi.config.pwn_ai_model = pi.config.p[ai_engine][:model]
+        Pry.config.pwn_ai_model = pi.config.pwn_ai_model
+
+        pi.config.pwn_ai_system_role_content = pi.config.p[ai_engine][:system_role_content]
+        Pry.config.pwn_ai_system_role_content = pi.config.pwn_ai_system_role_content
+
+        pi.config.pwn_ai_temp = pi.config.p[ai_engine][:temp]
+        Pry.config.pwn_ai_temp = pi.config.pwn_ai_temp
+
+        pi.config.pwn_asm_arch = pi.config.p[:asm][:arch]
+        Pry.config.pwn_asm_arch = pi.config.pwn_asm_arch
+
+        pi.config.pwn_asm_endian = pi.config.p[:asm][:endian]
+        Pry.config.pwn_asm_endian = pi.config.pwn_asm_endian
+
+        pi.config.pwn_irc = pi.config.p[:irc]
+        Pry.config.pwn_irc = pi.config.pwn_irc
+
+        pi.config.pwn_hunter = pi.config.p[:hunter][:api_key]
+        Pry.config.pwn_hunter = pi.config.pwn_hunter
+
+        pi.config.pwn_shodan = pi.config.p[:shodan][:api_key]
+        Pry.config.pwn_shodan = pi.config.pwn_shodan
+
+        pi.config.reload_config = false
+        Pry.config.reload_config = false
+
+        true
+      rescue StandardError => e
+        raise e
+      end
+
      # Supported Method Parameters::
      # PWN::Plugins::REPL.refresh_ps1_proc(
      #   mode: 'required - :splat or nil'
@@ -17,6 +107,8 @@ module PWN
        mode = opts[:mode]

        proc do |_target_self, _nest_level, pi|
+          load_config(opts) if Pry.config.reload_config
+
          pi.config.pwn_repl_line += 1
          line_pad = format(
            '%0.3d',
@@ -69,6 +161,7 @@ module PWN
      # PWN::Plugins::REPL.add_commands

      public_class_method def self.add_commands
+        # Load any existing pwn.yaml configuration file
        # Define Custom REPL Commands
        Pry::Commands.create_command 'welcome-banner' do
          description 'Display the random welcome banner, including basic usage.'
@@ -299,8 +392,7 @@ module PWN

            response_history = ai_agents[dm_agent.to_sym][:response_history]
            ai_engine = pi.config.pwn_ai_engine
-
-            ai_fqdn ||= ''
+            ai_base_uri = pi.config.pwn_ai_base_uri
            ai_key = pi.config.pwn_ai_key
            ai_key ||= ''
            ai_temp = pi.config.pwn_ai_temp
@@ -341,6 +433,7 @@ module PWN
            case ai_engine
            when :grok
              response = PWN::AI::Grok.chat(
+                base_uri: ai_base_uri,
                token: ai_key,
                model: model,
                temp: ai_temp,
@@ -351,7 +444,7 @@ module PWN
              )
            when :ollama
              response = PWN::AI::Ollama.chat(
-
+                base_uri: ai_base_uri,
                token: ai_key,
                model: model,
                temp: ai_temp,
@@ -362,6 +455,7 @@ module PWN
              )
            when :openai
              response = PWN::AI::OpenAI.chat(
+                base_uri: ai_base_uri,
                token: ai_key,
                model: model,
                temp: ai_temp,
@@ -502,78 +596,8 @@ module PWN

      # Initialize pwn.yaml Configuration using :before_session Hook
      Pry.config.hooks.add_hook(:before_session, :init_opts) do |_output, _binding, pi|
-
-
-        raise "ERROR: #{yaml_config_path} does not exist." unless File.exist?(yaml_config_path)
-
-        is_encrypted = PWN::Plugins::Vault.file_encrypted?(file: yaml_config_path)
-
-        if is_encrypted
-          # TODO: Implement "something you know, something you have, && something you are?"
-          decryption_file = opts[:decryption_file] ||= "#{Dir.home}/pwn.decryptor.yaml"
-          yaml_decryptor = YAML.load_file(decryption_file, symbolize_names: true) if File.exist?(decryption_file)
-
-          key = opts[:key] ||= yaml_decryptor[:key] ||= ENV.fetch('PWN_DECRYPTOR_KEY')
-          key = PWN::Plugins::AuthenticationHelper.mask_password(prompt: 'Decryption Key') if key.nil?
-
-          iv = opts[:iv] ||= yaml_decryptor[:iv] ||= ENV.fetch('PWN_DECRYPTOR_IV')
-          iv = PWN::Plugins::AuthenticationHelper.mask_password(prompt: 'Decryption IV') if iv.nil?
-
-          yaml_config = PWN::Plugins::Vault.dump(
-            file: yaml_config_path,
-            key: key,
-            iv: iv
-          )
-        else
-          yaml_config = YAML.load_file(yaml_config_path, symbolize_names: true)
-        end
-        pi.config.p = yaml_config
-        Pry.config.p = yaml_config
-
-        valid_ai_engines = %i[
-          grok
-          openai
-          ollama
-        ]
-        ai_engine = yaml_config[:ai_engine].to_s.downcase.to_sym
-
-        raise "ERROR: Unsupported AI Engine: #{ai_engine} in #{yaml_config_path}. Supported AI Engines:\n#{valid_ai_engines.inspect}" unless valid_ai_engines.include?(ai_engine)
-
-        pi.config.pwn_ai_engine = ai_engine
-        Pry.config.pwn_ai_engine = ai_engine
-
-        pi.config.pwn_ai_fqdn = pi.config.p[ai_engine][:fqdn]
-        Pry.config.pwn_ai_fqdn = pi.config.pwn_ai_fqdn
-
-        pi.config.pwn_ai_key = pi.config.p[ai_engine][:key]
-        Pry.config.pwn_ai_key = pi.config.pwn_ai_key
-
-        pi.config.pwn_ai_model = pi.config.p[ai_engine][:model]
-        Pry.config.pwn_ai_model = pi.config.pwn_ai_model
-
-        pi.config.pwn_ai_system_role_content = pi.config.p[ai_engine][:system_role_content]
-        Pry.config.pwn_ai_system_role_content = pi.config.pwn_ai_system_role_content
-
-        pi.config.pwn_ai_temp = pi.config.p[ai_engine][:temp]
-        Pry.config.pwn_ai_temp = pi.config.pwn_ai_temp
-
-        pi.config.pwn_asm_arch = pi.config.p[:asm][:arch]
-        Pry.config.pwn_asm_arch = pi.config.pwn_asm_arch
-
-        pi.config.pwn_asm_endian = pi.config.p[:asm][:endian]
-        Pry.config.pwn_asm_endian = pi.config.pwn_asm_endian
-
-        pi.config.pwn_irc = pi.config.p[:irc]
-        Pry.config.pwn_irc = pi.config.pwn_irc
-
-        pi.config.pwn_hunter = pi.config.p[:hunter][:api_key]
-        Pry.config.pwn_hunter = pi.config.pwn_hunter
-
-        pi.config.pwn_shodan = pi.config.p[:shodan][:api_key]
-        Pry.config.pwn_shodan = pi.config.pwn_shodan
-
-        true
-      end
+        opts[:pi] = pi
+        load_config(opts)
      end

      Pry.config.hooks.add_hook(:after_read, :pwn_asm_hook) do |request, pi|
@@ -633,9 +657,12 @@ module PWN
        system_role_content = pi.config.pwn_ai_system_role_content
        temp = pi.config.pwn_ai_temp

+        ai_base_uri = pi.config.pwn_ai_base_uri
+
        case ai_engine
        when :grok
          response = PWN::AI::Grok.chat(
+            base_uri: ai_base_uri,
            token: ai_key,
            model: model,
            system_role_content: system_role_content,
@@ -646,10 +673,8 @@ module PWN
            spinner: true
          )
        when :ollama
-          fqdn = pi.config.pwn_ai_fqdn
-
          response = PWN::AI::Ollama.chat(
-
+            base_uri: ai_base_uri,
            token: ai_key,
            model: model,
            system_role_content: system_role_content,
@@ -661,6 +686,7 @@ module PWN
          )
        when :openai
          response = PWN::AI::OpenAI.chat(
+            base_uri: ai_base_uri,
            token: ai_key,
            model: model,
            system_role_content: system_role_content,
@@ -721,8 +747,11 @@ module PWN
      # Define PS1 Prompt
      Pry.config.pwn_repl_line = 0
      Pry.config.prompt_name = :pwn
-      arrow_ps1_proc = refresh_ps1_proc
-
+      arrow_ps1_proc = refresh_ps1_proc(opts)
+
+      opts[:mode] = :splat
+      splat_ps1_proc = refresh_ps1_proc(opts)
+
      ps1 = [arrow_ps1_proc, splat_ps1_proc]
      prompt = Pry::Prompt.new(:pwn, 'PWN Prototyping REPL', ps1)

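The repl.rb refactor moves pwn.yaml parsing out of the :before_session hook into a reusable PWN::Plugins::REPL.load_config method; the hook and the PS1 proc now both call it, and the new Pry.config.reload_config flag (set by the vault.rb change below) triggers a reload on the next prompt without restarting the REPL. A sketch of calling it directly, using the documented parameters; the paths are placeholders:

    require 'pwn'

    # Re-read an edited (optionally vault-encrypted) pwn.yaml into the running REPL config.
    PWN::Plugins::REPL.load_config(
      pi: Pry,                                           # Pry instance object per the method docs
      yaml_config_path: '/opt/pwn/etc/pwn.yaml',         # placeholder path to pwn.yaml
      decryption_file: "#{Dir.home}/pwn.decryptor.yaml"  # only consulted when pwn.yaml is encrypted
    )
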
data/lib/pwn/plugins/vault.rb
CHANGED
@@ -172,6 +172,9 @@ module PWN
        relative_editor = File.basename(editor)
        system(relative_editor, file)

+        # If the Pry object exists, set reload_config to true
+        Pry.config.reload_config = true if defined?(Pry)
+
        encrypt(
          file: file,
          key: key,
data/lib/pwn/reports/sast.rb
CHANGED
@@ -18,7 +18,7 @@ module PWN
      #   ai_engine: 'optional - AI engine to use for analysis (:grok, :ollama, or :openai)',
      #   ai_model: 'optionnal - AI Model to Use for Respective AI Engine (e.g., grok-4i-0709, chargpt-4o-latest, llama-3.1, etc.)',
      #   ai_key: 'optional - AI Key/Token for Respective AI Engine',
-      #
+      #   ai_base_uri: 'optional - AI FQDN (Only Required for "ollama" AI Engine)',
      #   ai_system_role_content: 'optional - AI System Role Content (Defaults to "Confidence score of 0-10 this is vulnerable (0 being not vulnerable, moving upwards in confidence of exploitation). Provide additional context to assist penetration tester assessment.")',
      #   ai_temp: 'optional - AI Temperature (Defaults to 0.1)'
      # )
@@ -37,8 +37,8 @@ module PWN
        valid_ai_engines = %i[grok ollama openai]
        raise "ERROR: Invalid AI Engine. Valid options are: #{valid_ai_engines.join(', ')}" unless valid_ai_engines.include?(ai_engine)

-
-        raise 'ERROR: FQDN for Ollama AI engine is required.' if ai_engine == :ollama &&
+        ai_base_uri = opts[:ai_base_uri]
+        raise 'ERROR: FQDN for Ollama AI engine is required.' if ai_engine == :ollama && ai_base_uri.nil?

        ai_model = opts[:ai_model]
        raise 'ERROR: AI Model is required for AI engine ollama.' if ai_engine == :ollama && ai_model.nil?
@@ -75,6 +75,7 @@ module PWN
          case ai_engine
          when :grok
            response = PWN::AI::Grok.chat(
+              base_uri: ai_base_uri,
              token: ai_key,
              model: ai_model,
              system_role_content: ai_system_role_content,
@@ -84,7 +85,7 @@ module PWN
            )
          when :ollama
            response = PWN::AI::Ollama.chat(
-
+              base_uri: ai_base_uri,
              token: ai_key,
              model: ai_model,
              system_role_content: ai_system_role_content,
@@ -94,6 +95,7 @@ module PWN
            )
          when :openai
            response = PWN::AI::OpenAI.chat(
+              base_uri: ai_base_uri,
              token: ai_key,
              model: ai_model,
              system_role_content: ai_system_role_content,
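sast.rb mirrors the pwn_sast CLI change: PWN::Reports::SAST.generate accepts ai_base_uri and forwards it as base_uri to whichever AI engine performs the analysis (required when ai_engine is :ollama). A sketch of a report invocation with the new parameter; the model, token, and host are placeholders, and results_hash is omitted so the documented default (an empty results structure) applies:

    require 'pwn'

    PWN::Reports::SAST.generate(
      dir_path: '.',
      ai_engine: :ollama,
      ai_model: 'llama3.1',                 # placeholder model name
      ai_key: ENV.fetch('OPEN_WEBUI_JWT'),  # placeholder token
      ai_base_uri: 'https://ollama.local',  # required for the :ollama engine
      ai_temp: 0.1
    )
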
data/lib/pwn/version.rb
CHANGED
data/third_party/pwn_rdoc.jsonl
CHANGED
@@ -536,7 +536,7 @@
 {"messages":[{"role":"user","content":"PWN::Plugins::BurpSuite.get_proxy_listeners Usage"},{"role":"assistant","content":"`PWN::Plugins::BurpSuite.get_proxy_listeners`: Supported Method Parameters\n\njson_proxy_listeners = PWN::Plugins::BurpSuite.get_proxy_listeners(\n\nburp_obj: 'required - burp_obj returned by #start method'\n\n)\n"}]}
 {"messages":[{"role":"user","content":"PWN::Plugins::BurpSuite.get_repeater_tab Usage"},{"role":"assistant","content":"`PWN::Plugins::BurpSuite.get_repeater_tab`: Supported Method Parameters\n\nrepeater_tab = PWN::Plugins::BurpSuite.get_repeater_tab(\n\nburp_obj: 'required - burp_obj returned by #start method',\nid: 'required - id of the repeater tab to get'\n\n)\n"}]}
 {"messages":[{"role":"user","content":"PWN::Plugins::BurpSuite.get_scan_issues Usage"},{"role":"assistant","content":"`PWN::Plugins::BurpSuite.get_scan_issues`: Supported Method Parameters\n\njson_scan_issues = PWN::Plugins::BurpSuite.get_scan_issues(\n\nburp_obj: 'required - burp_obj returned by #start method'\n\n)\n"}]}
-{"messages":[{"role":"user","content":"PWN::Plugins::BurpSuite.get_sitemap Usage"},{"role":"assistant","content":"`PWN::Plugins::BurpSuite.get_sitemap`: Supported Method Parameters\n\njson_sitemap = PWN::Plugins::BurpSuite.get_sitemap(\n\nburp_obj: 'required - burp_obj returned by #start method',\nkeyword: 'optional - keyword to filter sitemap entries (default: nil)'\n\n)\n"}]}
+{"messages":[{"role":"user","content":"PWN::Plugins::BurpSuite.get_sitemap Usage"},{"role":"assistant","content":"`PWN::Plugins::BurpSuite.get_sitemap`: Supported Method Parameters\n\njson_sitemap = PWN::Plugins::BurpSuite.get_sitemap(\n\nburp_obj: 'required - burp_obj returned by #start method',\nkeyword: 'optional - keyword to filter sitemap entries (default: nil)',\nreturn_as: 'optional - :base64 or :har (defaults to :base64)'\n\n)\n"}]}
 {"messages":[{"role":"user","content":"PWN::Plugins::BurpSuite.help Usage"},{"role":"assistant","content":"`PWN::Plugins::BurpSuite.help`: "}]}
 {"messages":[{"role":"user","content":"PWN::Plugins::BurpSuite.import_openapi_to_sitemap Usage"},{"role":"assistant","content":"`PWN::Plugins::BurpSuite.import_openapi_to_sitemap`: "}]}
 {"messages":[{"role":"user","content":"PWN::Plugins::BurpSuite.in_scope Usage"},{"role":"assistant","content":"`PWN::Plugins::BurpSuite.in_scope`: Supported Method Parameters\n\nuri_in_scope = PWN::Plugins::BurpSuite.in_scope(\n\nburp_obj: 'required - burp_obj returned by #start method',\nuri: 'required - URI to determine if in scope'\n\n)\n"}]}
@@ -1069,7 +1069,7 @@
 {"messages":[{"role":"user","content":"PWN::Reports::Phone.generate Usage"},{"role":"assistant","content":"`PWN::Reports::Phone.generate`: Supported Method Parameters\n\nPWN::Reports::Phone.generate(\n\ndir_path: dir_path,\nresults_hash: results_hash\n\n)\n"}]}
 {"messages":[{"role":"user","content":"PWN::Reports::Phone.help Usage"},{"role":"assistant","content":"`PWN::Reports::Phone.help`: "}]}
 {"messages":[{"role":"user","content":"PWN::Reports::SAST.authors Usage"},{"role":"assistant","content":"`PWN::Reports::SAST.authors`: Author(s)\n\n0day Inc. <support@0dayinc.com>\n"}]}
-{"messages":[{"role":"user","content":"PWN::Reports::SAST.generate Usage"},{"role":"assistant","content":"`PWN::Reports::SAST.generate`: Supported Method Parameters\n\nPWN::Reports::SAST.generate(\n\ndir_path: 'optional - Directory path to save the report (defaults to .)',\nresults_hash: 'optional - Hash containing the results of the SAST analysis (defaults to empty hash structure)',\nreport_name: 'optional - Name of the report file (defaults to current directory name)',\nai_engine: 'optional - AI engine to use for analysis (:grok, :ollama, or :openai)',\nai_model: 'optionnal - AI Model to Use for Respective AI Engine (e.g., grok-4i-0709, chargpt-4o-latest, llama-3.1, etc.)',\nai_key: 'optional - AI Key/Token for Respective AI Engine',\nai_fqdn: 'optional - AI FQDN (Only Required for \"ollama\" AI Engine)',\nai_system_role_content: 'optional - AI System Role Content (Defaults to \"Confidence score of 0-10 this is vulnerable (0 being not vulnerable, moving upwards in confidence of exploitation). Provide additional context to assist penetration tester assessment.\")',\nai_temp: 'optional - AI Temperature (Defaults to 0.
+{"messages":[{"role":"user","content":"PWN::Reports::SAST.generate Usage"},{"role":"assistant","content":"`PWN::Reports::SAST.generate`: Supported Method Parameters\n\nPWN::Reports::SAST.generate(\n\ndir_path: 'optional - Directory path to save the report (defaults to .)',\nresults_hash: 'optional - Hash containing the results of the SAST analysis (defaults to empty hash structure)',\nreport_name: 'optional - Name of the report file (defaults to current directory name)',\nai_engine: 'optional - AI engine to use for analysis (:grok, :ollama, or :openai)',\nai_model: 'optionnal - AI Model to Use for Respective AI Engine (e.g., grok-4i-0709, chargpt-4o-latest, llama-3.1, etc.)',\nai_key: 'optional - AI Key/Token for Respective AI Engine',\nai_fqdn: 'optional - AI FQDN (Only Required for \"ollama\" AI Engine)',\nai_system_role_content: 'optional - AI System Role Content (Defaults to \"Confidence score of 0-10 this is vulnerable (0 being not vulnerable, moving upwards in confidence of exploitation). Provide additional context to assist penetration tester assessment.\")',\nai_temp: 'optional - AI Temperature (Defaults to 0.1)'\n\n)\n"}]}
 {"messages":[{"role":"user","content":"PWN::Reports::SAST.help Usage"},{"role":"assistant","content":"`PWN::Reports::SAST.help`: "}]}
 {"messages":[{"role":"user","content":"PWN::Reports::URIBuster.authors Usage"},{"role":"assistant","content":"`PWN::Reports::URIBuster.authors`: Author(s)\n\n0day Inc. <support@0dayinc.com>\n"}]}
 {"messages":[{"role":"user","content":"PWN::Reports::URIBuster.generate Usage"},{"role":"assistant","content":"`PWN::Reports::URIBuster.generate`: Supported Method Parameters\n\nPWN::Reports::URIBuster.generate(\n\ndir_path: dir_path,\nresults_hash: results_hash\n\n)\n"}]}
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: pwn
 version: !ruby/object:Gem::Version
-  version: 0.5.420
+  version: 0.5.421
 platform: ruby
 authors:
 - 0day Inc.
@@ -939,14 +939,14 @@ dependencies:
     requirements:
     - - '='
       - !ruby/object:Gem::Version
-        version: 1.
+        version: 1.81.1
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - '='
       - !ruby/object:Gem::Version
-        version: 1.
+        version: 1.81.1
 - !ruby/object:Gem::Dependency
   name: rubocop-rake
   requirement: !ruby/object:Gem::Requirement