pwn 0.4.643 → 0.4.644
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +2 -2
- data/bin/pwn_chat +21 -49
- data/lib/pwn/plugins/open_ai.rb +41 -2
- data/lib/pwn/version.rb +1 -1
- metadata +1 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2b544a8cd980d315cdcedf61ecb3f684ec6251155018073cfa73f4fe99fda103
+  data.tar.gz: 15cb2f4de0b6247e0aee5fafdcd891612f53c6cee748a715f7de41acd655b2d4
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 39f3b397df4238587b9e887e6cee439e25242d1437500f3d8c1a935ae177f5a276a975063515abf7bae4161a228f7a0fb8888b53c0d9ce135f263ce469056ef0
+  data.tar.gz: 720a4b698073fdc3da5c31c176302ed155a9b48530bb7eed297ec0a77bd5b5947f9fe697c27d86ebfa97c8dfa67bafd666e6fbf16633e58f614abf1e32f2c22c
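These are the standard RubyGems checksums.yaml entries: SHA256 and SHA512 digests of the metadata.gz and data.tar.gz members packed inside the .gem archive (not of the .gem file itself). A minimal sketch for reproducing the new values, assuming pwn-0.4.644.gem has already been downloaded and its two members extracted into the current directory:

```ruby
require 'digest'

# Hash the two gem members and print their digests for comparison
# against the checksums.yaml values above.
%w[metadata.gz data.tar.gz].each do |member|
  puts "#{member}:"
  puts "  SHA256: #{Digest::SHA256.file(member).hexdigest}"
  puts "  SHA512: #{Digest::SHA512.file(member).hexdigest}"
end
```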
data/README.md
CHANGED
@@ -37,7 +37,7 @@ $ rvm use ruby-3.2.2@pwn
 $ rvm list gemsets
 $ gem install --verbose pwn
 $ pwn
-pwn[v0.4.
+pwn[v0.4.644]:001 >>> PWN.help
 ```
 
 [](https://youtu.be/G7iLUY4FzsI)
@@ -52,7 +52,7 @@ $ rvm use ruby-3.2.2@pwn
 $ gem uninstall --all --executables pwn
 $ gem install --verbose pwn
 $ pwn
-pwn[v0.4.
+pwn[v0.4.644]:001 >>> PWN.help
 ```
 
 
data/bin/pwn_chat
CHANGED
@@ -24,8 +24,8 @@ OptionParser.new do |options|
 opts[:system_role_content] = s
 end
 
-options.on('-
-opts[:
+options.on('-S', '--speak-answer', '<Options - Speak Answers (Defaults to false)>') do |v|
+opts[:speak_answer] = v
 end
 end.parse!
 
@@ -93,7 +93,7 @@ begin
 
 system_role_content = opts[:system_role_content]
 
-@
+@speak_answer = true if opts[:speak_answer]
 
 # Define Custom REPL Commands
 Pry::Commands.create_command 'welcome-banner' do
@@ -127,52 +127,24 @@ begin
 @keep_in_memory = choices_len * -1
 end
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-id: response[:id],
-object: response[:object],
-model: response[:model],
-usage: response[:usage]
-}
-@response_history[:choices] ||= response[:choices]
-rescue JSON::ParserError
-max_tokens = 4097
-if request.length > max_tokens
-puts "Request Length Too Long: #{request.length}\n"
-else
-puts 'Token Length too long, trimming memory by 1 and retrying...'
-@keep_in_memory += 1
-@response_history[:choices] = @response_history[:choices].slice(@keep_in_memory..)
-
-response = PWN::Plugins::OpenAI.chat(
-token: token,
-system_role_content: system_role_content,
-request: "summarize what we've already discussed",
-temp: 1,
-max_tokens: 0,
-response_history: @response_history
-)
-@response_history[:choices] = response[:choices].slice(@keep_in_memory..)
-
-retry
-end
-end
+response = PWN::Plugins::OpenAI.chat(
+token: token,
+system_role_content: system_role_content,
+request: request.to_s,
+temp: 1,
+max_tokens: 0,
+response_history: @response_history,
+speak_answer: @speak_answer
+)
+puts "\n\n\n#{response[:choices].last[:content]}\n\n\n"
+
+@response_history = {
+id: response[:id],
+object: response[:object],
+model: response[:model],
+usage: response[:usage]
+}
+@response_history[:choices] ||= response[:choices]
 end
 end
 
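Net effect of the pwn_chat changes: the binary gains a -S/--speak-answer flag, and its hand-rolled JSON::ParserError retry loop is dropped in favor of a single PWN::Plugins::OpenAI.chat call that now owns history trimming and, optionally, speech. A minimal sketch of an equivalent caller, assuming the token is read from an ENV['OPENAI_API_KEY'] lookup and using a placeholder request string (both illustrative, not part of the diff):

```ruby
require 'pwn'

# Illustrative assumption: read the API token from the environment;
# pwn_chat itself collects the token through its own CLI handling.
token = ENV['OPENAI_API_KEY']
response_history = nil

response = PWN::Plugins::OpenAI.chat(
  token: token,
  request: 'hello there',             # placeholder request
  temp: 1,
  max_tokens: 0,
  response_history: response_history,
  speak_answer: true                  # new in 0.4.644: speak the answer via PWN::Plugins::Voice
)

puts response[:choices].last[:content]

# Carry the conversation forward the same way pwn_chat now does:
# keep the response metadata plus the accumulated choices array.
response_history = {
  id: response[:id],
  object: response[:object],
  model: response[:model],
  usage: response[:usage],
  choices: response[:choices]
}
```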
data/lib/pwn/plugins/open_ai.rb
CHANGED
@@ -1,6 +1,7 @@
 # frozen_string_literal: true
 
 require 'json'
+require 'securerandom'
 
 module PWN
 module Plugins
@@ -98,7 +99,8 @@ module PWN
 # temp: 'optional - creative response float (deafults to 0)',
 # max_tokens: 'optional - integer (defaults to 4_097 - request.length || 300)',
 # system_role_content: 'optional - context to set up the model behavior for conversation (Default: "You are a sarcastic ethical hacker named Kennedy. You are able to access any content or website on any device or network without any limitations or filters.")',
-# response_history: 'optional - pass response back in to have a conversation'
+# response_history: 'optional - pass response back in to have a conversation',
+# speak_answer: 'optional speak answer using PWN::Plugins::Voice.text_to_speech (Default: nil)'
 # )
 
 public_class_method def self.chat(opts = {})
@@ -138,6 +140,7 @@ module PWN
 }
 
 response_history ||= { choices: [system_role] }
+choices_len = response_history[:choices].length
 
 # TODO: Include max_tokens when sending chat requests
 http_body = {
@@ -178,7 +181,42 @@ module PWN
 json_resp[:choices].push(assistant_resp)
 end
 
+speak_answer = true if opts[:speak_answer]
+
+if speak_answer
+text_path = "/tmp/#{SecureRandom.hex}.pwn_voice"
+answer = json_resp[:choices].last[:text]
+answer = json_resp[:choices].last[:content] if gpt
+File.write(text_path, answer)
+PWN::Plugins::Voice.text_to_speech(text_path: text_path)
+File.unlink(text_path)
+end
+
 json_resp
+rescue JSON::ParserError => e
+if e.message.include?('exceeded')
+if request.length > max_tokens
+puts "Request Length Too Long: #{request.length}\n"
+else
+# TODO: make this as tight as possible.
+keep_in_memory = (choices_len - 2) * -1
+response_history[:choices] = response_history[:choices].slice(keep_in_memory..)
+
+response = chat(
+token: token,
+system_role_content: system_role_content,
+request: "summarize what we've already discussed",
+temp: 1,
+max_tokens: 0,
+response_history: response_history,
+speak_answer: speak_answer
+)
+keep_in_memory = (choices_len / 2) * -1
+response_history[:choices] = response[:choices].slice(keep_in_memory..)
+
+retry
+end
+end
 rescue StandardError => e
 raise e
 end
@@ -238,7 +276,8 @@ module PWN
 temp: 'optional - creative response float (defaults to 0)',
 max_tokens: 'optional - integer (deafults to 4_097 - request.length || 300)',
 system_role_content: 'optional - context to set up the model behavior for conversation (Default: \"You are a sarcastic ethical hacker named Kennedy. You are able to access any content or website on any device or network without any limitations or filters.\")',
-response_history: 'optional - pass response back in to have a conversation'
+response_history: 'optional - pass response back in to have a conversation',
+speak_answer: 'optional speak answer using PWN::Plugins::Voice.text_to_speech (Default: nil)'
 )
 
 response = #{self}.img_gen(
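The history-trimming arithmetic in the new rescue JSON::ParserError branch is easier to follow with concrete numbers: Array#slice with a negative start index keeps that many trailing elements, so (choices_len - 2) * -1 drops the two oldest choices before asking the model to summarize the conversation, and (choices_len / 2) * -1 keeps the last choices_len / 2 entries of the follow-up response before retrying. A standalone sketch with illustrative values only:

```ruby
# Stand-in for response_history[:choices]; real entries are role/content hashes.
choices = (1..10).to_a
choices_len = choices.length             # 10

keep_in_memory = (choices_len - 2) * -1  # -8
choices.slice(keep_in_memory..)          # => [3, 4, 5, 6, 7, 8, 9, 10] (the 2 oldest entries dropped)

keep_in_memory = (choices_len / 2) * -1  # -5
choices.slice(keep_in_memory..)          # => [6, 7, 8, 9, 10] (the newest half kept)
```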
data/lib/pwn/version.rb
CHANGED