chat_gpt_error_handler 0.2.2 → 0.2.3
This diff shows the changes between publicly released versions of the package as they appear in their public registries, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/lib/chat_gpt_error_handler/error_handler.rb +18 -22
- data/lib/chat_gpt_error_handler/version.rb +1 -1
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b5cf8b5513b4691e6fdc33962168fe60b074634637814d101ed5d7e38378645d
+  data.tar.gz: b58f3066774ada0edc7f9e769d61ae073bfaba4eda62ede43e8de9247bd1d1cf
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6da82e0360ab40b1bb8688c7b4d5d101a29d439541d7ddd026ea61d7c08221ad915cff0f6873098626bdab9737bb83c09615b5eeb39eaea1545a4e68fd4251e6
+  data.tar.gz: 8116fc3c2318c4663eb97f0510c0dcc2e2fb114696247b85c5078b1cfbc3f0db5fadc288f514bba7aedbcb03a793c8926579cf55059c7e421ca88dacad19811f
data/lib/chat_gpt_error_handler/error_handler.rb
CHANGED
@@ -34,31 +34,27 @@ module ChatGptErrorHandler
         fix the error. Clearly list possible reasons and a solution.
         Your answer will be displayed in the terminal for the user to see right
         before the backtrace of the error. Do NOT repeat the error message back verbatim.
-
-      response = client.completions(
-        parameters: {
-          model: "text-davinci-002",
-          prompt: prompt,
-          n: 1,
-          temperature: 0.85,
-          max_tokens: 250,
-          stop: nil
-        }
-      )
+        I will give you the error and I want the possible reason or solution?"

-
-      if response["choices"].nil?
-        if @retried.nil?
-          @retried = true
-          redo
-        else
-          raise "GPT returned an empty response to your error message."
-        end
-      end
+      query = error.message

-
+      response = client.chat(
+        parameters: {
+          model: "gpt-3.5-turbo-0613",
+          messages: [
+            { role: "system", content: prompt },
+            { role: "user", content: query },
+          ],
+          temperature: 0.85,
+        },
+      )
+
+      response_text = if response["choices"].nil?
+                        "GPT returned an empty response to your error message."
+                      else
+                        response["choices"][0]["message"]["content"]
+                      end

-      response_text = response_text.join("")
       shortened_response_text = []
       begin
         # try to format the response text to be 75 characters per line
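For context on the hunk above: 0.2.3 drops the legacy completions call against text-davinci-002 and replaces the redo-based retry with a fixed fallback message, switching to a chat-completions call instead. The call shape matches the ruby-openai gem's OpenAI::Client#chat (an assumption based on the client.chat(parameters: ...) signature; the gem is not named in this diff). A minimal standalone sketch of that call, with an illustrative prompt and a hypothetical error string standing in for error.message:

# Minimal sketch of the chat call introduced in 0.2.3. Assumes the
# ruby-openai gem; the env var, prompt text, and error string below
# are illustrative, not taken from the gem.
require "openai"

client = OpenAI::Client.new(access_token: ENV["OPENAI_API_KEY"])

prompt = "You are a helpful debugging assistant. " \
         "I will give you the error and I want the possible reason or solution?"
query = "undefined method `upcase' for nil:NilClass" # stand-in for error.message

response = client.chat(
  parameters: {
    model: "gpt-3.5-turbo-0613",
    messages: [
      { role: "system", content: prompt },
      { role: "user", content: query },
    ],
    temperature: 0.85,
  },
)

# Same fallback as the new handler code: use a fixed message when the
# API returns no choices, otherwise read the first chat message.
response_text =
  if response["choices"].nil?
    "GPT returned an empty response to your error message."
  else
    response["choices"][0]["message"]["content"]
  end

puts response_text

Unlike the removed code, which set @retried and issued a redo before raising, the new code never retries: an empty "choices" array simply yields the fixed fallback string that is then printed ahead of the backtrace.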
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: chat_gpt_error_handler
 version: !ruby/object:Gem::Version
-  version: 0.2.2
+  version: 0.2.3
 platform: ruby
 authors:
 - Nick Schwaderer
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2023-
+date: 2023-07-20 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rails