chat_gpt_error_handler 0.1.2 → 0.2.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 3ff5d23745282dc3a2d6b01cd67e998e89f73074ae16ceb3e1ee63988b5361e0
-  data.tar.gz: 32ede69316cc4964c2497baab47e244d8e5fb8de13db2ff68d808edc71c589b3
+  metadata.gz: 8eb4d741fe11dd031e02e4405a131ba4a440a0306575a97246b6486de5f7ae3e
+  data.tar.gz: 589d87cf005f91a365b9399e99a9d45711dce60d9847500790395c3173f9b097
 SHA512:
-  metadata.gz: 33d465f4944e318d13766a19d9328cb8bdd914c3eb98aca2e491b02a94df38cff4bfa345ad68f157f3e3c8df3e81bb5d6b887cc70adf3427302ebfff91a08a7f
-  data.tar.gz: e1913f58570b9ac5e3f88dbb145bfcc89ff91f58929b25235a88c6f9ba8770c29e190ba5f4877bb45436ddb9db405beab964d477e2ab48f0e3fb77737a776af8
+  metadata.gz: ef2028789e012e4856f1397fcf98c8e2ee3c593a4d22dec620d05a1b20c539b3103e6892e8463e59e1f19f2bee4fa892f49035f3ec80f3818ecf7d7a68663795
+  data.tar.gz: cbf4c7bb9ebe54e539e543c36108387c089f9a2fb891968ddf0e7bf0f23a57c5a6d9eb179c3ba63a07da2603201a74e822fe74f1203eeac8648d158e2bf20477
data/README.md CHANGED
@@ -12,11 +12,12 @@ Add this line to your application's Gemfile:
 gem "chat_gpt_error_handler"
 ```
 
-Set your [OpenAI API Token](https://openai.com/product#made-for-developers) in an initializer:
+Set your [OpenAI API Token](https://openai.com/product#made-for-developers) and enable the gem in an initializer:
 
 ```ruby
 # config/initializers/chat_gpt_error_handler.rb
 ChatGptErrorHandler.openai_access_token = 'your_openai_access_token_here'
+ChatGptErrorHandler.enabled = true
 ```
 
 And you're off!
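For context, the README's initializer combined with the `ENV.fetch('OPENAI_ACCESS_TOKEN')` fallback used by the middleware might look like the sketch below; pulling the token from an environment variable rather than hard-coding it is a suggestion, not something the README prescribes.

```ruby
# config/initializers/chat_gpt_error_handler.rb
# Sketch only: the middleware falls back to ENV.fetch('OPENAI_ACCESS_TOKEN')
# when openai_access_token is left unset, so reading it from the environment
# here is optional.
ChatGptErrorHandler.openai_access_token = ENV["OPENAI_ACCESS_TOKEN"]
ChatGptErrorHandler.enabled = true
```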
@@ -5,8 +5,10 @@ module ChatGptErrorHandler
   class ErrorHandler
     def initialize(app)
       @app = app
-      OpenAI.configure do |config|
-        config.access_token = ChatGptErrorHandler.openai_access_token || ENV.fetch('OPENAI_ACCESS_TOKEN')
+      if ChatGptErrorHandler.enabled
+        OpenAI.configure do |config|
+          config.access_token = ChatGptErrorHandler.openai_access_token || ENV.fetch('OPENAI_ACCESS_TOKEN')
+        end
       end
     end
 
@@ -14,7 +16,7 @@ module ChatGptErrorHandler
       begin
         @app.call(env)
       rescue => error
-        send_to_gpt(error)
+        send_to_gpt(error) if ChatGptErrorHandler.enabled
         raise error
       end
     end
@@ -43,29 +45,49 @@ module ChatGptErrorHandler
           stop: nil
         }
       )
-      response_text = response["choices"].map { |c| c["text"].to_s }
 
-      response_text = response_text.join("")
-      shortened_response_text = []
-      begin
-        # try to format the response text to be 75 characters per line
-        response_text.split(" ").reduce("") do |line, word|
-          if line.length + word.length > 75
-            shortened_response_text << line
-            line = word
-          elsif word == response_text.split(" ").last
-            shortened_response_text << [line + " " + word]
-          else
-            line += (" " + word)
+
+      # Try a second time if the response is empty. Raise the second time
+      if response["choices"].nil?
+        response = client.completions(
+          parameters: {
+            model: "text-davinci-002",
+            prompt: prompt,
+            n: 1,
+            temperature: 0.85,
+            max_tokens: 250,
+            stop: nil
+          }
+        )
+      end
+
+      if true #response["choices"].nil?
+        print_error_and_solution(error, "ChatGPT returned an empty response to your error message. You may need to try again.")
+      else
+        response_text = response["choices"].map { |c| c["text"].to_s }
+
+        response_text = response_text.join("")
+        shortened_response_text = []
+        begin
+          # try to format the response text to be 75 characters per line
+          response_text.split(" ").reduce("") do |line, word|
+            if line.length + word.length > 75
+              shortened_response_text << line
+              line = word
+            elsif word == response_text.split(" ").last
+              shortened_response_text << [line + " " + word]
+            else
+              line += (" " + word)
+            end
           end
+          shortened_response_text = shortened_response_text.join("\n")
+        rescue => e
+          # The formatting does not always work, so if it fails, just use the original response text
+          shortened_response_text = response_text
         end
-        shortened_response_text = shortened_response_text.join("\n")
-      rescue => e
-        # The formatting does not always work, so if it fails, just use the original response text
-        shortened_response_text = response_text
-      end
 
-      print_error_and_solution(error, shortened_response_text.strip)
+        print_error_and_solution(error, shortened_response_text.strip)
+      end
     rescue => gpt_error
       puts "\e[31mAn error occurred while communicating with GPT:\e[0m"
       puts gpt_error.message
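The formatting block in this hunk greedily wraps the ChatGPT reply to roughly 75 characters per line before printing it. A standalone sketch of the same idea (the `wrap_text` helper and its sample input are illustrative, not part of the gem):

```ruby
# Greedy word wrap to roughly `width` characters per line; illustrative only.
def wrap_text(text, width = 75)
  lines = []
  current = ""
  text.split(" ").each do |word|
    if current.empty?
      current = word
    elsif current.length + 1 + word.length > width
      lines << current
      current = word
    else
      current += " " + word
    end
  end
  lines << current unless current.empty?
  lines.join("\n")
end

puts wrap_text("It looks like the record you were trying to find does not exist. " * 3)
```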
@@ -1,3 +1,3 @@
 module ChatGptErrorHandler
-  VERSION = "0.1.2"
+  VERSION = "0.2.1"
 end
@@ -2,8 +2,11 @@ require "chat_gpt_error_handler/version"
 require "chat_gpt_error_handler/engine"
 
 module ChatGptErrorHandler
-  mattr_accessor :openai_access_token
+  mattr_accessor :openai_access_token, :enabled
 
   # Default value for the openai_access_token, used when not explicitly set by the user
   @@openai_access_token = nil
+
+  # Default value to allow opting-in
+  @@enabled = false
 end
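Because `@@enabled` now defaults to `false`, existing installs stay silent until they opt in. One possible way to opt in only where the console chatter is useful, assuming a standard Rails app (the gem itself does not prescribe this):

```ruby
# config/initializers/chat_gpt_error_handler.rb
# Example only: enable the ChatGPT explanations in development alone.
ChatGptErrorHandler.openai_access_token = ENV["OPENAI_ACCESS_TOKEN"]
ChatGptErrorHandler.enabled = Rails.env.development?
```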
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: chat_gpt_error_handler
 version: !ruby/object:Gem::Version
-  version: 0.1.2
+  version: 0.2.1
 platform: ruby
 authors:
 - Nick Schwaderer
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2023-05-17 00:00:00.000000000 Z
+date: 2023-06-02 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rails