ai-chat 0.5.0 → 0.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/ai-chat.gemspec +1 -1
- data/lib/ai/chat.rb +90 -204
- metadata +4 -5
- data/lib/ai/http.rb +0 -45
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: …
-  data.tar.gz: …
+  metadata.gz: 77efcbc43e3402b184ce49ff870671796904a0935a37d55dd9eeaaea93572b53
+  data.tar.gz: 8996a14db31e7455815d10acac9827658fcbaf5358e1a461d2b83a946b8f3cc9
 SHA512:
-  metadata.gz: …
-  data.tar.gz: …
+  metadata.gz: b1993b6e812a75387b53ade7f29785a80aa0c70c7b6aa97d5ac9eed7e73fc27a87f1a76676e9878416849af3c9ae3967d6b78838be0c681f077fa63c81e32993
+  data.tar.gz: 0f463e8beef43965db7b85b94335624322f46bc39cc4dd2c1a02ad44993ae972bdbfb4a670890b3f55ddf9ce5ebf57fd8b2e92f85fd3340a2be1d7aa6d1ca549
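
Both archive checksums changed, as expected for a rebuilt gem. As a quick sketch (not part of the package), the published SHA256 values can be checked against a locally downloaded ai-chat-0.5.1.gem; a .gem file is a tar archive whose metadata.gz and data.tar.gz members are exactly what checksums.yaml covers:

    require "digest"
    require "rubygems/package"

    # Hash the two members that checksums.yaml covers and compare by eye.
    Gem::Package::TarReader.new(File.open("ai-chat-0.5.1.gem", "rb")) do |tar|
      tar.each do |entry|
        next unless ["metadata.gz", "data.tar.gz"].include?(entry.full_name)
        puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
      end
    end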
data/ai-chat.gemspec
CHANGED
@@ -2,7 +2,7 @@

 Gem::Specification.new do |spec|
   spec.name = "ai-chat"
-  spec.version = "0.5.0"
+  spec.version = "0.5.1"
   spec.authors = ["Raghu Betina", "Jelani Woods"]
   spec.email = ["raghu@firstdraft.com", "jelani@firstdraft.com"]
   spec.homepage = "https://github.com/firstdraft/ai-chat"
data/lib/ai/chat.rb
CHANGED
@@ -11,8 +11,6 @@ require "fileutils"
 require "tty-spinner"
 require "timeout"
 
-require_relative "http"
-
 module AI
   # :reek:MissingSafeMethod { exclude: [ generate! ] }
   # :reek:TooManyMethods
@@ -20,24 +18,23 @@ module AI
   # :reek:InstanceVariableAssumption
   # :reek:IrresponsibleModule
   class Chat
-    include AI::Http
-
     # :reek:Attribute
-    attr_accessor :background, :code_interpreter, :conversation_id, :image_generation, :image_folder, :messages, :model, :…
-    attr_reader :client, :last_response_id, :schema, :schema_file
+    attr_accessor :background, :code_interpreter, :conversation_id, :image_generation, :image_folder, :messages, :model, :reasoning_effort, :web_search
+    attr_reader :client, :last_response_id, :proxy, :schema, :schema_file
 
-    …
+    BASE_PROXY_URL = "https://prepend.me/api.openai.com/v1"
 
     def initialize(api_key: nil, api_key_env_var: "OPENAI_API_KEY")
       @api_key = api_key || ENV.fetch(api_key_env_var)
+      @proxy = false
       @messages = []
       @reasoning_effort = nil
       @model = "gpt-5.2"
       @client = OpenAI::Client.new(api_key: @api_key)
       @last_response_id = nil
-      @proxy = false
       @image_generation = false
       @image_folder = "./images"
+      @api_key_validated = false
     end
 
     def self.generate_schema!(description, location: "schema.json", api_key: nil, api_key_env_var: "OPENAI_API_KEY", proxy: false)
@@ -45,34 +42,25 @@ module AI
       prompt_path = File.expand_path("../prompts/schema_generator.md", __dir__)
       system_prompt = File.read(prompt_path)
 
-      …
-        input: [
-          {role: :system, content: system_prompt},
-          {role: :user, content: description}
-        ],
-        text: {format: {type: "json_object"}},
-        reasoning: {effort: "high"}
-      }
+      options = {
+        api_key: api_key,
+        base_url: proxy ? BASE_PROXY_URL : nil
+      }.compact
 
-      …
+      client = OpenAI::Client.new(**options)
+      response = client.responses.create(
+        model: "gpt-5.2",
+        input: [
+          {role: :system, content: system_prompt},
+          {role: :user, content: description}
+        ],
+        text: {format: {type: "json_object"}},
+        reasoning: {effort: "high"}
+      )
+
+      output_text = response.output_text
+      json = JSON.parse(output_text)
 
-      output_text = response.output_text
-      JSON.parse(output_text)
-      end
       content = JSON.pretty_generate(json)
       if location
         path = Pathname.new(location)
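
For reference, a hypothetical call to the reworked class method (the description and file name here are made up): the proxy flag now merely selects BASE_PROXY_URL as the client's base_url instead of routing through the hand-rolled HTTP module this release deletes.

    schema = AI::Chat.generate_schema!(
      "An object with a title (string) and a list of tags (array of strings)",
      location: "tag_schema.json",
      proxy: true
    )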
@@ -146,7 +134,7 @@ module AI
     # :reek:NilCheck
     # :reek:TooManyStatements
     def generate!
-      validate_api_key
+      validate_api_key unless @api_key_validated
       response = create_response
       parse_response(response)
 
@@ -167,6 +155,19 @@ module AI
       parse_response(response)
     end
 
+    def proxy=(value)
+      @proxy = value
+      if value
+        @client = OpenAI::Client.new(
+          api_key: @api_key,
+          base_url: BASE_PROXY_URL
+        )
+      else
+        @client = OpenAI::Client.new(api_key: @api_key)
+      end
+      value
+    end
+
     def schema=(value)
       if value.is_a?(String)
         parsed = JSON.parse(value, symbolize_names: true)
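
The new proxy= writer replaces the per-call branching that the later hunks delete: flipping the flag rebuilds @client once, and every subsequent API call goes through the official OpenAI client unchanged. Usage, adapted from the example text removed from the old error message (chat.user is a message helper defined elsewhere in the class):

    chat = AI::Chat.new
    chat.proxy = true   # rebuilds @client against BASE_PROXY_URL
    chat.user("Hello!")
    chat.generate!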
@@ -191,29 +192,7 @@ module AI
     def get_items(order: :asc)
       raise "No conversation_id set. Call generate! first to create a conversation." unless conversation_id
 
-      raw_items = …
-          uri = URI(PROXY_URL + "api.openai.com/v1/conversations/#{conversation_id}/items?order=#{order}")
-          response_hash = send_request(uri, content_type: "json", method: "get")
-
-          if response_hash.key?(:data)
-            response_hash.dig(:data).map do |hash|
-              # Transform values to allow expected symbols that non-proxied request returns
-
-              hash.transform_values! do |value|
-                if hash.key(value) == :type
-                  value.to_sym
-                else
-                  value
-                end
-              end
-            end
-            response_hash
-          end
-          # Convert to Struct to allow same interface as non-proxied request
-          create_deep_struct(response_hash)
-        else
-          client.conversations.items.list(conversation_id, order: order)
-        end
+      raw_items = client.conversations.items.list(conversation_id, order: order)
 
       Items.new(raw_items, conversation_id: conversation_id)
     end
@@ -280,14 +259,8 @@ module AI
     end
 
     def create_conversation
-      …
-        response = send_request(uri, content_type: "json", method: "post")
-        response.dig(:id)
-      else
-        conversation = client.conversations.create
-        conversation.id
-      end
+      conversation = client.conversations.create
+      self.conversation_id = conversation.id
     end
 
     # :reek:TooManyStatements
@@ -307,50 +280,22 @@ module AI
       messages_to_send = prepare_messages_for_api
       parameters[:input] = strip_responses(messages_to_send) unless messages_to_send.empty?
 
-      …
-        uri = URI(PROXY_URL + "api.openai.com/v1/responses")
-        send_request(uri, content_type: "json", parameters: parameters, method: "post")
-      else
-        client.responses.create(**parameters)
-      end
+      client.responses.create(**parameters)
     end
 
     # :reek:NilCheck
     # :reek:TooManyStatements
     def parse_response(response)
-      …
+      text_response = response.output_text
+      response_id = response.id
+      response_status = response.status
+      response_model = response.model
+      response_usage = response.usage.to_h.slice(:input_tokens, :output_tokens, :total_tokens)
 
-      …
-        end.flatten
-
-        output_texts = message_contents.select do |content|
-          content[:type] == "output_text"
-        end
-
-        text_response = output_texts.map { |output| output[:text] }.join
-        response_id = response.dig(:id)
-        response_status = response.dig(:status).to_sym
-        response_model = response.dig(:model)
-        response_usage = response.dig(:usage)&.slice(:input_tokens, :output_tokens, :total_tokens)
-
-        if response.key?(:conversation)
-          self.conversation_id = response.dig(:conversation, :id)
-        end
-      else
-        text_response = response.output_text
-        response_id = response.id
-        response_status = response.status
-        response_model = response.model
-        response_usage = response.usage.to_h.slice(:input_tokens, :output_tokens, :total_tokens)
-
-        if response.conversation
-          self.conversation_id = response.conversation.id
-        end
+      if response.conversation
+        self.conversation_id = response.conversation.id
       end
+
       image_filenames = extract_and_save_images(response) + extract_and_save_files(response)
 
       chat_response = {
@@ -579,30 +524,20 @@ module AI
     def extract_and_save_images(response)
       image_filenames = []
 
-      image_outputs = …
-          response.dig(:output).select { |output|
-            output.dig(:type) == "image_generation_call"
-          }
-        else
-          response.output.select { |output|
+      image_outputs = response.output.select { |output|
         output.respond_to?(:type) && output.type == :image_generation_call
       }
-        end
 
       return image_filenames if image_outputs.empty?
 
-      response_id = …
+      response_id = response.id
       subfolder_path = create_images_folder(response_id)
 
       image_outputs.each_with_index do |output, index|
-        …
-          next unless output.key?(:result) && output.dig(:result)
-        else
-          next unless output.respond_to?(:result) && output.result
-        end
+        next unless output.respond_to?(:result) && output.result
 
         warn_if_file_fails_to_save do
-          result = …
+          result = output.result
           image_data = Base64.strict_decode64(result)
 
           filename = "#{(index + 1).to_s.rjust(3, "0")}.png"
@@ -634,27 +569,20 @@ module AI
     end
 
     def validate_api_key
-      …
-          It looks like you're using an official API key from OpenAI with proxying enabled. When proxying is enabled you must use an OpenAI API key from prepend.me. Please disable proxy or update your API key before generating a response.
+      # Simple API call to validate the token
+      client.models.list
+      @api_key_validated = true
+    rescue OpenAI::Errors::AuthenticationError
+      message = if proxy
+        <<~STRING
+          It looks like you're using an invalid API key. Proxying is enabled, so you must use an OpenAI API key from prepend.me. Please disable proxy or update your API key before generating a response.
         STRING
-
-      …
-          It looks like you're using an …
-
-          Example:
-
-            chat = AI::Chat.new
-            chat.proxy = true
-            chat.user(...)
-            chat.generate!
+      else
+        <<~STRING
+          It looks like you're using an invalid API key. Check to make sure your API key is valid before generating a response.
         STRING
       end
+      raise WrongAPITokenUsedError, message, cause: nil
     end
 
     # :reek:FeatureEnvy
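
validate_api_key now probes the key with a cheap client.models.list call, memoizes success in @api_key_validated (so generate! pays for the probe only once), and raises WrongAPITokenUsedError with a proxy-aware message on failure. A sketch of caller-side handling; the error constant's namespace is not shown in this diff, so AI::Chat:: is an assumption:

    begin
      chat.generate!
    rescue AI::Chat::WrongAPITokenUsedError => error
      warn error.message
    end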
@@ -664,72 +592,40 @@ module AI
     def extract_and_save_files(response)
       filenames = []
 
-      …
-        end
+      message_outputs = response.output.select do |output|
+        output.respond_to?(:type) && output.type == :message
+      end
 
-      …
-          end
-        end.compact
-      else
-        message_outputs = response.output.select do |output|
-          output.respond_to?(:type) && output.type == :message
+      outputs_with_annotations = message_outputs.map do |message|
+        message.content.find do |content|
+          content.respond_to?(:annotations) && content.annotations.length.positive?
         end
-
-        outputs_with_annotations = message_outputs.map do |message|
-          message.content.find do |content|
-            content.respond_to?(:annotations) && content.annotations.length.positive?
-          end
-        end.compact
-      end
+      end.compact
 
       return filenames if outputs_with_annotations.empty?
 
-      response_id = …
+      response_id = response.id
       subfolder_path = create_images_folder(response_id)
 
-      …
-        annotations …
-
-          annotation.key?(:filename)
-        end
-      end.compact
-
-      annotations.each do |annotation|
-        container_id = annotation.dig(:container_id)
-        file_id = annotation.dig(:file_id)
-        filename = annotation.dig(:filename)
-
-        warn_if_file_fails_to_save do
-          file_content = retrieve_file(file_id, container_id: container_id)
-          file_path = File.join(subfolder_path, filename)
-          File.binwrite(file_path, file_content)
-          filenames << file_path
-        end
+      annotations = outputs_with_annotations.map do |output|
+        output.annotations.find do |annotation|
+          annotation.respond_to?(:filename)
         end
-      …
-        warn_if_file_fails_to_save do
-          file_content = retrieve_file(file_id, container_id: container_id)
-          file_path = File.join(subfolder_path, filename)
-          File.binwrite(file_path, file_content.read)
-          filenames << file_path
+      end.compact
+
+      annotations.each do |annotation|
+        container_id = annotation.container_id
+        file_id = annotation.file_id
+        filename = annotation.filename
+
+        warn_if_file_fails_to_save do
+          file_content = retrieve_file(file_id, container_id: container_id)
+          file_path = File.join(subfolder_path, filename)
+          File.binwrite(file_path, file_content.read)
+          filenames << file_path
         end
       end
+
 
       filenames
     end
@@ -790,22 +686,12 @@ module AI
     end
 
     def retrieve_response(response_id)
-      …
-        uri = URI(PROXY_URL + "api.openai.com/v1/responses/#{response_id}")
-        send_request(uri, content_type: "json", method: "get")
-      else
-        client.responses.retrieve(response_id)
-      end
+      client.responses.retrieve(response_id)
     end
 
     def retrieve_file(file_id, container_id: nil)
-      …
-        send_request(uri, method: "get")
-      else
-        container_content = client.containers.files.content
-        container_content.retrieve(file_id, container_id: container_id)
-      end
+      container_content = client.containers.files.content
+      container_content.retrieve(file_id, container_id: container_id)
     end
   end
 end
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ai-chat
 version: !ruby/object:Gem::Version
-  version: 0.5.0
+  version: 0.5.1
 platform: ruby
 authors:
 - Raghu Betina
 - Jelani Woods
 bindir: bin
 cert_chain: []
-date: …
+date: 2026-01-29 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: openai
@@ -148,8 +148,8 @@ email:
 executables: []
 extensions: []
 extra_rdoc_files:
-- LICENSE
 - README.md
+- LICENSE
 files:
 - LICENSE
 - README.md
@@ -157,7 +157,6 @@ files:
 - lib/ai-chat.rb
 - lib/ai/amazing_print.rb
 - lib/ai/chat.rb
-- lib/ai/http.rb
 - lib/ai/items.rb
 - lib/ai/message.rb
 - lib/prompts/schema_generator.md
@@ -185,7 +184,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: …
+rubygems_version: 3.6.2
 specification_version: 4
 summary: A beginner-friendly Ruby interface for OpenAI's API
 test_files: []
data/lib/ai/http.rb
DELETED
@@ -1,45 +0,0 @@
-require "net/http"
-module AI
-  module Http
-    def send_request(uri, method:, content_type: nil, parameters: nil)
-      Net::HTTP.start(uri.host, 443, use_ssl: true) do |http|
-        headers = {
-          "Authorization" => "Bearer #{@api_key}"
-        }
-        if content_type
-          headers.store("Content-Type", "application/json")
-        end
-        net_http_method = "Net::HTTP::#{method.downcase.capitalize}"
-        client = Kernel.const_get(net_http_method)
-        request = client.new(uri, headers)
-
-        if parameters
-          request.body = parameters.to_json
-        end
-        response = http.request(request)
-
-        # Handle proxy server 503 HTML response
-        begin
-          if content_type
-            return JSON.parse(response.body, symbolize_names: true)
-          else
-            return response.body
-          end
-        rescue JSON::ParserError, TypeError => e
-          raise JSON::ParserError, "Failed to parse response from proxy: #{e.message}"
-        end
-      end
-    end
-
-    def create_deep_struct(value)
-      case value
-      when Hash
-        OpenStruct.new(value.transform_values { |hash_value| send __method__, hash_value })
-      when Array
-        value.map { |element| send __method__, element }
-      else
-        value
-      end
-    end
-  end
-end
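
The deleted module hand-rolled Net::HTTP requests against the proxy and wrapped the parsed hashes in OpenStructs to mimic the openai gem's response objects. 0.5.1 drops all of that in favor of pointing the official client at the proxy host, roughly as follows (a sketch, not code from the gem):

    # Same client class either way; only the base_url differs.
    client = OpenAI::Client.new(
      api_key: ENV.fetch("OPENAI_API_KEY"),
      base_url: "https://prepend.me/api.openai.com/v1"
    )
    response = client.responses.create(model: "gpt-5.2", input: "Hello!")
    puts response.output_text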