scout-ai 0.2.0 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.vimproject +91 -10
- data/Rakefile +1 -0
- data/VERSION +1 -1
- data/bin/scout-ai +2 -0
- data/lib/scout/llm/agent/chat.rb +24 -0
- data/lib/scout/llm/agent.rb +13 -13
- data/lib/scout/llm/ask.rb +26 -16
- data/lib/scout/llm/backends/bedrock.rb +129 -0
- data/lib/scout/llm/backends/huggingface.rb +6 -21
- data/lib/scout/llm/backends/ollama.rb +69 -36
- data/lib/scout/llm/backends/openai.rb +85 -35
- data/lib/scout/llm/backends/openwebui.rb +1 -1
- data/lib/scout/llm/backends/relay.rb +3 -2
- data/lib/scout/llm/backends/responses.rb +272 -0
- data/lib/scout/llm/chat.rb +547 -0
- data/lib/scout/llm/parse.rb +70 -13
- data/lib/scout/llm/tools.rb +126 -5
- data/lib/scout/llm/utils.rb +17 -10
- data/lib/scout/model/base.rb +19 -0
- data/lib/scout/model/python/base.rb +25 -0
- data/lib/scout/model/python/huggingface/causal/next_token.rb +23 -0
- data/lib/scout/model/python/huggingface/causal.rb +29 -0
- data/lib/scout/model/python/huggingface/classification +0 -0
- data/lib/scout/model/python/huggingface/classification.rb +50 -0
- data/lib/scout/model/python/huggingface.rb +112 -0
- data/lib/scout/model/python/torch/dataloader.rb +57 -0
- data/lib/scout/model/python/torch/helpers.rb +84 -0
- data/lib/scout/model/python/torch/introspection.rb +34 -0
- data/lib/scout/model/python/torch/load_and_save.rb +47 -0
- data/lib/scout/model/python/torch.rb +94 -0
- data/lib/scout/model/util/run.rb +181 -0
- data/lib/scout/model/util/save.rb +81 -0
- data/lib/scout-ai.rb +3 -1
- data/python/scout_ai/__init__.py +35 -0
- data/python/scout_ai/__pycache__/__init__.cpython-310.pyc +0 -0
- data/python/scout_ai/__pycache__/__init__.cpython-311.pyc +0 -0
- data/python/scout_ai/__pycache__/huggingface.cpython-310.pyc +0 -0
- data/python/scout_ai/__pycache__/huggingface.cpython-311.pyc +0 -0
- data/python/scout_ai/__pycache__/util.cpython-310.pyc +0 -0
- data/python/scout_ai/__pycache__/util.cpython-311.pyc +0 -0
- data/python/scout_ai/atcold/__init__.py +0 -0
- data/python/scout_ai/atcold/plot_lib.py +141 -0
- data/python/scout_ai/atcold/spiral.py +27 -0
- data/python/scout_ai/huggingface/data.py +48 -0
- data/python/scout_ai/huggingface/eval.py +60 -0
- data/python/scout_ai/huggingface/model.py +29 -0
- data/python/scout_ai/huggingface/rlhf.py +83 -0
- data/python/scout_ai/huggingface/train/__init__.py +34 -0
- data/python/scout_ai/huggingface/train/__pycache__/__init__.cpython-310.pyc +0 -0
- data/python/scout_ai/huggingface/train/__pycache__/next_token.cpython-310.pyc +0 -0
- data/python/scout_ai/huggingface/train/next_token.py +315 -0
- data/python/scout_ai/language_model.py +70 -0
- data/python/scout_ai/util.py +32 -0
- data/scout-ai.gemspec +130 -0
- data/scout_commands/agent/ask +133 -15
- data/scout_commands/agent/kb +15 -0
- data/scout_commands/llm/ask +71 -12
- data/scout_commands/llm/process +4 -2
- data/test/data/cat.jpg +0 -0
- data/test/scout/llm/agent/test_chat.rb +14 -0
- data/test/scout/llm/backends/test_bedrock.rb +60 -0
- data/test/scout/llm/backends/test_huggingface.rb +3 -3
- data/test/scout/llm/backends/test_ollama.rb +48 -10
- data/test/scout/llm/backends/test_openai.rb +96 -11
- data/test/scout/llm/backends/test_responses.rb +115 -0
- data/test/scout/llm/test_ask.rb +1 -0
- data/test/scout/llm/test_chat.rb +214 -0
- data/test/scout/llm/test_parse.rb +81 -2
- data/test/scout/model/python/huggingface/causal/test_next_token.rb +59 -0
- data/test/scout/model/python/huggingface/test_causal.rb +33 -0
- data/test/scout/model/python/huggingface/test_classification.rb +30 -0
- data/test/scout/model/python/test_base.rb +44 -0
- data/test/scout/model/python/test_huggingface.rb +9 -0
- data/test/scout/model/python/test_torch.rb +71 -0
- data/test/scout/model/python/torch/test_helpers.rb +14 -0
- data/test/scout/model/test_base.rb +117 -0
- data/test/scout/model/util/test_save.rb +31 -0
- metadata +72 -5
- data/questions/coach +0 -2
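
Taken together, the file list shows 1.0.0 replacing the ad-hoc prompt handling of 0.2.0 with a message-based chat layer (data/lib/scout/llm/chat.rb, +547 lines), adding Bedrock and OpenAI Responses backends, and introducing a Python-backed model layer under data/lib/scout/model. As rough orientation, a minimal sketch of the new entry point as it appears in the diffs below; method names are taken from the changed files, but exact signatures and defaults may differ from the released code:

    require 'scout-ai'

    # LLM.chat expands a question into a message array; each backend's ask
    # method consumes that array and returns the final content, or the full
    # message list when called with return_messages: true.
    puts LLM::OpenAI.ask("What is the capital of France?", model: 'gpt-4.1')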
data/lib/scout/llm/backends/openai.rb

@@ -1,8 +1,6 @@
 require 'scout'
 require 'openai'
-require_relative '../
-require_relative '../tools'
-require_relative '../utils'
+require_relative '../chat'

 module LLM
   module OpenAI
@@ -11,9 +9,44 @@ module LLM
       Object::OpenAI::Client.new(access_token:key, log_errors: log_errors, uri_base: url)
     end

+    def self.process_input(messages)
+      messages.collect do |message|
+        if message[:role] == 'function_call'
+          {role: 'assistant', tool_calls: [message[:content]]}
+        elsif message[:role] == 'function_call_output'
+          message[:content]
+        else
+          message
+        end
+      end.flatten
+    end
+
+    def self.process_response(response, &block)
+      Log.debug "Respose: #{Log.fingerprint response}"
+      raise Exception, response["error"] if response["error"]
+
+      message = response.dig("choices", 0, "message")
+      tool_calls = response.dig("choices", 0, "tool_calls") ||
+        response.dig("choices", 0, "message", "tool_calls")
+
+      if tool_calls && tool_calls.any?
+        LLM.call_tools tool_calls, &block
+      else
+        [message]
+      end
+    end
+
     def self.ask(question, options = {}, &block)
+      original_options = options.dup

-
+      messages = LLM.chat(question)
+      options = options.merge LLM.options messages
+      tools = LLM.tools messages
+      associations = LLM.associations messages
+
+      client, url, key, model, log_errors, return_messages, format = IndiferentHash.process_options options,
+        :client, :url, :key, :model, :log_errors, :return_messages, :format,
+        log_errors: true

       if client.nil?
         url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
@@ -23,45 +56,62 @@ module LLM

       if model.nil?
         url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
-        model ||= LLM.get_url_config(:model, url, :openai_ask, :ask, :openai, env: 'OPENAI_MODEL', default: "gpt-
+        model ||= LLM.get_url_config(:model, url, :openai_ask, :ask, :openai, env: 'OPENAI_MODEL', default: "gpt-4.1")
       end

-      role = IndiferentHash.process_options options, :role
-
-
-
-
-
-
-
-
-
-
-
+      #role = IndiferentHash.process_options options, :role
+
+      case format.to_sym
+      when :json, :json_object
+        options[:response_format] = {type: 'json_object'}
+      else
+        options[:response_format] = {type: format}
+      end if format
+
+      parameters = options.merge(model: model)
+
+      if tools.any? || associations.any?
+        parameters[:tools] = []
+        parameters[:tools] += tools.values.collect{|a| a.last } if tools
+        parameters[:tools] += associations.values.collect{|a| a.last } if associations
+        if not block_given?
+          block = Proc.new do |name,parameters|
+            IndiferentHash.setup parameters
+            if tools[name]
+              workflow = tools[name].first
+              jobname = parameters.delete :jobname
+              workflow.job(name, jobname, parameters).run
+            else
+              kb = associations[name].first
+              entities, reverse = IndiferentHash.process_options parameters, :entities, :reverse
+              if reverse
+                kb.parents(name, entities)
+              else
+                kb.children(name, entities)
+              end
+            end
+          end
+        end
+      end

-
+      Log.low "Calling openai #{url}: #{Log.fingerprint parameters}}"
+      Log.debug LLM.print messages

-
-      messages << message
+      parameters[:messages] = LLM.tools_to_openai messages

-
-      tool_calls.each do |tool_call|
-        response_message = LLM.tool_response(tool_call, &block)
-        messages << response_message
-      end
+      response = self.process_response client.chat(parameters: parameters), &block

-
-
-
-
+      res = if response.last[:role] == 'function_call_output'
+              response + self.ask(messages + response, original_options.except(:tool_choice).merge(return_messages: true, tools: parameters[:tools]), &block)
+            else
+              response
+            end

-
-
-
+      if return_messages
+        res
+      else
+        res.last['content']
       end
-
-      message.dig("content")
     end

     def self.embed(text, options = {})
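
The rewritten ask above routes tool use through process_response: tool calls returned by the model are dispatched to the supplied block (or to the default workflow/knowledge-base dispatcher built in the hunk), and whenever the resulting message list ends in a function_call_output the method recurses with the accumulated messages until the model answers in plain text. A hedged sketch of driving that loop with a custom tool; the block contract (name, parameters) is read off the default Proc above, while the get_time tool hash is a hypothetical example following the OpenAI function-calling schema:

    get_time = {
      type: 'function',
      function: {
        name: 'get_time',
        description: 'Return the current UTC time as a string',
        parameters: {type: 'object', properties: {}, required: []}
      }
    }

    # The block receives each tool call's name and arguments and returns the
    # tool output, which is fed back to the model on the recursive call.
    LLM::OpenAI.ask("Call get_time and report the result", tools: [get_time]) do |name, parameters|
      Time.now.utc.to_s if name == 'get_time'
    end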
data/lib/scout/llm/backends/openwebui.rb

@@ -31,7 +31,7 @@ module LLM
     model ||= LLM.get_url_config(:model, url, :openai_ask, :ask, :openai, env: 'OPENWEBUI_MODEL')

     role = IndiferentHash.process_options options, :role
-    messages = LLM.
+    messages = LLM.messages(question, role)

     parameters = options.merge(model: model, messages: messages)

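The OpenWebUI hunk completes a call that was left dangling in 0.2.0. Presumably LLM.messages builds the message array from the question and role, along the lines of the sketch below; this is inferred from the call site only, not from documentation:

    # Hypothetical behavior, guessed from the call site above:
    LLM.messages("Summarize this file", :user)
    # => [{role: :user, content: "Summarize this file"}]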
data/lib/scout/llm/backends/relay.rb

@@ -24,9 +24,10 @@ module LLM
     end

     def self.ask(question, options = {}, &block)
-
+      server = IndiferentHash.process_options options, :server
+      server ||= Scout::Config.get :server, :ask_relay, :relay, :ask, env: 'ASK_ENDPOINT,LLM_ENDPOINT', default: :openai

-
+      options[:question] = question
       TmpFile.with_file(options.to_json) do |file|
         id = upload(server, file)
         gather(server, id)
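
The relay backend now resolves its target server from an explicit option, then Scout::Config, then the ASK_ENDPOINT/LLM_ENDPOINT environment variables, defaulting to :openai, and ships the question inside the uploaded options JSON. A sketch of that configuration surface as read off the hunk; LLM::Relay as the module name is an assumption based on the backends/relay.rb path:

    # Either configure the endpoint globally...
    ENV['LLM_ENDPOINT'] = 'my-relay-host'
    # ...or pass it per call; the question travels inside the uploaded JSON.
    LLM::Relay.ask("Hello from the relay", server: 'my-relay-host')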
data/lib/scout/llm/backends/responses.rb

@@ -0,0 +1,272 @@
+require_relative 'openai'
+
+module LLM
+  module Responses
+    def self.encode_image(path)
+      path = path.find if Path === path
+      file_content = File.binread(path) # Replace with your file name
+
+      case extension = path.split('.').last.downcase
+      when 'jpg', 'jpeg'
+        mime = "image/jpeg"
+      when 'png'
+        mime = "image/png"
+      else
+        mime = "image/extension"
+      end
+
+      base64_image = Base64.strict_encode64(file_content)
+
+      "data:#{mime};base64,#{base64_image}"
+    end
+
+    def self.encode_pdf(path)
+      file_content = File.binread(path) # Replace with your file name
+      Base64.strict_encode64(file_content)
+    end
+    def self.tool_response(tool_call, &block)
+      tool_call_id = tool_call.dig("id").sub(/^fc_/, '')
+      function_name = tool_call.dig("function", "name")
+      function_arguments = tool_call.dig("function", "arguments")
+      function_arguments = JSON.parse(function_arguments, { symbolize_names: true }) if String === function_arguments
+      IndiferentHash.setup function_arguments
+      function_response = block.call function_name, function_arguments
+
+      content = case function_response
+                when nil
+                  "success"
+                else
+                  function_response
+                end
+      content = content.to_s if Numeric === content
+    end
+
+    def self.tools_to_responses(messages)
+      messages.collect do |message|
+        if message[:role] == 'function_call'
+          info = JSON.parse(message[:content])
+          IndiferentHash.setup info
+          id = info[:id].sub(/^fc_/, '')
+          IndiferentHash.setup({
+            "type" => "function_call",
+            "status" => "completed",
+            "name" => info[:name],
+            "arguments" => (info[:arguments] || {}).to_json,
+            "call_id"=>"call_#{id}",
+          })
+        elsif message[:role] == 'function_call_output'
+          info = JSON.parse(message[:content])
+          IndiferentHash.setup info
+          id = info[:id].sub(/^fc_/, '')
+          { # append result message
+            "type" => "function_call_output",
+            "output" => info[:content],
+            "call_id"=>"call_#{id}",
+          }
+        else
+          message
+        end
+      end.flatten
+    end
+
+    def self.process_response(response, &block)
+      Log.debug "Respose: #{Log.fingerprint response}"
+
+      response['output'].collect do |output|
+        case output['type']
+        when 'message'
+          output['content'].collect do |content|
+            case content['type']
+            when 'output_text'
+              IndiferentHash.setup({role: 'assistant', content: content['text']})
+            end
+          end
+        when 'function_call'
+          LLM.call_tools [output], &block
+        when 'web_search_call'
+          next
+        else
+          eee output
+          raise
+        end
+      end.compact.flatten
+    end
+
+    def self.process_input(messages)
+      messages = self.tools_to_responses messages
+
+      messages.collect do |message|
+        IndiferentHash.setup(message)
+        if message[:role] == 'image'
+          path = message[:content]
+          if Open.remote?(path)
+            {role: :user, content: {type: :input_image, image_url: path }}
+          elsif Open.exists?(path)
+            path = self.encode_image(path)
+            {role: :user, content: [{type: :input_image, image_url: path }]}
+          else
+            raise
+          end
+        elsif message[:role] == 'pdf'
+          path = message[:content]
+          if Open.remote?(path)
+            {role: :user, content: {type: :input_file, file_url: path }}
+          elsif Open.exists?(path)
+            data = self.encode_pdf(path)
+            {role: :user, content: [{type: :input_file, file_data: data }]}
+          else
+            raise
+          end
+        elsif message[:role] == 'websearch'
+          {role: :tool, content: {type: "web_search_preview"} }
+        else
+          message
+        end
+      end.flatten
+    end
+
+    def self.ask(question, options = {}, &block)
+      original_options = options.dup
+
+      messages = LLM.chat(question)
+      options = options.merge LLM.options messages
+      tools = LLM.tools messages
+      associations = LLM.associations messages
+
+      client, url, key, model, log_errors, return_messages, format = IndiferentHash.process_options options,
+        :client, :url, :key, :model, :log_errors, :return_messages, :format,
+        log_errors: true
+
+      if client.nil?
+        url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
+        key ||= LLM.get_url_config(:key, url, :openai_ask, :ask, :openai, env: 'OPENAI_KEY')
+        client = LLM::OpenAI.client url, key, log_errors
+      end
+
+      if model.nil?
+        url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
+        model ||= LLM.get_url_config(:model, url, :openai_ask, :ask, :openai, env: 'OPENAI_MODEL', default: "gpt-4.1")
+      end
+
+      case format
+      when :json, :json_object, "json", "json_object"
+        options['text'] = {format: {type: 'json_object'}}
+      when String, Symbol
+        options['text'] = {format: {type: format}}
+      when Hash
+        if format.include?('format')
+          options['text'] = format
+        elsif format['type'] == 'json_schema'
+          options['text'] = {format: format}
+        else
+          options['text'] = {format: {name: "response_schema",
+                                      type: "json_schema",
+                                      additionalProperties: false,
+                                      required: format['properties'].keys,
+                                      schema: format,
+          }}
+        end
+      end if format
+
+      parameters = options.merge(model: model)
+
+      if tools.any? || associations.any?
+        parameters[:tools] ||= []
+        parameters[:tools] += tools.values.collect{|a| a.last } if tools
+        parameters[:tools] += associations.values.collect{|a| a.last } if associations
+        parameters[:tools] = parameters[:tools].collect{|tool|
+          function = tool.delete :function;
+          tool.merge function
+        }
+        if not block_given?
+          block = Proc.new do |name,parameters|
+            IndiferentHash.setup parameters
+            if tools[name]
+              workflow = tools[name].first
+              jobname = parameters.delete :jobname
+              workflow.job(name, jobname, parameters).run
+            else
+              kb = associations[name].first
+              entities, reverse = IndiferentHash.process_options parameters, :entities, :reverse
+              if reverse
+                kb.parents(name, entities)
+              else
+                kb.children(name, entities)
+              end
+            end
+          end
+        end
+      end
+
+      Log.low "Calling client with parameters #{Log.fingerprint parameters}\n#{LLM.print messages}"
+
+      messages = self.process_input messages
+      input = []
+      messages.each do |message|
+        parameters[:tools] ||= []
+        if message[:role].to_s == 'tool'
+          parameters[:tools] << message[:content]
+        else
+          input << message
+        end
+      end
+      parameters[:input] = input
+
+      response = client.responses.create(parameters: parameters)
+      response = self.process_response response, &block
+
+      res = if response.last[:role] == 'function_call_output'
+              response + self.ask(messages + response, original_options.except(:tool_choice).merge(return_messages: true, tools: parameters[:tools]), &block)
+            else
+              response
+            end
+
+      if return_messages
+        res
+      else
+        res.last['content']
+      end
+    end
+
+  end
+
+  def self.image(question, options = {}, &block)
+    original_options = options.dup
+
+    messages = LLM.chat(question)
+    options = options.merge LLM.options messages
+    tools = LLM.tools messages
+    associations = LLM.associations messages
+
+    client, url, key, model, log_errors, return_messages, format = IndiferentHash.process_options options,
+      :client, :url, :key, :model, :log_errors, :return_messages, :format,
+      log_errors: true
+
+    if client.nil?
+      url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
+      key ||= LLM.get_url_config(:key, url, :openai_ask, :ask, :openai, env: 'OPENAI_KEY')
+      client = LLM::OpenAI.client url, key, log_errors
+    end
+
+    if model.nil?
+      url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
+      model ||= LLM.get_url_config(:model, url, :openai_ask, :ask, :openai, env: 'OPENAI_MODEL', default: "gpt-image-1")
+    end
+
+    messages = self.process_input messages
+    input = []
+    messages.each do |message|
+      parameters[:tools] ||= []
+      if message[:role].to_s == 'tool'
+        parameters[:tools] << message[:content]
+      else
+        input << message
+      end
+    end
+    parameters[:prompt] = LLM.print(input)
+
+    response = client.images.generate(parameters: parameters)
+
+    response[0]['b64_json']
+  end
+end
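
Beyond plain chat, process_input in this new backend recognizes image, pdf, and websearch message roles, inlining local files as base64 payloads and turning websearch into a web_search_preview tool. A hedged usage sketch based only on those branches; that a pre-built message array passes through LLM.chat unchanged is an assumption, and the test image path comes from this release's test data:

    messages = [
      {role: 'image', content: 'data/test/data/cat.jpg'},  # local file: sent as a base64 data URL
      {role: 'user', content: 'Describe this image'}
    ]
    puts LLM::Responses.ask(messages, model: 'gpt-4.1')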