sublayer 0.2.0 → 0.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/sublayer/logging/base.rb +9 -0
- data/lib/sublayer/logging/debug_logger.rb +10 -0
- data/lib/sublayer/logging/json_logger.rb +20 -0
- data/lib/sublayer/logging/null_logger.rb +9 -0
- data/lib/sublayer/providers/claude.rb +32 -2
- data/lib/sublayer/providers/gemini.rb +25 -3
- data/lib/sublayer/providers/open_ai.rb +29 -3
- data/lib/sublayer/version.rb +1 -1
- data/lib/sublayer.rb +4 -1
- metadata +7 -3
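The diffs for the three other new logging files (base.rb, debug_logger.rb, null_logger.rb) are not reproduced on this page. As a rough sketch only -- inferred from the JsonLogger subclass shown below and from the NullLogger default added in the sublayer.rb diff, not taken from the released source -- the base class and null logger plausibly look like:

module Sublayer
  module Logging
    # Hypothetical sketch: the common interface, matching JsonLogger#log below.
    class Base
      def log(level, message, data = {})
        raise NotImplementedError
      end
    end

    # Hypothetical sketch: the default logger, which discards everything.
    class NullLogger < Base
      def log(level, message, data = {})
        # no-op
      end
    end
  end
end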
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 0a5a558f063c63cab9aca167a16ae3ac31300614e1a042097c544cc6361e3775
+  data.tar.gz: 59eb567e5ccecc071e204685fbe81f8b10fee384dadd4a04ba96275b38e9235f
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a0006db5e1aaacc819cc883ac16094d66c9e99b17b3069c19f5d43af59803ab8e032f07c900a7225b66a3ae91c14f49229742240676a74e9ce0548e5b423daeb
+  data.tar.gz: 3072682126490797db51ba03dde2c8accaccb869f7c861338cb5a1019152701079709d26340096cc1685318119716683842ca2b15d9d37833746835dfa89b34b
data/lib/sublayer/logging/json_logger.rb
ADDED
@@ -0,0 +1,20 @@
+module Sublayer
+  module Logging
+    class JsonLogger < Base
+      def initialize(log_file = "./tmp/sublayer.log")
+        @log_file = log_file
+      end
+
+      def log(level, message, data = {})
+        File.open(@log_file, "a") do |f|
+          f.puts JSON.generate({
+            timestamp: Time.now.iso8601,
+            level: level,
+            message: message,
+            data: data
+          })
+        end
+      end
+    end
+  end
+end
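Since JsonLogger#log appends one JSON.generate'd object per call, the log file is effectively JSON Lines. A minimal usage sketch (values are hypothetical; note that the default path assumes a ./tmp directory already exists, since File.open in append mode will not create directories):

require "sublayer"

logger = Sublayer::Logging::JsonLogger.new("/tmp/sublayer.log")
logger.log(:info, "example message", { request_id: "abc-123" })
# Appends a line shaped like:
# {"timestamp":"2024-08-12T10:00:00Z","level":"info","message":"example message","data":{"request_id":"abc-123"}}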
data/lib/sublayer/providers/claude.rb
CHANGED
@@ -5,6 +5,15 @@ module Sublayer
   module Providers
     class Claude
       def self.call(prompt:, output_adapter:)
+        request_id = SecureRandom.uuid
+        Sublayer.configuration.logger.log(:info, "Claude API request", {
+          model: Sublayer.configuration.ai_model,
+          prompt: prompt,
+          request_id: request_id
+        })
+
+        before_request = Time.now
+
         response = HTTParty.post(
           "https://api.anthropic.com/v1/messages",
           headers: {
@@ -27,13 +36,34 @@ module Sublayer
                 }
               }
             ],
+            tool_choice: { type: "tool", name: output_adapter.name },
             messages: [{ "role": "user", "content": prompt }]
           }.to_json
         )
+
         raise "Error generating with Claude, error: #{response.body}" unless response.code == 200
 
-
-
+        after_request = Time.now
+        response_time = after_request - before_request
+
+        json_response = JSON.parse(response.body)
+
+        Sublayer.configuration.logger.log(:info, "Claude API response", {
+          request_id: request_id,
+          response_time: response_time,
+          usage: {
+            input_tokens: json_response.dig("usage", "input_tokens"),
+            output_tokens: json_response.dig("usage", "output_tokens"),
+            total_tokens: json_response.dig("usage", "input_tokens") + json_response.dig("usage", "output_tokens")
+          }
+        })
+
+        tool_use = json_response.dig("content").find { |content| content['type'] == 'tool_use' && content['name'] == output_adapter.name }
+
+        raise "Error generating with Claude, error: No function called. If the answer is in the response, try rewording your prompt or output adapter name to be from the perspective of the model. Response: #{response.body}" unless tool_use
+        raise "Error generating with Claude, error: Max tokens exceeded. Try breaking your problem up into smaller pieces." if json_response.dig("stop_reason") == "max_tokens"
+
+        tool_use.dig("input")[output_adapter.name]
       end
     end
   end
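Both log entries for a call share the request_id generated at the top, so request and response records can be re-joined offline. A minimal sketch of doing so (assuming the JsonLogger line format shown earlier):

require "json"

# Pair up request/response entries by request_id and summarize each call.
entries = File.readlines("./tmp/sublayer.log").map { |line| JSON.parse(line) }
entries.group_by { |e| e.dig("data", "request_id") }.each do |id, events|
  response = events.find { |e| e["message"].end_with?("API response") }
  next unless response
  puts "#{id}: #{response.dig("data", "response_time")}s, #{response.dig("data", "usage", "total_tokens")} tokens"
end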
data/lib/sublayer/providers/gemini.rb
CHANGED
@@ -8,6 +8,15 @@ module Sublayer
   module Providers
     class Gemini
      def self.call(prompt:, output_adapter:)
+
+        request_id = SecureRandom.uuid
+        before_request = Time.now
+        Sublayer.configuration.logger.log(:info, "Gemini API request", {
+          model: Sublayer.configuration.ai_model,
+          prompt: prompt,
+          request_id: request_id
+        })
+
        response = HTTParty.post(
          "https://generativelanguage.googleapis.com/v1beta/models/#{Sublayer.configuration.ai_model}:generateContent?key=#{ENV['GEMINI_API_KEY']}",
          body: {
@@ -17,8 +26,8 @@ module Sublayer
              text: "#{prompt}"
            },
          },
-          tools: {
-            function_declarations: [
+          tools: [{
+            function_declarations: [
            {
              name: output_adapter.name,
              description: output_adapter.description,
@@ -29,7 +38,7 @@ module Sublayer
              }
            }
            ]
-          },
+          }],
          tool_config: {
            function_calling_config: {
              mode: "ANY",
@@ -42,6 +51,19 @@ module Sublayer
          }
        )
 
+        after_request = Time.now
+        response_time = after_request - before_request
+
+        Sublayer.configuration.logger.log(:info, "Gemini API response", {
+          request_id: request_id,
+          response_time: response_time,
+          usage: {
+            input_tokens: response["usageMetadata"]["promptTokenCount"],
+            output_tokens: response["usageMetadata"]["candidatesTokenCount"],
+            total_tokens: response["usageMetadata"]["totalTokenCount"]
+          }
+        })
+
        raise "Error generating with Gemini, error: #{response.body}" unless response.success?
 
        argument = response.dig("candidates", 0, "content", "parts", 0, "functionCall", "args", output_adapter.name)
data/lib/sublayer/providers/open_ai.rb
CHANGED
@@ -7,6 +7,16 @@ module Sublayer
       def self.call(prompt:, output_adapter:)
         client = ::OpenAI::Client.new(access_token: ENV.fetch("OPENAI_API_KEY"))
 
+        request_id = SecureRandom.uuid
+
+        Sublayer.configuration.logger.log(:info, "OpenAI API request", {
+          model: Sublayer.configuration.ai_model,
+          prompt: prompt,
+          request_id: request_id,
+        })
+
+        before_request = Time.now
+
         response = client.chat(
           parameters: {
             model: Sublayer.configuration.ai_model,
@@ -31,15 +41,31 @@ module Sublayer
               }
             }
           ]
-
         })
 
+        after_request = Time.now
+        response_time = after_request - before_request
+
+        Sublayer.configuration.logger.log(:info, "OpenAI API response", {
+          request_id: request_id,
+          response_time: response_time,
+          usage: {
+            input_tokens: response["usage"]["prompt_tokens"],
+            output_tokens: response["usage"]["completion_tokens"],
+            total_tokens: response["usage"]["total_tokens"]
+          }
+        })
+
         message = response.dig("choices", 0, "message")
 
-        raise "No function called" unless message["tool_calls"]
+        raise "No function called" unless message["tool_calls"]
 
         function_body = message.dig("tool_calls", 0, "function", "arguments")
-        JSON.parse(function_body)[output_adapter.name]
+
+        raise "Error generating with OpenAI. Empty response. Try rewording your output adapter params to be from the perspective of the model. Full Response: #{response}" if function_body == "{}"
+        raise "Error generating with OpenAI. Error: Max tokens exceeded. Try breaking your problem up into smaller pieces." if response["choices"][0]["finish_reason"] == "length"
+
+        results = JSON.parse(function_body)[output_adapter.name]
       end
     end
   end
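Note that results = JSON.parse(function_body)[output_adapter.name] is the final expression of self.call, so the parsed value is still the method's return value: a Ruby assignment evaluates to the assigned value, and the results local is never read again within the hunk shown.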
data/lib/sublayer/version.rb
CHANGED
data/lib/sublayer.rb
CHANGED
@@ -9,6 +9,8 @@ require "httparty"
 require "openai"
 require "nokogiri"
 require "listen"
+require "securerandom"
+require "time"
 
 require_relative "sublayer/version"
 
@@ -22,7 +24,8 @@ module Sublayer
   def self.configuration
     @configuration ||= OpenStruct.new(
       ai_provider: Sublayer::Providers::OpenAI,
-      ai_model: "gpt-4o"
+      ai_model: "gpt-4o",
+      logger: Sublayer::Logging::NullLogger.new
     )
   end
 
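Because the configuration object is an OpenStruct, swapping out the new NullLogger default is a single assignment, e.g. (log path is illustrative):

Sublayer.configuration.logger = Sublayer::Logging::JsonLogger.new("log/sublayer.jsonl")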
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: sublayer
 version: !ruby/object:Gem::Version
-  version: 0.2.0
+  version: 0.2.2
 platform: ruby
 authors:
 - Scott Werner
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2024-
+date: 2024-08-12 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: ruby-openai
@@ -198,6 +198,10 @@ files:
 - lib/sublayer/components/output_adapters/single_string.rb
 - lib/sublayer/components/output_adapters/string_selection_from_list.rb
 - lib/sublayer/generators/base.rb
+- lib/sublayer/logging/base.rb
+- lib/sublayer/logging/debug_logger.rb
+- lib/sublayer/logging/json_logger.rb
+- lib/sublayer/logging/null_logger.rb
 - lib/sublayer/providers/claude.rb
 - lib/sublayer/providers/gemini.rb
 - lib/sublayer/providers/open_ai.rb
@@ -229,7 +233,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.
+rubygems_version: 3.3.26
 signing_key:
 specification_version: 4
 summary: A model-agnostic Ruby GenerativeAI DSL and Framework