sublayer 0.2.1 → 0.2.2

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 9c0626295be488d2a39cfbe94652d3e1238b5885c24c5d4d130e0659939387fd
4
- data.tar.gz: e21ee3e2177caff3a1125de4ba07672db13df661c5888e031d84148f4bfa0a01
3
+ metadata.gz: 0a5a558f063c63cab9aca167a16ae3ac31300614e1a042097c544cc6361e3775
4
+ data.tar.gz: 59eb567e5ccecc071e204685fbe81f8b10fee384dadd4a04ba96275b38e9235f
5
5
  SHA512:
6
- metadata.gz: 8b2a17ca1598e68b7da6a0caf07d61d53f99c6de561f03bbd4f4a74a4bf05ad30ebb1c9261f34320297a1af88c4b82e35163a7711a3d260d97732d2b34b87f1a
7
- data.tar.gz: adcb0094652bdd477f5cbbf3346037bbfa1b2b62a7c54a148ddd0d4cb440bef8d7c994487a8ef47aee30c0039d3bad7ddfafee587c3fa6c688c6e51f0e89d26e
6
+ metadata.gz: a0006db5e1aaacc819cc883ac16094d66c9e99b17b3069c19f5d43af59803ab8e032f07c900a7225b66a3ae91c14f49229742240676a74e9ce0548e5b423daeb
7
+ data.tar.gz: 3072682126490797db51ba03dde2c8accaccb869f7c861338cb5a1019152701079709d26340096cc1685318119716683842ca2b15d9d37833746835dfa89b34b
module Sublayer
  module Logging
    # Abstract logging interface for Sublayer. Concrete loggers
    # (DebugLogger, JsonLogger, NullLogger) subclass this and
    # implement #log with the same three-argument signature.
    class Base
      # @param level [Symbol] severity of the event (e.g. :info)
      # @param message [String] human-readable event description
      # @param data [Hash] structured context for the event
      # @raise [NotImplementedError] always; subclasses must override
      def log(level, message, data = {})
        raise(NotImplementedError, "Subclasses must implement log method")
      end
    end
  end
end
module Sublayer
  module Logging
    # Logger that prints human-readable entries to STDOUT and dumps any
    # structured data with Kernel#pp. Intended for local debugging.
    class DebugLogger < Base
      # @param level [Symbol] severity; upcased in the printed line
      # @param message [String] event description
      # @param data [Hash] extra context; pretty-printed when non-empty
      def log(level, message, data = {})
        stamp = Time.now.iso8601
        puts "[#{stamp}] #{level.upcase}: #{message}"
        pp data unless data.empty?
      end
    end
  end
end
require "fileutils"

module Sublayer
  module Logging
    # Logger that appends one JSON object per line (JSON Lines format)
    # to a file, suitable for machine consumption.
    class JsonLogger < Base
      # @param log_file [String] path of the log file. Parent directories
      #   are created on demand, so the default "./tmp" need not pre-exist.
      def initialize(log_file = "./tmp/sublayer.log")
        @log_file = log_file
      end

      # Append a single JSON-encoded log entry.
      #
      # @param level [Symbol] severity of the event
      # @param message [String] event description
      # @param data [Hash] structured context, serialized verbatim
      def log(level, message, data = {})
        # Create the target directory lazily: the original implementation
        # raised Errno::ENOENT whenever ./tmp (or a custom parent dir)
        # did not already exist.
        FileUtils.mkdir_p(File.dirname(@log_file))

        File.open(@log_file, "a") do |f|
          f.puts JSON.generate({
            timestamp: Time.now.iso8601,
            level: level,
            message: message,
            data: data
          })
        end
      end
    end
  end
end
module Sublayer
  module Logging
    # No-op logger: satisfies the logging interface while discarding every
    # entry. Used as the default so that logging is strictly opt-in.
    class NullLogger < Base
      # Intentionally ignores all arguments; returns nil.
      def log(_level, _message, _data = {})
        nil # discard everything
      end
    end
  end
end
@@ -5,6 +5,15 @@ module Sublayer
5
5
  module Providers
6
6
  class Claude
7
7
  def self.call(prompt:, output_adapter:)
8
+ request_id = SecureRandom.uuid
9
+ Sublayer.configuration.logger.log(:info, "Claude API request", {
10
+ model: Sublayer.configuration.ai_model,
11
+ prompt: prompt,
12
+ request_id: request_id
13
+ });
14
+
15
+ before_request = Time.now
16
+
8
17
  response = HTTParty.post(
9
18
  "https://api.anthropic.com/v1/messages",
10
19
  headers: {
@@ -34,9 +43,25 @@ module Sublayer
34
43
 
35
44
  raise "Error generating with Claude, error: #{response.body}" unless response.code == 200
36
45
 
37
- tool_use = JSON.parse(response.body).dig("content").find { |content| content['type'] == 'tool_use' && content['name'] == output_adapter.name }
46
+ after_request = Time.now
47
+ response_time = after_request - before_request
48
+
49
+ json_response = JSON.parse(response.body)
50
+
51
+ Sublayer.configuration.logger.log(:info, "Claude API response", {
52
+ request_id: request_id,
53
+ response_time: response_time,
54
+ usage: {
55
+ input_tokens: json_response.dig("usage", "input_tokens"),
56
+ output_tokens: json_response.dig("usage", "output_tokens"),
57
+ total_tokens: json_response.dig("usage", "input_tokens") + json_response.dig("usage", "output_tokens")
58
+ }
59
+ })
60
+
61
+ tool_use = json_response.dig("content").find { |content| content['type'] == 'tool_use' && content['name'] == output_adapter.name }
38
62
 
39
63
  raise "Error generating with Claude, error: No function called. If the answer is in the response, try rewording your prompt or output adapter name to be from the perspective of the model. Response: #{response.body}" unless tool_use
64
+ raise "Error generating with Claude, error: Max tokens exceeded. Try breaking your problem up into smaller pieces." if json_response.dig("stop_reason") == "max_tokens"
40
65
 
41
66
  tool_use.dig("input")[output_adapter.name]
42
67
  end
@@ -8,6 +8,15 @@ module Sublayer
8
8
  module Providers
9
9
  class Gemini
10
10
  def self.call(prompt:, output_adapter:)
11
+
12
+ request_id = SecureRandom.uuid
13
+ before_request = Time.now
14
+ Sublayer.configuration.logger.log(:info, "Gemini API request", {
15
+ model: Sublayer.configuration.ai_model,
16
+ prompt: prompt,
17
+ request_id: request_id
18
+ })
19
+
11
20
  response = HTTParty.post(
12
21
  "https://generativelanguage.googleapis.com/v1beta/models/#{Sublayer.configuration.ai_model}:generateContent?key=#{ENV['GEMINI_API_KEY']}",
13
22
  body: {
@@ -17,8 +26,8 @@ module Sublayer
17
26
  text: "#{prompt}"
18
27
  },
19
28
  },
20
- tools: {
21
- functionDeclarations: [
29
+ tools: [{
30
+ function_declarations: [
22
31
  {
23
32
  name: output_adapter.name,
24
33
  description: output_adapter.description,
@@ -29,7 +38,7 @@ module Sublayer
29
38
  }
30
39
  }
31
40
  ]
32
- },
41
+ }],
33
42
  tool_config: {
34
43
  function_calling_config: {
35
44
  mode: "ANY",
@@ -42,6 +51,19 @@ module Sublayer
42
51
  }
43
52
  )
44
53
 
54
+ after_request = Time.now
55
+ response_time = after_request - before_request
56
+
57
+ Sublayer.configuration.logger.log(:info, "Gemini API response", {
58
+ request_id: request_id,
59
+ response_time: response_time,
60
+ usage: {
61
+ input_tokens: response["usageMetadata"]["promptTokenCount"],
62
+ output_tokens: response["usageMetadata"]["candidatesTokenCount"],
63
+ total_tokens: response["usageMetadata"]["totalTokenCount"]
64
+ }
65
+ })
66
+
45
67
  raise "Error generating with Gemini, error: #{response.body}" unless response.success?
46
68
 
47
69
  argument = response.dig("candidates", 0, "content", "parts", 0, "functionCall", "args", output_adapter.name)
@@ -7,6 +7,16 @@ module Sublayer
7
7
  def self.call(prompt:, output_adapter:)
8
8
  client = ::OpenAI::Client.new(access_token: ENV.fetch("OPENAI_API_KEY"))
9
9
 
10
+ request_id = SecureRandom.uuid
11
+
12
+ Sublayer.configuration.logger.log(:info, "OpenAI API request", {
13
+ model: Sublayer.configuration.ai_model,
14
+ prompt: prompt,
15
+ request_id: request_id,
16
+ })
17
+
18
+ before_request = Time.now
19
+
10
20
  response = client.chat(
11
21
  parameters: {
12
22
  model: Sublayer.configuration.ai_model,
@@ -31,9 +41,21 @@ module Sublayer
31
41
  }
32
42
  }
33
43
  ]
34
-
35
44
  })
36
45
 
46
+ after_request = Time.now
47
+ response_time = after_request - before_request
48
+
49
+ Sublayer.configuration.logger.log(:info, "OpenAI API response", {
50
+ request_id: request_id,
51
+ response_time: response_time,
52
+ usage: {
53
+ input_tokens: response["usage"]["prompt_tokens"],
54
+ output_tokens: response["usage"]["completion_tokens"],
55
+ total_tokens: response["usage"]["total_tokens"]
56
+ }
57
+ })
58
+
37
59
  message = response.dig("choices", 0, "message")
38
60
 
39
61
  raise "No function called" unless message["tool_calls"]
@@ -41,6 +63,7 @@ module Sublayer
41
63
  function_body = message.dig("tool_calls", 0, "function", "arguments")
42
64
 
43
65
  raise "Error generating with OpenAI. Empty response. Try rewording your output adapter params to be from the perspective of the model. Full Response: #{response}" if function_body == "{}"
66
+ raise "Error generating with OpenAI. Error: Max tokens exceeded. Try breaking your problem up into smaller pieces." if response["choices"][0]["finish_reason"] == "length"
44
67
 
45
68
  results = JSON.parse(function_body)[output_adapter.name]
46
69
  end
# frozen_string_literal: true

module Sublayer
  # Gem version, bumped to 0.2.2 for this release (adds the logging
  # subsystem and provider request/response instrumentation).
  VERSION = "0.2.2"
end
data/lib/sublayer.rb CHANGED
@@ -9,6 +9,8 @@ require "httparty"
9
9
  require "openai"
10
10
  require "nokogiri"
11
11
  require "listen"
12
+ require "securerandom"
13
+ require "time"
12
14
 
13
15
  require_relative "sublayer/version"
14
16
 
@@ -22,7 +24,8 @@ module Sublayer
22
24
  def self.configuration
23
25
  @configuration ||= OpenStruct.new(
24
26
  ai_provider: Sublayer::Providers::OpenAI,
25
- ai_model: "gpt-4o"
27
+ ai_model: "gpt-4o",
28
+ logger: Sublayer::Logging::NullLogger.new
26
29
  )
27
30
  end
28
31
 
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: sublayer
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.2.1
4
+ version: 0.2.2
5
5
  platform: ruby
6
6
  authors:
7
7
  - Scott Werner
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2024-08-04 00:00:00.000000000 Z
11
+ date: 2024-08-12 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: ruby-openai
@@ -198,6 +198,10 @@ files:
198
198
  - lib/sublayer/components/output_adapters/single_string.rb
199
199
  - lib/sublayer/components/output_adapters/string_selection_from_list.rb
200
200
  - lib/sublayer/generators/base.rb
201
+ - lib/sublayer/logging/base.rb
202
+ - lib/sublayer/logging/debug_logger.rb
203
+ - lib/sublayer/logging/json_logger.rb
204
+ - lib/sublayer/logging/null_logger.rb
201
205
  - lib/sublayer/providers/claude.rb
202
206
  - lib/sublayer/providers/gemini.rb
203
207
  - lib/sublayer/providers/open_ai.rb