sublayer 0.2.1 → 0.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 9c0626295be488d2a39cfbe94652d3e1238b5885c24c5d4d130e0659939387fd
4
- data.tar.gz: e21ee3e2177caff3a1125de4ba07672db13df661c5888e031d84148f4bfa0a01
3
+ metadata.gz: 3b88b5a8d08e537fed46f26b445126a1482f13e79732a545b225c6b6447e9417
4
+ data.tar.gz: 7903ce4058e07fa95faee094bc937f41eb297d23a941a8f2287365c052773797
5
5
  SHA512:
6
- metadata.gz: 8b2a17ca1598e68b7da6a0caf07d61d53f99c6de561f03bbd4f4a74a4bf05ad30ebb1c9261f34320297a1af88c4b82e35163a7711a3d260d97732d2b34b87f1a
7
- data.tar.gz: adcb0094652bdd477f5cbbf3346037bbfa1b2b62a7c54a148ddd0d4cb440bef8d7c994487a8ef47aee30c0039d3bad7ddfafee587c3fa6c688c6e51f0e89d26e
6
+ metadata.gz: 07701102f65938db5dc6e9c1ed3de303073c4a185ecde0fc12c458e6051294d3bd00def3f89219663dc6f6f879b4aac29f84163b000f310bf18831fa94d3e831
7
+ data.tar.gz: e5c34b702ba9aabb599e4d95e13671ef3a433c577dc0a68fd2d1a3ff938d137819da31908ee11cd8eed334fb16dc02f08cd99be21f33ef41dd00db3d5e728065
@@ -3,23 +3,41 @@ module Sublayer
3
3
  module OutputAdapters
4
4
  module Formattable
5
5
  def format_properties
6
+ build_json_schema(self.properties)
7
+ end
8
+
9
+ def build_json_schema(props)
6
10
  formatted_properties = {}
7
- self.properties.each do |prop|
8
- property = {
9
- type: prop.type,
10
- description: prop.description
11
- }
12
-
13
- property[:enum] = prop.enum if prop.respond_to?(:enum) && prop.enum
14
- property[:default] = prop.default if prop.respond_to?(:default) && !prop.default.nil?
15
- property[:minimum] = prop.minimum if prop.respond_to?(:minimum) && !prop.minimum.nil?
16
- property[:maximum] = prop.maximum if prop.respond_to?(:maximum) && !prop.maximum.nil?
17
- property[:items] = prop.items if prop.respond_to?(:items) && prop.items
18
- formatted_properties[prop.name.to_sym] = property
11
+
12
+ props.map do |prop|
13
+ formatted_property = format_property(prop)
14
+ formatted_properties[prop.name.to_sym] = formatted_property
19
15
  end
16
+
20
17
  formatted_properties
21
18
  end
22
19
 
20
+ def format_property(property)
21
+ result = {
22
+ type: property.type,
23
+ description: property.description
24
+ }
25
+
26
+ result[:enum] = property.enum if property.respond_to?(:enum) && property.enum
27
+ result[:default] = property.default if property.respond_to?(:default) && !property.default.nil?
28
+ result[:minimum] = property.minimum if property.respond_to?(:minimum) && !property.minimum.nil?
29
+ result[:maximum] = property.maximum if property.respond_to?(:maximum) && !property.maximum.nil?
30
+
31
+ case property.type
32
+ when 'array'
33
+ result[:items] = property.items.is_a?(OpenStruct) ? format_property(property.items) : property.items
34
+ when 'object'
35
+ result[:properties] = build_json_schema(property.properties) if property.properties
36
+ end
37
+
38
+ result
39
+ end
40
+
23
41
  def format_required
24
42
  self.properties.select(&:required).map(&:name)
25
43
  end
@@ -0,0 +1,31 @@
1
module Sublayer
  module Components
    module OutputAdapters
      # Output adapter describing a single required object whose fields are
      # all named, required strings.
      class NamedStrings
        attr_reader :name, :description, :attributes

        # options - Hash with :name, :description and :attributes, where
        #           :attributes is an array of { name:, description: } hashes.
        def initialize(options)
          @name = options[:name]
          @description = options[:description]
          @attributes = options[:attributes]
        end

        # One top-level "object" property containing a required "string"
        # property per configured attribute.
        def properties
          string_fields = @attributes.map do |attribute|
            OpenStruct.new(
              name: attribute[:name],
              type: "string",
              description: attribute[:description],
              required: true
            )
          end

          [
            OpenStruct.new(
              name: @name,
              type: "object",
              description: @description,
              required: true,
              properties: string_fields
            )
          ]
        end

        # Wrap the provider's raw hash in an OpenStruct keyed by attribute name.
        def materialize_result(raw_result)
          OpenStruct.new(@attributes.to_h { |attribute| [attribute[:name], raw_result[attribute[:name]]] })
        end
      end
    end
  end
end
@@ -10,7 +10,10 @@ module Sublayer
10
10
 
11
11
  def generate
12
12
  self.class::OUTPUT_ADAPTER.load_instance_data(self) if self.class::OUTPUT_ADAPTER.respond_to?(:load_instance_data)
13
- @results = Sublayer.configuration.ai_provider.call(prompt: prompt, output_adapter: self.class::OUTPUT_ADAPTER)
13
+
14
+ raw_results = Sublayer.configuration.ai_provider.call(prompt: prompt, output_adapter: self.class::OUTPUT_ADAPTER)
15
+
16
+ @results = self.class::OUTPUT_ADAPTER.respond_to?(:materialize_result) ? self.class::OUTPUT_ADAPTER.materialize_result(raw_results) : raw_results
14
17
  end
15
18
  end
16
19
  end
@@ -0,0 +1,9 @@
1
module Sublayer
  module Logging
    # Abstract logger interface. Concrete loggers (JsonLogger, DebugLogger,
    # NullLogger) override #log; this base exists only to document the contract.
    class Base
      # level   - Symbol severity (e.g. :info).
      # message - String summary.
      # data    - Hash of structured context (defaults to empty).
      def log(level, message, data = {})
        raise NotImplementedError, "Subclasses must implement log method"
      end
    end
  end
end
@@ -0,0 +1,10 @@
1
module Sublayer
  module Logging
    # Logger that prints human-readable entries to STDOUT for local debugging.
    class DebugLogger < Base
      # Print a timestamped severity line, then pretty-print any context data.
      def log(level, message, data = {})
        timestamp = Time.now.iso8601
        puts "[#{timestamp}] #{level.upcase}: #{message}"
        pp data unless data.empty?
      end
    end
  end
end
@@ -0,0 +1,20 @@
1
require "fileutils"

module Sublayer
  module Logging
    # Logger that appends one JSON object per line to a log file.
    class JsonLogger < Base
      # log_file - path to the output file (default "./tmp/sublayer.log").
      def initialize(log_file = "./tmp/sublayer.log")
        @log_file = log_file
      end

      # Append a JSON entry with timestamp, level, message and data.
      #
      # Fix: the original crashed with Errno::ENOENT when the log directory
      # (e.g. ./tmp) did not exist; create it on demand before opening.
      def log(level, message, data = {})
        FileUtils.mkdir_p(File.dirname(@log_file))
        File.open(@log_file, "a") do |f|
          f.puts JSON.generate({
            timestamp: Time.now.iso8601,
            level: level,
            message: message,
            data: data
          })
        end
      end
    end
  end
end
@@ -0,0 +1,9 @@
1
module Sublayer
  module Logging
    # No-op logger used as the default: accepts log calls and discards them.
    class NullLogger < Base
      def log(level, message, data = {})
        # intentionally empty — logging is disabled
      end
    end
  end
end
@@ -5,6 +5,15 @@ module Sublayer
5
5
  module Providers
6
6
  class Claude
7
7
  def self.call(prompt:, output_adapter:)
8
+ request_id = SecureRandom.uuid
9
+ Sublayer.configuration.logger.log(:info, "Claude API request", {
10
+ model: Sublayer.configuration.ai_model,
11
+ prompt: prompt,
12
+ request_id: request_id
13
+ });
14
+
15
+ before_request = Time.now
16
+
8
17
  response = HTTParty.post(
9
18
  "https://api.anthropic.com/v1/messages",
10
19
  headers: {
@@ -34,9 +43,25 @@ module Sublayer
34
43
 
35
44
  raise "Error generating with Claude, error: #{response.body}" unless response.code == 200
36
45
 
37
- tool_use = JSON.parse(response.body).dig("content").find { |content| content['type'] == 'tool_use' && content['name'] == output_adapter.name }
46
+ after_request = Time.now
47
+ response_time = after_request - before_request
48
+
49
+ json_response = JSON.parse(response.body)
50
+
51
+ Sublayer.configuration.logger.log(:info, "Claude API response", {
52
+ request_id: request_id,
53
+ response_time: response_time,
54
+ usage: {
55
+ input_tokens: json_response.dig("usage", "input_tokens"),
56
+ output_tokens: json_response.dig("usage", "output_tokens"),
57
+ total_tokens: json_response.dig("usage", "input_tokens") + json_response.dig("usage", "output_tokens")
58
+ }
59
+ })
60
+
61
+ tool_use = json_response.dig("content").find { |content| content['type'] == 'tool_use' && content['name'] == output_adapter.name }
38
62
 
39
63
  raise "Error generating with Claude, error: No function called. If the answer is in the response, try rewording your prompt or output adapter name to be from the perspective of the model. Response: #{response.body}" unless tool_use
64
+ raise "Error generating with Claude, error: Max tokens exceeded. Try breaking your problem up into smaller pieces." if json_response.dig("stop_reason") == "max_tokens"
40
65
 
41
66
  tool_use.dig("input")[output_adapter.name]
42
67
  end
@@ -8,6 +8,15 @@ module Sublayer
8
8
  module Providers
9
9
  class Gemini
10
10
  def self.call(prompt:, output_adapter:)
11
+
12
+ request_id = SecureRandom.uuid
13
+ before_request = Time.now
14
+ Sublayer.configuration.logger.log(:info, "Gemini API request", {
15
+ model: Sublayer.configuration.ai_model,
16
+ prompt: prompt,
17
+ request_id: request_id
18
+ })
19
+
11
20
  response = HTTParty.post(
12
21
  "https://generativelanguage.googleapis.com/v1beta/models/#{Sublayer.configuration.ai_model}:generateContent?key=#{ENV['GEMINI_API_KEY']}",
13
22
  body: {
@@ -17,8 +26,8 @@ module Sublayer
17
26
  text: "#{prompt}"
18
27
  },
19
28
  },
20
- tools: {
21
- functionDeclarations: [
29
+ tools: [{
30
+ function_declarations: [
22
31
  {
23
32
  name: output_adapter.name,
24
33
  description: output_adapter.description,
@@ -29,7 +38,7 @@ module Sublayer
29
38
  }
30
39
  }
31
40
  ]
32
- },
41
+ }],
33
42
  tool_config: {
34
43
  function_calling_config: {
35
44
  mode: "ANY",
@@ -42,6 +51,19 @@ module Sublayer
42
51
  }
43
52
  )
44
53
 
54
+ after_request = Time.now
55
+ response_time = after_request - before_request
56
+
57
+ Sublayer.configuration.logger.log(:info, "Gemini API response", {
58
+ request_id: request_id,
59
+ response_time: response_time,
60
+ usage: {
61
+ input_tokens: response["usageMetadata"]["promptTokenCount"],
62
+ output_tokens: response["usageMetadata"]["candidatesTokenCount"],
63
+ total_tokens: response["usageMetadata"]["totalTokenCount"]
64
+ }
65
+ })
66
+
45
67
  raise "Error generating with Gemini, error: #{response.body}" unless response.success?
46
68
 
47
69
  argument = response.dig("candidates", 0, "content", "parts", 0, "functionCall", "args", output_adapter.name)
@@ -7,6 +7,16 @@ module Sublayer
7
7
  def self.call(prompt:, output_adapter:)
8
8
  client = ::OpenAI::Client.new(access_token: ENV.fetch("OPENAI_API_KEY"))
9
9
 
10
+ request_id = SecureRandom.uuid
11
+
12
+ Sublayer.configuration.logger.log(:info, "OpenAI API request", {
13
+ model: Sublayer.configuration.ai_model,
14
+ prompt: prompt,
15
+ request_id: request_id,
16
+ })
17
+
18
+ before_request = Time.now
19
+
10
20
  response = client.chat(
11
21
  parameters: {
12
22
  model: Sublayer.configuration.ai_model,
@@ -31,9 +41,21 @@ module Sublayer
31
41
  }
32
42
  }
33
43
  ]
34
-
35
44
  })
36
45
 
46
+ after_request = Time.now
47
+ response_time = after_request - before_request
48
+
49
+ Sublayer.configuration.logger.log(:info, "OpenAI API response", {
50
+ request_id: request_id,
51
+ response_time: response_time,
52
+ usage: {
53
+ input_tokens: response["usage"]["prompt_tokens"],
54
+ output_tokens: response["usage"]["completion_tokens"],
55
+ total_tokens: response["usage"]["total_tokens"]
56
+ }
57
+ })
58
+
37
59
  message = response.dig("choices", 0, "message")
38
60
 
39
61
  raise "No function called" unless message["tool_calls"]
@@ -41,6 +63,7 @@ module Sublayer
41
63
  function_body = message.dig("tool_calls", 0, "function", "arguments")
42
64
 
43
65
  raise "Error generating with OpenAI. Empty response. Try rewording your output adapter params to be from the perspective of the model. Full Response: #{response}" if function_body == "{}"
66
+ raise "Error generating with OpenAI. Error: Max tokens exceeded. Try breaking your problem up into smaller pieces." if response["choices"][0]["finish_reason"] == "length"
44
67
 
45
68
  results = JSON.parse(function_body)[output_adapter.name]
46
69
  end
@@ -1,5 +1,5 @@
1
1
# frozen_string_literal: true

module Sublayer
  # Current gem version.
  VERSION = "0.2.3"
end
data/lib/sublayer.rb CHANGED
@@ -9,6 +9,8 @@ require "httparty"
9
9
  require "openai"
10
10
  require "nokogiri"
11
11
  require "listen"
12
+ require "securerandom"
13
+ require "time"
12
14
 
13
15
  require_relative "sublayer/version"
14
16
 
@@ -22,7 +24,8 @@ module Sublayer
22
24
  def self.configuration
23
25
  @configuration ||= OpenStruct.new(
24
26
  ai_provider: Sublayer::Providers::OpenAI,
25
- ai_model: "gpt-4o"
27
+ ai_model: "gpt-4o",
28
+ logger: Sublayer::Logging::NullLogger.new
26
29
  )
27
30
  end
28
31
 
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: sublayer
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.2.1
4
+ version: 0.2.3
5
5
  platform: ruby
6
6
  authors:
7
7
  - Scott Werner
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2024-08-04 00:00:00.000000000 Z
11
+ date: 2024-08-13 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: ruby-openai
@@ -195,9 +195,14 @@ files:
195
195
  - lib/sublayer/components/output_adapters.rb
196
196
  - lib/sublayer/components/output_adapters/formattable.rb
197
197
  - lib/sublayer/components/output_adapters/list_of_strings.rb
198
+ - lib/sublayer/components/output_adapters/named_strings.rb
198
199
  - lib/sublayer/components/output_adapters/single_string.rb
199
200
  - lib/sublayer/components/output_adapters/string_selection_from_list.rb
200
201
  - lib/sublayer/generators/base.rb
202
+ - lib/sublayer/logging/base.rb
203
+ - lib/sublayer/logging/debug_logger.rb
204
+ - lib/sublayer/logging/json_logger.rb
205
+ - lib/sublayer/logging/null_logger.rb
201
206
  - lib/sublayer/providers/claude.rb
202
207
  - lib/sublayer/providers/gemini.rb
203
208
  - lib/sublayer/providers/open_ai.rb