lammy 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml ADDED
@@ -0,0 +1,7 @@
+ ---
+ SHA256:
+   metadata.gz: 8a3c37bb78fae05b1c34a205b5a763149077199651acd8f52896f887f8edda45
+   data.tar.gz: c6af72cc5f7c29d2ee6ab211743398b8757f4b3ba9a38fcfa20031de7f625982
+ SHA512:
+   metadata.gz: fe0ca83a8d96703ad5d8543ca6b96a854bf69eaac1a909925e7eb99c9a54fd6b02d9eb0faa5ee0cfadf54643b4b9f8ab3506e9a88f5df85541e161370ad6b334
+   data.tar.gz: bd399dcb3f6a1b75a8d5f6b2dd6b3d2657a5db52c736c905a0c72285ab3feb51f545a385c3bd21269243b033568f4f96be6b4aa8d100e498f73098b97b3463a6
data/lib/lammy/chat.rb ADDED
@@ -0,0 +1,39 @@
+ # frozen_string_literal: true
+
+ module L
+   module Chat
+     def llm(**kwargs)
+       @next_llm_settings = kwargs
+     end
+
+     def handle_llm(method_name)
+       settings = @next_llm_settings
+       @next_llm_settings = nil
+
+       # Unbind the original method
+       original_method = instance_method(method_name)
+
+       # Redefine the method
+       define_method(method_name) do |*args, &block|
+         # Initialize context
+         @system_message = nil
+
+         # Make `context` method available within the instance
+         define_singleton_method(:context) do |message|
+           @system_message = message
+         end
+
+         # Call the original method to get the user message
+         user_message = original_method.bind(self).call(*args, &block)
+
+         case settings[:model]
+         when *OpenAI::MODELS
+           client = OpenAI.new
+           client.chat(settings, user_message, @system_message)
+         else
+           raise "Unsupported model: #{settings[:model]}"
+         end
+       end
+     end
+   end
+ end
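For orientation, here is a minimal sketch of how this DSL reads from the caller's side, mirroring the example documented in lib/lammy.rb below (the `User` class and `name` attribute are illustrative, not part of this file):

```ruby
class User
  include L

  attr_reader :name

  def initialize(name)
    @name = name
  end

  # `llm` stashes the settings; the `method_added` hook in lib/lammy.rb then
  # calls `handle_llm`, which rewraps `welcome` so its return value is sent
  # to the model as the user message.
  llm(model: 'gpt-4o')
  def welcome
    context 'You are a helpful assistant that writes in lower case.'
    "Say hello to #{name} with a poem."
  end
end

# User.new('John Doe').welcome # => the model's reply as a String
```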
data/lib/lammy/embeddings.rb ADDED
@@ -0,0 +1,48 @@
+ # frozen_string_literal: true
+
+ module L
+   module Embeddings
+     def v(**kwargs)
+       @next_v_settings = kwargs
+     end
+
+     def handle_v(method_name)
+       settings = @next_v_settings
+       @next_v_settings = nil
+
+       # Unbind the original method
+       original_method = instance_method(method_name)
+
+       # Redefine the method
+       define_method(method_name) do |*args, &block|
+         # Initialize chunking settings
+         @chunk_by_size = nil
+
+         # Make `chunk_by_size` method available within the instance
+         define_singleton_method(:chunk_by_size) do |size|
+           @chunk_by_size = size
+         end
+
+         # Call the original method to get the input
+         input = original_method.bind(self).call(*args, &block)
+
+         # # Tokenize the input
+         # if @chunk_by_size.blank?
+         #   input = [ input ]
+         # else
+         #   tokenizer = Tokenizers.from_pretrained("bert-base-cased")
+         #   input = tokenizer.encode(input).tokens
+         # end
+         input = [input]
+
+         case settings[:model]
+         when *OpenAI::EMBEDDINGS
+           client = OpenAI.new
+           client.embeddings(settings, input)
+         else
+           raise "Unsupported model: #{settings[:model]}"
+         end
+       end
+     end
+   end
+ end
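Worth noting: the tokenizer branch above is commented out, so `chunk_by_size` records the size but the input is always embedded as a single chunk in this version. A usage sketch under that assumption (the `Document` class is illustrative):

```ruby
class Document
  include L

  attr_reader :text

  def initialize(text)
    @text = text
  end

  v(model: 'text-embedding-3-large')
  def embedding
    chunk_by_size 256 # stored, but not yet applied (see commented code above)
    text
  end
end

# Document.new('Some text').embedding
# # => a single vector (Array of Floats), since `embeddings` unwraps
# #    one-element responses via `responses.one?`
```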
data/lib/lammy/openai.rb ADDED
@@ -0,0 +1,67 @@
+ # frozen_string_literal: true
+
+ require 'openai'
+
+ module L
+   # Use the OpenAI API via the ruby-openai client library
+   class OpenAI
+     MODELS = %w[
+       gpt-4o-mini gpt-4o gpt-4-turbo gpt-4 gpt-3.5-turbo gpt-4o-mini-2024-07-18 gpt-4o-2024-08-06
+       gpt-4o-2024-05-13 gpt-4-turbo-preview gpt-4-turbo-2024-04-09
+     ].freeze
+
+     EMBEDDINGS = %w[
+       text-embedding-3-small text-embedding-3-large text-embedding-ada-002
+     ].freeze
+
+     # Generate a response with support for structured output
+     def chat(settings, user_message, system_message = nil)
+       response = client.chat(
+         parameters: {
+           model: settings[:model], response_format: schema(settings), messages: [
+             system_message ? { role: :system, content: system_message } : nil,
+             { role: :user, content: user_message }
+           ].compact
+         }.compact
+       )
+
+       content = response.dig('choices', 0, 'message', 'content')
+       settings[:schema] ? Hashie::Mash.new(JSON.parse(content)) : content
+     end
+
+     # OpenAI’s text embeddings measure the relatedness of text strings. An embedding is a vector of floating point
+     # numbers. The distance between two vectors measures their relatedness. Small distances suggest high relatedness
+     # and large distances suggest low relatedness.
+     def embeddings(settings, chunks)
+       responses = chunks.map do |chunk|
+         response = client.embeddings(
+           parameters: { model: settings[:model], dimensions: settings[:dimensions], input: chunk }
+         )
+
+         response.dig('data', 0, 'embedding')
+       end
+
+       responses.one? ? responses.first : responses
+     end
+
+     private
+
+     def schema(settings)
+       return unless settings[:schema]
+
+       {
+         type: :json_schema,
+         json_schema: {
+           name: :schema,
+           schema: settings[:schema]
+         }
+       }
+     end
+
+     def client
+       @client ||= ::OpenAI::Client.new(
+         access_token: ENV.fetch('OPENAI_ACCESS_TOKEN')
+       )
+     end
+   end
+ end
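A sketch of the structured-output path: when `settings[:schema]` is present, `chat` sends a `json_schema` response format and parses the reply into a `Hashie::Mash`. The prompt and values below are illustrative, and `OPENAI_ACCESS_TOKEN` must be set in the environment:

```ruby
client = L::OpenAI.new
result = client.chat(
  { model: 'gpt-4o', schema: L.to_h(name: 'string', age: 'integer') },
  'Extract the person from: "John Doe, 30 years old"'
)

result.name # => "John Doe" (Hashie::Mash allows method-style access)
result.age  # => 30
```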
data/lib/lammy/schema.rb ADDED
@@ -0,0 +1,31 @@
+ # frozen_string_literal: true
+
+ module L
+   # Structured Outputs is a feature that ensures the model will always generate responses
+   # that adhere to your supplied JSON Schema, so you don't need to worry about the model
+   # omitting a required key or hallucinating an invalid enum value. This is a set of
+   # helper methods to help you define your JSON Schema easily.
+   module Schema
+     def to_a(object)
+       {
+         'type' => 'object',
+         'properties' => {
+           'items' => {
+             'type' => 'array', 'items' => to_h(object)
+           }
+         },
+         'required' => ['items'],
+         'additionalProperties' => false
+       }
+     end
+
+     def to_h(object)
+       {
+         'type' => 'object',
+         'properties' => object.inject({}) { |h, (k, v)| h.merge(k => { 'type' => v }) },
+         'required' => object.keys,
+         'additionalProperties' => false
+       }
+     end
+   end
+ end
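For reference, a sketch of what these helpers expand to. `to_h` turns a name-to-type mapping into a JSON Schema object, and `to_a` wraps that object schema in a required `items` array:

```ruby
L.to_h(name: 'string', age: 'integer')
# => {
#      'type' => 'object',
#      'properties' => { name: { 'type' => 'string' }, age: { 'type' => 'integer' } },
#      'required' => [:name, :age],
#      'additionalProperties' => false
#    }

L.to_a(name: 'string')
# => an object schema whose single required `items` property is an array
#    of the `to_h(name: 'string')` object schema above
```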
data/lib/lammy.rb ADDED
@@ -0,0 +1,62 @@
+ # frozen_string_literal: true
+
+ require 'lammy/embeddings'
+ require 'lammy/openai'
+ require 'lammy/schema'
+ require 'lammy/chat'
+
+ # Example:
+ #
+ # ```ruby
+ # class User < ApplicationRecord
+ #   include L
+ #
+ #   llm(model: 'gpt-4o')
+ #   def welcome
+ #     context "You are a helpful assistant that writes in lower case."
+ #     "Say hello to #{name.reverse} with a poem."
+ #   end
+ #
+ #   v(model: 'text-embedding-3-large')
+ #   def embeddings
+ #     chunk_by_size 256
+ #     welcome
+ #   end
+ # end
+ #
+ # user = User.new(name: 'John Doe')
+ # user.welcome
+ #
+ # # => "hello eoD nhoJ, let's make a cheer,\n
+ # # with a whimsical poem to bring you near.\n
+ # # though your name's in reverse, it’s clear and bright,\n
+ # # let's dance in verse on this delightful night.\n\n
+ # #
+ # # to a friend unique, in every single way,\n
+ # # we flip the letters but the bond will stay.\n
+ # # the sun may set and rise again,\n
+ # # with you, the fun will never wane.\n\n
+ # #
+ # # through twists and turns, in backwards flow,\n
+ # # we celebrate you in this poetic show.\n
+ # # eoD nhoJ, here's a cheer to you,\n
+ # # in every form, our friendship's true!"
+ # ```
+ module L
+   extend Schema
+
+   def self.included(base)
+     base.extend Chat
+     base.extend Embeddings
+     base.extend ClassMethods
+   end
+
+   # Wrap generative methods with handlers
+   module ClassMethods
+     def method_added(method_name)
+       handle_llm(method_name) if @next_llm_settings
+       handle_v(method_name) if @next_v_settings
+       super
+     end
+   end
+ end
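The wiring above relies on Ruby's `method_added` hook: `llm`/`v` stash settings, and the very next `def` triggers `method_added`, which hands the new method to `handle_llm`/`handle_v` for wrapping. A stripped-down sketch of the same decorator pattern, independent of this gem:

```ruby
module Wrappable
  def wrap_next(&around)
    @pending_wrapper = around
  end

  def method_added(name)
    return super unless @pending_wrapper

    wrapper = @pending_wrapper
    @pending_wrapper = nil # clear first so redefining below doesn't recurse
    original = instance_method(name)

    define_method(name) do |*args, &block|
      wrapper.call(original.bind(self).call(*args, &block))
    end

    super
  end
end

class Greeter
  extend Wrappable

  wrap_next { |result| result.upcase }
  def hello
    'hi'
  end
end

Greeter.new.hello # => "HI"
```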
metadata ADDED
@@ -0,0 +1,75 @@
+ --- !ruby/object:Gem::Specification
+ name: lammy
+ version: !ruby/object:Gem::Version
+   version: 0.1.0
+ platform: ruby
+ authors:
+ - Kamil Nicieja
+ autorequire:
+ bindir: bin
+ cert_chain: []
+ date: 2024-09-30 00:00:00.000000000 Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: hashie
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '5.0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '5.0'
+ - !ruby/object:Gem::Dependency
+   name: ruby-openai
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '7.1'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '7.1'
+ description: An LLM library for Ruby
+ email: kamil@nicieja.co
+ executables: []
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - lib/lammy.rb
+ - lib/lammy/chat.rb
+ - lib/lammy/embeddings.rb
+ - lib/lammy/openai.rb
+ - lib/lammy/schema.rb
+ homepage: https://github.com/nicieja/lammy
+ licenses:
+ - MIT
+ metadata: {}
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: '0'
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubygems_version: 3.5.3
+ signing_key:
+ specification_version: 4
+ summary: Lammy
+ test_files: []
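Per the gemspec, the gem ships with two runtime dependencies: hashie ~> 5.0 (for `Hashie::Mash`) and ruby-openai ~> 7.1 (the `require 'openai'` client). A sketch of pulling it into a project:

```ruby
# Gemfile
source 'https://rubygems.org'

gem 'lammy', '0.1.0'

# After `bundle install`, set OPENAI_ACCESS_TOKEN in the environment;
# lib/lammy/openai.rb reads it via ENV.fetch('OPENAI_ACCESS_TOKEN')
# and raises a KeyError if it is missing.
```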