sublayer 0.0.1
- checksums.yaml +7 -0
- data/LICENSE +21 -0
- data/README.md +155 -0
- data/Rakefile +8 -0
- data/examples/code_from_blueprint_generator.rb +30 -0
- data/examples/code_from_description_generator.rb +26 -0
- data/examples/description_from_code_generator.rb +23 -0
- data/examples/invalid_to_valid_json_generator.rb +23 -0
- data/lib/sublayer/components/output_adapters/single_string.rb +44 -0
- data/lib/sublayer/components/output_adapters.rb +11 -0
- data/lib/sublayer/generators/base.rb +17 -0
- data/lib/sublayer/providers/claude.rb +55 -0
- data/lib/sublayer/providers/gemini.rb +26 -0
- data/lib/sublayer/providers/groq.rb +53 -0
- data/lib/sublayer/providers/local.rb +56 -0
- data/lib/sublayer/providers/open_ai.rb +34 -0
- data/lib/sublayer/version.rb +5 -0
- data/lib/sublayer.rb +30 -0
- data/sublayer.gemspec +39 -0
- metadata +175 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA256:
  metadata.gz: 49d6bc9e7a56b5f5b7cb47aa3c46bfd5c30fe7ff5927b7b391fe370d70b8e4e3
  data.tar.gz: e54f5e28724f4c8fb8af83b13bd7a71de005266511b275540955376d91a2b6a9
SHA512:
  metadata.gz: df71cd627df49a5b76a631810137fb639e5c936a2a9f2516ccc90ede301428ea00f6fce79045b37539ff89a02f3bca1b69f4fd53816f91c1cd9aee153760d482
  data.tar.gz: 694f971637fd0609b6a0b07f3a178d7f3b51d42820b51a4e282c360a39b806541f8534976caa320a065e345b02f32069865da9e88de10ae31000d1697cc52dec
data/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023 Sublayer

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
data/README.md
ADDED
@@ -0,0 +1,155 @@
# Sublayer

A model-agnostic Ruby Generative AI DSL and framework. Provides base classes for
building Generators, Actions, Tasks, and Agents that can be used to build
AI-powered applications in Ruby.

## Installation

Install the gem by running the following commands:

    $ bundle
    $ gem build sublayer.gemspec
    $ gem install sublayer-0.0.1.gem

## Choose your AI Model

Sublayer is model-agnostic and can be used with any AI model. Below are the
currently supported providers and how to configure them.

### OpenAI (Default)

Expects you to have an OpenAI API key set in the `OPENAI_API_KEY` environment variable.

Visit [OpenAI](https://openai.com/product) to get an API key.

Usage:
```ruby
Sublayer.configuration.ai_provider = Sublayer::Providers::OpenAI
Sublayer.configuration.ai_model = "gpt-4-turbo-preview"
```

### Gemini

Expects you to have a Gemini API key set in the `GEMINI_API_KEY` environment variable.

Visit [Google AI Studio](https://ai.google.dev/) to get an API key.

Usage:
```ruby
Sublayer.configuration.ai_provider = Sublayer::Providers::Gemini
Sublayer.configuration.ai_model = "gemini-pro"
```

### Claude

Expects you to have a Claude API key set in the `ANTHROPIC_API_KEY` environment variable.

Visit [Anthropic](https://anthropic.com/) to get an API key.

Usage:
```ruby
Sublayer.configuration.ai_provider = Sublayer::Providers::Claude
Sublayer.configuration.ai_model = "claude-3-opus-20240229"
```

### Groq

Expects you to have a Groq API key set in the `GROQ_API_KEY` environment variable.

Visit [Groq Console](https://console.groq.com/) to get an API key.

Usage:
```ruby
Sublayer.configuration.ai_provider = Sublayer::Providers::Groq
Sublayer.configuration.ai_model = "mixtral-8x7b-32768"
```

### Local

Supports running a local model that serves an API on http://localhost:8080.

The simplest way to do this is to download
[llamafile](https://github.com/Mozilla-Ocho/llamafile) and grab one of the
server llamafiles they provide. We've also tested with [this Mixtral
model](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO-GGUF)
from [Nous Research](https://nousresearch.com/).

```ruby
Sublayer.configuration.ai_provider = Sublayer::Providers::Local
Sublayer.configuration.ai_model = "LLaMA_CPP"
```

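The provider settings above can also be applied in one place with the `Sublayer.configure` helper defined in `lib/sublayer.rb`; a minimal sketch using the default provider and model:

```ruby
# Sublayer.configure yields the same configuration object that the
# examples above assign to directly.
Sublayer.configure do |config|
  config.ai_provider = Sublayer::Providers::OpenAI
  config.ai_model = "gpt-4-turbo-preview"
end
```
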
## Concepts

### Generators

Generators are responsible for generating specific outputs based on input data.
They focus on a single generation task and do not perform any actions or complex
decision-making. Generators are the building blocks of the Sublayer framework.

Examples (in the /examples/ directory; see the usage sketch after this list):
- CodeFromDescriptionGenerator: Generates code based on a description and the
  technologies used.
- DescriptionFromCodeGenerator: Generates a description of the code passed in to
  it.
- CodeFromBlueprintGenerator: Generates code based on a blueprint, a blueprint
  description, and a description of the desired code.

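A minimal sketch of running one of the bundled example generators (the description and technology values below are made up for illustration):

```ruby
# Pick a provider (OpenAI is the default), then instantiate an example
# generator and call #generate, which delegates to the configured provider
# and returns the single string produced by the output adapter.
generator = CodeFromDescriptionGenerator.new(
  description: "a method that reverses a string", # hypothetical input
  technologies: ["Ruby"]
)

generated_code = generator.generate
puts generated_code
```
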
### Actions (Coming Soon)

Actions are responsible for performing specific operations to get inputs for a
Generator or based on the generated output from a Generator. They encapsulate a
single action and do not involve complex decision-making. Actions are the
executable units that bring the generated inputs to life.

Examples:
- SaveToFileAction: Saves generated output to a file.
- RunCommandLineCommandAction: Runs a generated command line command.

### Tasks (Coming Soon)

Tasks combine Generators and Actions to accomplish a specific goal. They involve
a sequence of generation and action steps that may include basic decision-making
and flow control. Tasks are the high-level building blocks that define the
desired outcome.

Examples:
- ModifyFileContentsTask: Generates new file contents based on the existing
  contents and a set of rules, and then saves the new contents to the file.

### Agents (Coming Soon)

Agents are high-level entities that coordinate and orchestrate multiple Tasks to
achieve a broader goal. They involve complex decision-making, monitoring, and
adaptation based on the outcomes of the Tasks. Agents are the intelligent
supervisors that manage the overall workflow.

Examples:
- CustomerSupportAgent: Handles customer support inquiries by using various
  Tasks such as understanding the customer's issue, generating appropriate
  responses, and performing actions like sending emails or creating support
  tickets.

## Usage Examples

There are sample Generators in the /examples/ directory that demonstrate how to
build generators using the Sublayer framework. Alternatively, below are links to
open source projects that use generators in different ways:

- [Blueprints](https://blueprints.sublayer.com) - An open source AI code
  assistant that allows you to capture patterns in your codebase to use as a
  base for generating new code.

- [Clag](https://github.com/sublayerapp/clag) - A Ruby gem that generates
  command line commands from a simple description right in your terminal.

## Development

TBD

## Contributing

TBD
data/examples/code_from_blueprint_generator.rb
ADDED
@@ -0,0 +1,30 @@
class CodeFromBlueprintGenerator < Sublayer::Generators::Base
  llm_output_adapter type: :single_string,
    name: "generated_code",
    description: "The generated code for the description"

  def initialize(blueprint_description:, blueprint_code:, description:)
    @blueprint_description = blueprint_description
    @blueprint_code = blueprint_code
    @description = description
  end

  def generate
    super
  end

  def prompt
    <<-PROMPT
      You are an expert programmer and are great at looking at and understanding existing patterns and applying them to new situations.

      The blueprint we're working with is: #{@blueprint_description}.
      The code for that blueprint is:
      #{@blueprint_code}

      You need to use the blueprint above and modify it so that it satisfies the following description:
      #{@description}

      Take a deep breath and think step by step before you start coding.
    PROMPT
  end
end
data/examples/code_from_description_generator.rb
ADDED
@@ -0,0 +1,26 @@
class CodeFromDescriptionGenerator < Sublayer::Generators::Base
  llm_output_adapter type: :single_string,
    name: "generated_code",
    description: "The generated code in the requested language"

  def initialize(description:, technologies:)
    @description = description
    @technologies = technologies
  end

  def generate
    super
  end

  def prompt
    <<-PROMPT
      You are an expert programmer in #{@technologies.join(", ")}.

      You are tasked with writing code using the following technologies: #{@technologies.join(", ")}.

      The description of the task is #{@description}

      Take a deep breath and think step by step before you start coding.
    PROMPT
  end
end
data/examples/description_from_code_generator.rb
ADDED
@@ -0,0 +1,23 @@
class DescriptionFromCodeGenerator < Sublayer::Generators::Base
  llm_output_adapter type: :single_string,
    name: "code_description",
    description: "A description of what the code in the file does"

  def initialize(code:)
    @code = code
  end

  def generate
    super
  end

  def prompt
    <<-PROMPT
      You are an experienced software engineer. Below is a chunk of code:

      #{@code}

      Please read the code carefully and provide a high-level description of what this code does, including its purpose, functionalities, and any noteworthy details.
    PROMPT
  end
end
data/examples/invalid_to_valid_json_generator.rb
ADDED
@@ -0,0 +1,23 @@
class InvalidToValidJsonGenerator < Sublayer::Generators::Base
  llm_output_adapter type: :single_string,
    name: "valid_json",
    description: "The valid JSON string"

  def initialize(invalid_json:)
    @invalid_json = invalid_json
  end

  def generate
    super
  end

  def prompt
    <<-PROMPT
      You are an expert in JSON parsing.

      The given string is not valid JSON: #{@invalid_json}

      Please fix this and produce valid JSON.
    PROMPT
  end
end
data/lib/sublayer/components/output_adapters/single_string.rb
ADDED
@@ -0,0 +1,44 @@
module Sublayer
  module Components
    module OutputAdapters
      class SingleString
        attr_reader :name

        def initialize(options)
          @name = options[:name]
          @description = options[:description]
        end

        def to_hash
          {
            name: @name,
            description: @description,
            parameters: {
              type: "object",
              properties: {
                @name => {
                  type: "string",
                  description: @description
                }
              }
            }
          }
        end

        def to_xml
          <<-XML
            <tool_description>
              <tool_name>#{@name}</tool_name>
              <tool_description>#{@description}</tool_description>
              <parameters>
                <name>#{@name}</name>
                <type>string</type>
                <description>#{@description}</description>
              </parameters>
            </tool_description>
          XML
        end
      end
    end
  end
end
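
For concreteness, a short sketch of what this adapter builds for a generator that declares `name: "generated_code"` (the value in the comment is simply the hash `to_hash` returns for those options):

```ruby
adapter = Sublayer::Components::OutputAdapters::SingleString.new(
  name: "generated_code",
  description: "The generated code for the description"
)

adapter.to_hash
# => {
#      name: "generated_code",
#      description: "The generated code for the description",
#      parameters: {
#        type: "object",
#        properties: {
#          "generated_code" => { type: "string", description: "The generated code for the description" }
#        }
#      }
#    }
```
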
data/lib/sublayer/generators/base.rb
ADDED
@@ -0,0 +1,17 @@
require "pry"
module Sublayer
  module Generators
    class Base
      attr_reader :results

      def self.llm_output_adapter(options)
        output_adapter = Sublayer::Components::OutputAdapters.create(options)
        const_set(:OUTPUT_ADAPTER, output_adapter)
      end

      def generate
        @results = Sublayer.configuration.ai_provider.call(prompt: prompt, output_adapter: self.class::OUTPUT_ADAPTER)
      end
    end
  end
end
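
Putting `Base` together with the single_string adapter, a new generator declares its output field, stores its inputs, and defines a `prompt`; the class below is a hypothetical sketch that mirrors the bundled examples:

```ruby
# Hypothetical generator following the same pattern as the files in /examples/.
class CommitMessageGenerator < Sublayer::Generators::Base
  llm_output_adapter type: :single_string,
    name: "commit_message",
    description: "A one-line commit message describing the diff"

  def initialize(diff:)
    @diff = diff
  end

  def generate
    super
  end

  def prompt
    <<-PROMPT
      You are an experienced software engineer.

      Write a concise one-line commit message for the following diff:

      #{@diff}
    PROMPT
  end
end
```
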
data/lib/sublayer/providers/claude.rb
ADDED
@@ -0,0 +1,55 @@
# Sublayer.configuration.ai_provider = Sublayer::Providers::Claude
# Sublayer.configuration.ai_model = "claude-3-opus-20240229"

module Sublayer
  module Providers
    class Claude
      def self.call(prompt:, output_adapter:)
        system_prompt = <<-PROMPT
          In this environment you have access to a set of tools you can use to answer the user's question.

          You may call them like this:
          <function_calls>
            <invoke>
              <tool_name>$TOOL_NAME</tool_name>
              <parameters>
                <$PARAMETER_NAME>$PARAMETER_VALUE</$PARAMETER_NAME>
                ...
              </parameters>
            </invoke>
          </function_calls>

          Here are the tools available:
          <tools>
            #{output_adapter.to_xml}
          </tools>

          Respond only with valid xml. The entire response should be wrapped in a <response> tag. Any additional information not inside a tool call should go in a <scratch> tag.
        PROMPT

        response = HTTParty.post(
          "https://api.anthropic.com/v1/messages",
          headers: {
            "x-api-key": ENV["ANTHROPIC_API_KEY"],
            "anthropic-version": "2023-06-01",
            "content-type": "application/json"
          },
          body: {
            model: Sublayer.configuration.ai_model,
            max_tokens: 4096,
            system: system_prompt,
            messages: [{ "role": "user", "content": prompt }]
          }.to_json
        )
        raise "Error generating with Claude, error: #{response.body}" unless response.code == 200

        text_containing_xml = JSON.parse(response.body).dig("content", 0, "text")
        xml = text_containing_xml.match(/\<response\>(.*?)\<\/response\>/m).to_s
        response_xml = ::Nokogiri::XML(xml)
        function_output = response_xml.at_xpath("//response/function_calls/invoke/parameters/#{output_adapter.name}").children.to_s

        return function_output
      end
    end
  end
end
data/lib/sublayer/providers/gemini.rb
ADDED
@@ -0,0 +1,26 @@
# Sublayer.configuration.ai_provider = Sublayer::Providers::Gemini
# Sublayer.configuration.ai_model = "gemini-pro"

module Sublayer
  module Providers
    class Gemini
      def self.call(prompt:, output_adapter:)
        response = HTTParty.post(
          "https://generativelanguage.googleapis.com/v1beta/models/#{Sublayer.configuration.ai_model}:generateContent?key=#{ENV['GEMINI_API_KEY']}",
          body: {
            tools: { function_declarations: [output_adapter.to_hash] },
            contents: { role: "user", parts: { text: prompt } }
          }.to_json,
          headers: {
            "Content-Type" => "application/json"
          })

        part = response.dig('candidates', 0, 'content', 'parts', 0)
        raise "No function called" unless part['functionCall']

        args = part['functionCall']['args']
        args[output_adapter.name]
      end
    end
  end
end
data/lib/sublayer/providers/groq.rb
ADDED
@@ -0,0 +1,53 @@
# Sublayer.configuration.ai_provider = Sublayer::Providers::Groq
# Sublayer.configuration.ai_model = "mixtral-8x7b-32768"

module Sublayer
  module Providers
    class Groq
      def self.call(prompt:, output_adapter:)
        system_prompt = <<-PROMPT
          In this environment you have access to a set of tools you can use to answer the user's question.

          You may call them like this:
          <function_calls>
            <invoke>
              <tool_name>$TOOL_NAME</tool_name>
              <parameters>
                <#{output_adapter.name}>value</#{output_adapter.name}>
                ...
              </parameters>
            </invoke>
          </function_calls>

          Here are the tools available:
          <tools>
            #{output_adapter.to_xml}
          </tools>

          Respond only with valid xml.
          The entire response should be wrapped in a <response> tag.
          Any additional information not inside a tool call should go in a <scratch> tag.
        PROMPT

        response = HTTParty.post(
          "https://api.groq.com/openai/v1/chat/completions",
          headers: {
            "Authorization": "Bearer #{ENV["GROQ_API_KEY"]}",
            "Content-Type": "application/json"
          },
          body: {
            "messages": [{ "role": "user", "content": "#{system_prompt}\n#{prompt}" }],
            "model": Sublayer.configuration.ai_model
          }.to_json
        )

        text_containing_xml = JSON.parse(response.body).dig("choices", 0, "message", "content")
        xml = text_containing_xml.match(/\<response\>(.*?)\<\/response\>/m).to_s
        response_xml = ::Nokogiri::XML(xml)
        function_output = response_xml.at_xpath("//response/function_calls/invoke/parameters/#{output_adapter.name}").children.to_s

        return function_output
      end
    end
  end
end
data/lib/sublayer/providers/local.rb
ADDED
@@ -0,0 +1,56 @@
# Sublayer.configuration.ai_provider = Sublayer::Providers::Local
# Sublayer.configuration.ai_model = "LLaMA_CPP"

module Sublayer
  module Providers
    class Local
      def self.call(prompt:, output_adapter:)
        system_prompt = <<-PROMPT
          In this environment you have access to a set of tools you can use to answer the user's question.

          You may call them like this:
          <function_calls>
            <invoke>
              <tool_name>$TOOL_NAME</tool_name>
              <parameters>
                <#{output_adapter.name}>value</#{output_adapter.name}>
                ...
              </parameters>
            </invoke>
          </function_calls>

          Here are the tools available:
          <tools>
            #{output_adapter.to_xml}
          </tools>

          Respond only with valid xml.
          The entire response should be wrapped in a <response> tag.
          Any additional information not inside a tool call should go in a <scratch> tag.
        PROMPT

        response = HTTParty.post(
          "http://localhost:8080/v1/chat/completions",
          headers: {
            "Authorization": "Bearer no-key",
            "Content-Type": "application/json"
          },
          body: {
            "model": Sublayer.configuration.ai_model,
            "messages": [
              { "role": "system", "content": system_prompt },
              { "role": "user", "content": prompt }
            ]
          }.to_json
        )

        text_containing_xml = JSON.parse(response.body).dig("choices", 0, "message", "content")
        xml = text_containing_xml.match(/\<response\>(.*?)\<\/response\>/m).to_s
        response_xml = ::Nokogiri::XML(xml)
        function_output = response_xml.at_xpath("//parameters/#{output_adapter.name}").children.to_s

        return function_output
      end
    end
  end
end
data/lib/sublayer/providers/open_ai.rb
ADDED
@@ -0,0 +1,34 @@
# Sublayer.configuration.ai_provider = Sublayer::Providers::OpenAI
# Sublayer.configuration.ai_model = "gpt-4-turbo-preview"

module Sublayer
  module Providers
    class OpenAI
      def self.call(prompt:, output_adapter:)
        client = ::OpenAI::Client.new(access_token: ENV["OPENAI_API_KEY"])

        response = client.chat(
          parameters: {
            model: Sublayer.configuration.ai_model,
            messages: [
              {
                "role": "user",
                "content": prompt
              }
            ],
            function_call: { name: output_adapter.name },
            functions: [
              output_adapter.to_hash
            ]
          })

        message = response.dig("choices", 0, "message")
        raise "No function called" unless message["function_call"]

        function_name = message.dig("function_call", "name")
        args_from_llm = message.dig("function_call", "arguments")
        JSON.parse(args_from_llm)[output_adapter.name]
      end
    end
  end
end
data/lib/sublayer.rb
ADDED
@@ -0,0 +1,30 @@
# frozen_string_literal: true

require "zeitwerk"
require 'active_support'
require 'active_support/core_ext/hash/indifferent_access'
require 'active_support/inflector'
require 'ostruct'
require "httparty"
require "openai"
require "nokogiri"
require_relative "sublayer/version"

loader = Zeitwerk::Loader.for_gem
loader.inflector.inflect('open_ai' => 'OpenAI')
loader.setup

module Sublayer
  class Error < StandardError; end

  def self.configuration
    @configuration ||= OpenStruct.new(
      ai_provider: Sublayer::Providers::OpenAI,
      ai_model: "gpt-4-turbo-preview"
    )
  end

  def self.configure
    yield(configuration) if block_given?
  end
end
data/sublayer.gemspec
ADDED
@@ -0,0 +1,39 @@
# frozen_string_literal: true

require_relative "lib/sublayer/version"

Gem::Specification.new do |spec|
  spec.name = "sublayer"
  spec.version = Sublayer::VERSION
  spec.authors = ["Scott Werner"]
  spec.email = ["scott@sublayer.com"]
  spec.license = "MIT"

  spec.summary = "A model-agnostic Ruby GenerativeAI DSL and Framework"
  spec.description = "A DSL and framework for building AI powered applications through the use of Generators, Actions, Tasks, and Agents"
  spec.homepage = "https://www.sublayer.com"
  spec.required_ruby_version = ">= 2.6.0"

  spec.metadata["homepage_uri"] = spec.homepage

  # Specify which files should be added to the gem when it is released.
  # The `git ls-files -z` loads the files in the RubyGem that have been added into git.
  spec.files = Dir.chdir(__dir__) do
    `git ls-files -z`.split("\x0").reject do |f|
      (File.expand_path(f) == __FILE__) ||
        f.start_with?(*%w[bin/ test/ spec/ features/ .git .circleci appveyor Gemfile])
    end
  end

  spec.require_paths = ["lib"]

  spec.add_dependency "ruby-openai"
  spec.add_dependency "colorize"
  spec.add_dependency "activesupport"
  spec.add_dependency "zeitwerk"
  spec.add_dependency "nokogiri"
  spec.add_dependency "httparty"

  spec.add_development_dependency "rspec", "~> 3.12"
  spec.add_development_dependency "pry", "~> 0.14"
end
metadata
ADDED
@@ -0,0 +1,175 @@
--- !ruby/object:Gem::Specification
name: sublayer
version: !ruby/object:Gem::Version
  version: 0.0.1
platform: ruby
authors:
- Scott Werner
autorequire:
bindir: bin
cert_chain: []
date: 2024-03-12 00:00:00.000000000 Z
dependencies:
- !ruby/object:Gem::Dependency
  name: ruby-openai
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
- !ruby/object:Gem::Dependency
  name: colorize
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
- !ruby/object:Gem::Dependency
  name: activesupport
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
- !ruby/object:Gem::Dependency
  name: zeitwerk
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
- !ruby/object:Gem::Dependency
  name: nokogiri
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
- !ruby/object:Gem::Dependency
  name: httparty
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
- !ruby/object:Gem::Dependency
  name: rspec
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '3.12'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '3.12'
- !ruby/object:Gem::Dependency
  name: pry
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '0.14'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '0.14'
description: A DSL and framework for building AI powered applications through the
  use of Generators, Actions, Tasks, and Agents
email:
- scott@sublayer.com
executables: []
extensions: []
extra_rdoc_files: []
files:
- LICENSE
- README.md
- Rakefile
- examples/code_from_blueprint_generator.rb
- examples/code_from_description_generator.rb
- examples/description_from_code_generator.rb
- examples/invalid_to_valid_json_generator.rb
- lib/sublayer.rb
- lib/sublayer/components/output_adapters.rb
- lib/sublayer/components/output_adapters/single_string.rb
- lib/sublayer/generators/base.rb
- lib/sublayer/providers/claude.rb
- lib/sublayer/providers/gemini.rb
- lib/sublayer/providers/groq.rb
- lib/sublayer/providers/local.rb
- lib/sublayer/providers/open_ai.rb
- lib/sublayer/version.rb
- sublayer.gemspec
homepage: https://www.sublayer.com
licenses:
- MIT
metadata:
  homepage_uri: https://www.sublayer.com
post_install_message:
rdoc_options: []
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: 2.6.0
required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
requirements: []
rubygems_version: 3.3.26
signing_key:
specification_version: 4
summary: A model-agnostic Ruby GenerativeAI DSL and Framework
test_files: []