rach 0.2.0 → 0.2.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +73 -7
- data/lib/rach/client.rb +43 -30
- data/lib/rach/prompt.rb +26 -0
- data/lib/rach/provider/anthropic.rb +140 -0
- data/lib/rach/provider/base.rb +27 -0
- data/lib/rach/provider/openai.rb +54 -0
- data/lib/rach/provider.rb +27 -0
- data/lib/rach/response.rb +13 -20
- data/lib/rach/version.rb +2 -1
- data/lib/rach.rb +6 -0
- metadata +36 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 9e0ae465e2ccd6c5f2c56690cd7ee0a7c7fd7dd892df5e08091f678a9c2a9f32
+  data.tar.gz: d9f5054384e3b7d510f7046ff306a99b5a84b1831ca9dbafe1189c750d268b4d
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f34a357311318ab7086b8cdf42e75ad070897bb1b9bfdad8b07394d85e5894a908e58a96ac51774cfe9334e26c29b2d3b7a7b7c7954004b7fedd188a5cb54ace
+  data.tar.gz: 87ae8011f8c9efcfb025d4019e6343764175915b162b68c281be9b528ef05898cbf1c88384dc865ab79be1ec1b88597b81faec288a6196c214d2da9a058317fa
data/README.md
CHANGED
@@ -26,9 +26,10 @@ $ gem install rach
 
 ```ruby
 require 'rach'
-client = Rach::Client.new(access_token: YOUR_OPENAI_API_KEY)
+client = Rach::Client.new(access_token: YOUR_OPENAI_API_KEY, model: "gpt-4o")
 response = client.chat("Hello, how are you?")
 puts response.content
+# => "Hello! I'm just a computer program, so I don't have feelings, but I'm here and ready to help you. How can I assist you today?"
 ```
 
 ### Conversations
@@ -38,7 +39,7 @@ Rach supports stateful conversations with memory:
 ```ruby
 require 'rach'
 
-client = Rach::Client.new(access_token: YOUR_OPENAI_API_KEY)
+client = Rach::Client.new(access_token: YOUR_OPENAI_API_KEY, model: "gpt-4o")
 convo = Rach::Conversation.new
 convo.system "You teach the German language."
 convo.user "Translate: There are two birds singing outside my window."
@@ -47,17 +48,19 @@ response = client.chat(convo)
 response.content
 # => "Es gibt zwei Vögel, die draußen vor meinem Fenster singen."
 
-convo.add_response(response)
-
 # Continue the conversation...
+convo.add_response(response)
 convo.user "What are the verbs in your translation?"
-client.chat(convo)
-
+response = client.chat(convo)
+response.content
+# => "The verbs in the translation \"Es gibt zwei Vögel, die vor meinem Fenster singen\" are \"gibt\" and \"singen.\""
 
 # Remove the last message from the conversation history and continue
 convo.pop
 convo.user "Explain the structure of your translation."
-client.chat(convo)
+response = client.chat(convo)
+response.content
+# => "Your last message to me was: \"Translate: There are two birds singing outside my window.\""
 ```
 
 ### Response Formatting
@@ -120,6 +123,69 @@ response.tool_calls
 #   "function"=>{"name"=>"get_current_weather", "arguments"=>"{\"location\":\"San Francisco, CA\",\"unit\":\"celsius\"}"}}]
 ```
 
+### Multiple Providers
+
+Rach supports using multiple providers in your application. You can configure different providers and their parameters when creating a client:
+
+```ruby
+client = Rach::Client.new(
+  providers: {
+    openai: {
+      access_token: YOUR_OPENAI_API_KEY
+    },
+    anthropic: {
+      access_token: YOUR_ANTHROPIC_API_KEY
+    }
+  }
+)
+
+# Use specific provider
+response = client.chat("Hello!", model: "gpt-4o")
+puts response.content
+
+# Switch to another provider
+response = client.chat("Hi there!", model: "claude-3-5-sonnet-20241022")
+puts response.content
+```
+
+### Logging
+
+Rach supports logging of API calls and their parameters. You can provide any logger that responds to the `info` method:
+
+```ruby
+require 'logger'
+
+# Create a logger that writes to STDOUT
+logger = Logger.new(STDOUT)
+
+# Pass the logger when creating the client
+client = Rach::Client.new(
+  access_token: YOUR_OPENAI_API_KEY,
+  model: "gpt-4",
+  logger: logger
+)
+
+# Now when you make API calls, parameters will be logged
+client.chat("Hello!")
+# [2024-01-20T10:30:00.000Z] INFO: Making API call to openai
+# [2024-01-20T10:30:00.000Z] INFO: Request parameters: {:model=>"gpt-4", :messages=>[{:role=>"user", :content=>"Hello!"}], :temperature=>1.0}
+```
+
+You can also use your own custom logger as long as it responds to the `info` method:
+
+```ruby
+class CustomLogger
+  def info(message)
+    puts "[RACH] #{message}"
+  end
+end
+
+client = Rach::Client.new(
+  access_token: YOUR_OPENAI_API_KEY,
+  model: "gpt-4",
+  logger: CustomLogger.new
+)
+```
+
 ## License
 
data/lib/rach/client.rb
CHANGED
@@ -1,44 +1,57 @@
 module Rach
   class Client
-    attr_reader :tracker, :client, :model
+    attr_reader :tracker, :client, :model, :providers
+    attr_accessor :logger
 
-    def initialize(access_token
-      @client = OpenAI::Client.new(log_errors: true, access_token: access_token)
-      @model = model
+    def initialize(providers: nil, access_token: nil, model: nil, logger: nil, **kwargs)
       @tracker = UsageTracker.new
+      @providers = {}
+      @logger = logger
+      @default_model = model
+
+      if providers
+        setup_providers(providers)
+      elsif access_token && model
+        provider = Provider.for(model)
+        setup_providers({ provider.key => { access_token: access_token } })
+      else
+        raise ArgumentError, "Either (providers) or (access_token AND model) must be provided"
+      end
     end
 
-    def chat(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    def chat(input, **options)
+      prompt = input.is_a?(Prompt) ? input : Prompt.new(input, **options)
+      model = prompt.model || @default_model
+
+      raise ArgumentError, "No model specified" unless model
+
+      provider_key = Provider.for(model).key
+      client = @providers[provider_key]
+
+      # Filter out options that are already handled by Prompt
+      filtered_options = options.reject { |k, _| [:model, :temperature, :response_format, :tools].include?(k) }
+
+      request_params = {
+        model:,
+        messages: prompt.to_messages,
+        response_format: prompt.response_format,
+        temperature: prompt.temperature,
+        tools: prompt.tools&.map(&:function_schema),
+        **filtered_options # Pass through remaining options to the underlying client
+      }.compact
+
+
+      response = client.chat(parameters: request_params)
+      tracker.track(response)
       response
     end
 
     private
 
-    def
-
-
-      [
-      when Message
-        [prompt.to_h]
-      when Conversation
-        prompt.to_a
-      else
-        raise ArgumentError, "prompt must be a String, Message, or Conversation"
+    def setup_providers(provider_configs)
+      provider_configs.each do |provider_key, config|
+        provider_class = Provider.get_provider_class(provider_key)
+        @providers[provider_class.key] = provider_class.new(logger: @logger, **config)
       end
     end
   end
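To make the reworked flow concrete, here is a small usage sketch under the new API; the token, model, and prompt text are placeholders, not values from this diff:

```ruby
# Illustrative sketch of the new Client#chat call flow.
require 'rach'

client = Rach::Client.new(access_token: "sk-...", model: "gpt-4o")

# A bare String is wrapped in a Prompt; Prompt#model is nil here, so
# @default_model ("gpt-4o") is used and Provider.for picks OpenAI.
response = client.chat("Say hi in German.", temperature: 0.2)
puts response.content
```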
data/lib/rach/prompt.rb
ADDED
@@ -0,0 +1,26 @@
+module Rach
+  class Prompt
+    attr_reader :content, :model, :temperature, :response_format, :tools
+
+    def initialize(content, model: nil, temperature: 0, response_format: nil, tools: nil)
+      @content = content
+      @model = model
+      @temperature = temperature
+      @response_format = response_format
+      @tools = tools
+    end
+
+    def to_messages
+      case content
+      when String
+        [{ role: "user", content: content }]
+      when Message
+        [content.to_h]
+      when Conversation
+        content.to_a
+      else
+        raise ArgumentError, "content must be a String, Message, or Conversation"
+      end
+    end
+  end
+end
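A quick sketch of what `Prompt#to_messages` yields per input type; the `Conversation` output shape is assumed from its use here, since `conversation.rb` is not part of this diff:

```ruby
# String input becomes a single user message:
Rach::Prompt.new("Hello").to_messages
# => [{ role: "user", content: "Hello" }]

# A Conversation is flattened to its accumulated message array
# (output shape assumed from Conversation's role methods):
convo = Rach::Conversation.new
convo.system "You teach the German language."
convo.user "Translate: good morning"
Rach::Prompt.new(convo, model: "gpt-4o").to_messages
# => [{ role: "system", content: "You teach the German language." },
#     { role: "user", content: "Translate: good morning" }]
```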
data/lib/rach/provider/anthropic.rb
ADDED
@@ -0,0 +1,140 @@
+require 'securerandom'
+require 'json'
+
+module Rach
+  module Provider
+    class Anthropic < Base
+
+      def initialize(access_token: nil, logger: nil, **kwargs)
+        @client = create_client(access_token, **kwargs)
+        @logger = logger
+      end
+
+      def chat(**parameters)
+        # Extract system message if present
+        messages = parameters.dig(:parameters, :messages) || []
+        system_message = messages.find { |msg| msg[:role] == "system" }
+
+        # Remove system message from messages array if present
+        messages = messages.reject { |msg| msg[:role] == "system" } if system_message
+
+        # Convert messages to Anthropic format
+        messages = messages.map do |msg|
+          {
+            role: msg[:role] == "assistant" ? "assistant" : "user",
+            content: msg[:content]
+          }
+        end
+
+        temperature = (parameters.dig(:parameters, :temperature) || 1).clamp(0, 1)
+        max_tokens = parameters.dig(:parameters, :max_tokens) || 1024
+        tools = convert_tools(parameters.dig(:parameters, :tools))
+
+        anthropic_params = {
+          model: parameters.dig(:parameters, :model),
+          messages:,
+          temperature:,
+          max_tokens:,
+          tools:,
+          tool_choice: convert_tool_choice(tools),
+          system: system_message&.[](:content)
+        }.compact
+
+        if @logger
+          @logger.info("Making API call to Anthropic")
+          @logger.info("Request parameters: #{anthropic_params.inspect}")
+        end
+
+        raw_response = @client.messages(
+          parameters: anthropic_params.compact
+          # parameters: {
+          #   model: anthropic_params[:model],
+          #   messages: anthropic_params[:messages],
+          #   system: anthropic_params[:system],
+          #   temperature: anthropic_params[:temperature],
+          #   max_tokens: anthropic_params[:max_tokens],
+          #   tools: anthropic_params[:tools],
+          #   tool_choice: anthropic_params[:tool_choice],
+          # }.compact
+        )
+
+        if @logger
+          @logger.info("Request to Anthropic: #{JSON.pretty_generate(anthropic_params)}")
+          @logger.info("Response: #{JSON.pretty_generate(raw_response)}")
+        end
+
+        Response.new(
+          id: raw_response["id"],
+          model: raw_response["model"],
+          content: raw_response.dig("content", 0, "text"),
+          tool_calls: convert_tool_calls(raw_response["content"]),
+          usage: {
+            "prompt_tokens" => raw_response["usage"]["input_tokens"],
+            "completion_tokens" => raw_response["usage"]["output_tokens"],
+            "total_tokens" => raw_response["usage"]["input_tokens"] + raw_response["usage"]["output_tokens"]
+          },
+          raw_response: raw_response
+        )
+      end
+
+      def self.supports?(model)
+        model.start_with?("claude")
+      end
+
+      private
+
+      def create_client(access_token, **kwargs)
+        ::Anthropic::Client.new(
+          access_token: access_token,
+          **kwargs
+        )
+      end
+
+      def convert_tools(functions)
+        return nil if functions.nil?
+
+        functions.map do |fn|
+          {
+            name: fn[:function][:name],
+            description: fn[:function][:description],
+            input_schema: {
+              type: "object",
+              properties: fn[:function][:parameters][:properties],
+              required: fn[:function][:parameters][:required]
+            }
+          }
+        end
+      end
+
+      def convert_tool_calls(content)
+        return nil if content.nil?
+
+        tool_calls = content.select { |c| c["type"] == "tool_use" }
+        return nil if tool_calls.empty?
+
+        tool_calls.map do |call|
+          {
+            "id" => call["id"],
+            "type" => "function",
+            "function" => {
+              "name" => call["name"],
+              "arguments" => call["input"].to_json
+            }
+          }
+        end
+      end
+
+      def convert_tool_choice(tools)
+        if tools
+          if tools.size == 1
+            { type: "tool", name: tools.first[:name] }
+          else
+            { type: "any" }
+          end
+        else
+          nil
+        end
+      end
+    end
+  end
+end
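For orientation, a sketch of the translation `Anthropic#chat` performs on an OpenAI-style request; all values are illustrative, and the commented output is inferred from the code above, including the 1024 `max_tokens` default and the extracted `system` field:

```ruby
# Illustrative input/output of the conversion in Anthropic#chat.
openai_style = {
  parameters: {
    model: "claude-3-5-sonnet-20241022",
    messages: [
      { role: "system", content: "You are terse." },
      { role: "user", content: "Hi" }
    ],
    temperature: 0.2
  }
}

# The payload sent to ::Anthropic::Client#messages would be roughly:
# {
#   model: "claude-3-5-sonnet-20241022",
#   messages: [{ role: "user", content: "Hi" }],  # system entry removed
#   temperature: 0.2,                             # clamped to 0..1
#   max_tokens: 1024,                             # default when unspecified
#   system: "You are terse."                      # extracted system prompt
# }
```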
data/lib/rach/provider/base.rb
ADDED
@@ -0,0 +1,27 @@
+module Rach
+  module Provider
+    class Base
+      def initialize(access_token, **kwargs)
+        @client = create_client(access_token, **kwargs)
+      end
+
+      def self.key
+        name.split("::").last.downcase.to_sym
+      end
+
+      def self.supports?(model)
+        raise NotImplementedError
+      end
+
+      def chat(**parameters)
+        raise NotImplementedError
+      end
+
+      private
+
+      def create_client(access_token, **kwargs)
+        raise NotImplementedError
+      end
+    end
+  end
+end
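`Base` fixes the provider contract: `key` derives a registry symbol from the class name, and subclasses override `supports?`, `chat`, and `create_client`. A hypothetical subclass, purely to illustrate that contract (not part of the gem):

```ruby
module Rach
  module Provider
    # Hypothetical provider, not part of the gem; it only illustrates
    # the methods Base expects subclasses to supply.
    class Echo < Base
      def self.supports?(model)
        model.start_with?("echo")
      end

      def chat(**parameters)
        parameters # a real provider would call its API client here
      end

      private

      def create_client(access_token, **kwargs)
        access_token # stand-in for a real API client object
      end
    end
  end
end

Rach::Provider::Echo.key # => :echo, derived from the class name
```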
data/lib/rach/provider/openai.rb
ADDED
@@ -0,0 +1,54 @@
+module Rach
+  module Provider
+    class OpenAI < Base
+
+      def initialize(access_token: nil, logger: nil, **kwargs)
+        @client = create_client(access_token, **kwargs)
+        @logger = logger
+      end
+
+      def chat(**parameters)
+        raw_response = @client.chat(**parameters)
+
+        if @logger
+          @logger.info("Request to OpenAI: #{JSON.pretty_generate(parameters)}")
+          @logger.info("Response: #{JSON.pretty_generate(raw_response)}")
+        end
+
+        Response.new(
+          id: raw_response["id"],
+          model: raw_response["model"],
+          created_at: Time.at(raw_response["created"]),
+          content: raw_response.dig("choices", 0, "message", "content"),
+          tool_calls: raw_response.dig("choices", 0, "message", "tool_calls"),
+          usage: raw_response["usage"],
+          system_fingerprint: raw_response["system_fingerprint"],
+          raw_response: raw_response
+        )
+      end
+
+      def self.supports?(model)
+        model.start_with?("gpt", "o1")
+      end
+
+      private
+
+      def create_client(access_token, **kwargs)
+        ::OpenAI::Client.new(
+          access_token: access_token,
+          log_errors: true,
+          **kwargs
+        )
+      end
+
+      def convert_params(parameters)
+        {
+          parameters: {
+            **parameters[:parameters],
+            tool_choice: parameters.dig(:parameters, :tools) ? "required" : nil
+          }.compact
+        }
+      end
+    end
+  end
+end
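Taken together with client.rb above, the provider receives the request under a `parameters:` keyword and forwards it to ruby-openai's `chat(parameters:)`. A hedged sketch of calling it directly; the token and model are placeholders:

```ruby
# Placeholder token and model; mirrors what Client#chat sends.
provider = Rach::Provider::OpenAI.new(access_token: "sk-...")
response = provider.chat(
  parameters: {
    model: "gpt-4o",
    messages: [{ role: "user", content: "Hello!" }],
    temperature: 0
  }
)
response.content # content string from the normalized Rach::Response
```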
data/lib/rach/provider.rb
ADDED
@@ -0,0 +1,27 @@
+module Rach
+  module Provider
+
+    AVAILABLE_PROVIDERS = [
+      Provider::OpenAI,
+      Provider::Anthropic
+    ].to_h { |p| [p.key, p] }.freeze
+
+    def self.for(model)
+      _key, provider_class = AVAILABLE_PROVIDERS.find { |_, p| p.supports?(model) }
+      raise ArgumentError, "Unsupported model: #{model}" unless provider_class
+
+      provider_class
+    end
+
+    def self.create_client(provider_key, access_token)
+      provider_class = get_provider_class(provider_key)
+      provider_class.new(access_token:)
+    end
+
+    def self.get_provider_class(key)
+      provider_class = AVAILABLE_PROVIDERS[key.to_sym]
+      raise ArgumentError, "Unknown provider: #{key}" unless provider_class
+      provider_class
+    end
+  end
+end
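The prefix-based dispatch above can be exercised directly; these return values follow from the `supports?` implementations in the two provider files (model strings are illustrative):

```ruby
Rach::Provider.for("gpt-4o")                     # => Rach::Provider::OpenAI
Rach::Provider.for("o1-preview")                 # => Rach::Provider::OpenAI ("o1" prefix)
Rach::Provider.for("claude-3-5-sonnet-20241022") # => Rach::Provider::Anthropic
Rach::Provider.for("mistral-large")              # raises ArgumentError: Unsupported model

Rach::Provider.get_provider_class(:anthropic)    # => Rach::Provider::Anthropic
```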
data/lib/rach/response.rb
CHANGED
@@ -1,17 +1,18 @@
 module Rach
   class Response
-    attr_reader :
-
-
-
-
-
-
-
-
-
-
-
+    attr_reader :id, :model, :created_at, :content, :tool_calls, :usage,
+      :system_fingerprint, :raw_response, :request_params
+
+    def initialize(**options)
+      @id = options[:id]
+      @model = options[:model]
+      @created_at = options[:created_at]
+      @content = options[:content]
+      @tool_calls = options[:tool_calls]
+      @usage = options[:usage]
+      @system_fingerprint = options[:system_fingerprint]
+      @raw_response = options[:raw_response]
+      @request_params = options[:request_params]
     end
 
     def function_call?
@@ -30,10 +31,6 @@ module Rach
       raise ParseError, "Function arguments are not valid JSON"
     end
 
-    def usage
-      @raw_response["usage"]
-    end
-
     def prompt_tokens
       usage&.fetch("prompt_tokens", 0)
     end
@@ -65,10 +62,6 @@ module Rach
 
     private
 
-    def message
-      @raw_response.dig("choices", 0, "message")
-    end
-
     def to_json
       JSON.parse(content)
     rescue JSON::ParserError
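With the ad-hoc `usage` and `message` readers removed, `Response` is now a plain value object that each provider fills with normalized fields. A minimal sketch of constructing one by hand (values are illustrative):

```ruby
# Values are illustrative.
response = Rach::Response.new(
  id: "msg_123",
  model: "claude-3-5-sonnet-20241022",
  content: "Hallo!",
  usage: { "prompt_tokens" => 12, "completion_tokens" => 5, "total_tokens" => 17 }
)
response.prompt_tokens # => 12 (usage is now an attribute, not a raw_response lookup)
```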
data/lib/rach/version.rb
CHANGED
data/lib/rach.rb
CHANGED
@@ -1,4 +1,5 @@
 require 'openai'
+require 'anthropic'
 
 require_relative "rach/version"
 require_relative "rach/client"
@@ -10,6 +11,11 @@ require_relative "rach/response_format"
 require_relative "rach/conversation"
 require_relative "rach/usage_tracker"
 require_relative "rach/function"
+require_relative "rach/provider/base"
+require_relative "rach/provider/openai"
+require_relative "rach/provider/anthropic"
+require_relative "rach/provider"
+require_relative "rach/prompt"
 
 module Rach
   # Your code goes here...
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: rach
 version: !ruby/object:Gem::Version
-  version: 0.2.0
+  version: 0.2.6
 platform: ruby
 authors:
 - Roger Garcia
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2025-01-25 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rspec
@@ -58,6 +58,34 @@ dependencies:
   - - "~>"
     - !ruby/object:Gem::Version
       version: '13.0'
+- !ruby/object:Gem::Dependency
+  name: anthropic
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.3.0
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.3.0
+- !ruby/object:Gem::Dependency
+  name: json-schema_builder
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '0.8'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '0.8'
 - !ruby/object:Gem::Dependency
   name: ruby-openai
   requirement: !ruby/object:Gem::Requirement
@@ -87,6 +115,11 @@ files:
 - lib/rach/function.rb
 - lib/rach/message.rb
 - lib/rach/message_template.rb
+- lib/rach/prompt.rb
+- lib/rach/provider.rb
+- lib/rach/provider/anthropic.rb
+- lib/rach/provider/base.rb
+- lib/rach/provider/openai.rb
 - lib/rach/response.rb
 - lib/rach/response_format.rb
 - lib/rach/usage_tracker.rb
@@ -110,7 +143,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.5.
+rubygems_version: 3.5.9
 signing_key:
 specification_version: 4
 summary: Orchestrate AI agents like a virtuoso