llm.rb 0.1.0 → 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. checksums.yaml +4 -4
  2. data/README.md +85 -24
  3. data/lib/llm/conversation.rb +62 -10
  4. data/lib/llm/core_ext/ostruct.rb +0 -0
  5. data/lib/llm/error.rb +0 -0
  6. data/lib/llm/file.rb +0 -0
  7. data/lib/llm/http_client.rb +0 -0
  8. data/lib/llm/message.rb +1 -1
  9. data/lib/llm/message_queue.rb +18 -11
  10. data/lib/llm/model.rb +7 -0
  11. data/lib/llm/provider.rb +144 -98
  12. data/lib/llm/providers/anthropic/error_handler.rb +1 -1
  13. data/lib/llm/providers/anthropic/format.rb +7 -1
  14. data/lib/llm/providers/anthropic/response_parser.rb +0 -0
  15. data/lib/llm/providers/anthropic.rb +31 -15
  16. data/lib/llm/providers/gemini/error_handler.rb +0 -0
  17. data/lib/llm/providers/gemini/format.rb +7 -1
  18. data/lib/llm/providers/gemini/response_parser.rb +0 -0
  19. data/lib/llm/providers/gemini.rb +25 -14
  20. data/lib/llm/providers/ollama/error_handler.rb +0 -0
  21. data/lib/llm/providers/ollama/format.rb +7 -1
  22. data/lib/llm/providers/ollama/response_parser.rb +13 -0
  23. data/lib/llm/providers/ollama.rb +32 -8
  24. data/lib/llm/providers/openai/error_handler.rb +0 -0
  25. data/lib/llm/providers/openai/format.rb +7 -1
  26. data/lib/llm/providers/openai/response_parser.rb +5 -3
  27. data/lib/llm/providers/openai.rb +22 -12
  28. data/lib/llm/providers/voyageai/error_handler.rb +32 -0
  29. data/lib/llm/providers/voyageai/response_parser.rb +13 -0
  30. data/lib/llm/providers/voyageai.rb +44 -0
  31. data/lib/llm/response/completion.rb +0 -0
  32. data/lib/llm/response/embedding.rb +0 -0
  33. data/lib/llm/response.rb +0 -0
  34. data/lib/llm/version.rb +1 -1
  35. data/lib/llm.rb +19 -9
  36. data/llm.gemspec +6 -1
  37. data/share/llm/models/anthropic.yml +35 -0
  38. data/share/llm/models/gemini.yml +35 -0
  39. data/share/llm/models/ollama.yml +155 -0
  40. data/share/llm/models/openai.yml +46 -0
  41. data/spec/anthropic/completion_spec.rb +11 -27
  42. data/spec/anthropic/embedding_spec.rb +25 -0
  43. data/spec/gemini/completion_spec.rb +34 -29
  44. data/spec/gemini/embedding_spec.rb +4 -12
  45. data/spec/llm/conversation_spec.rb +93 -1
  46. data/spec/ollama/completion_spec.rb +7 -16
  47. data/spec/ollama/embedding_spec.rb +14 -5
  48. data/spec/openai/completion_spec.rb +40 -43
  49. data/spec/openai/embedding_spec.rb +4 -12
  50. data/spec/readme_spec.rb +9 -12
  51. data/spec/setup.rb +7 -16
  52. metadata +81 -4
  53. data/lib/llm/lazy_conversation.rb +0 -39
  54. data/spec/llm/lazy_conversation_spec.rb +0 -110
data/lib/llm/providers/anthropic.rb CHANGED
@@ -11,7 +11,6 @@ module LLM
     include Format
 
     HOST = "api.anthropic.com"
-    DEFAULT_PARAMS = {max_tokens: 1024, model: "claude-3-5-sonnet-20240620"}.freeze
 
     ##
     # @param secret (see LLM::Provider#initialize)
@@ -20,14 +19,17 @@ module LLM
     end
 
     ##
+    # Provides an embedding via VoyageAI per
+    # [Anthropic's recommendation](https://docs.anthropic.com/en/docs/build-with-claude/embeddings)
     # @param input (see LLM::Provider#embed)
+    # @param [String] token
+    #  Valid token for the VoyageAI API
+    # @param [Hash] params
+    #  Additional parameters to pass to the API
     # @return (see LLM::Provider#embed)
-    def embed(input, **params)
-      req = Net::HTTP::Post.new ["api.voyageai.com/v1", "embeddings"].join("/")
-      body = {input:, model: "voyage-2"}.merge!(params)
-      req = preflight(req, body)
-      res = request(@http, req)
-      Response::Embedding.new(res).extend(response_parser)
+    def embed(input, token:, **params)
+      llm = LLM.voyageai(token)
+      llm.embed(input, **params)
     end
 
     ##
@@ -36,20 +38,34 @@ module LLM
     # @param role (see LLM::Provider#complete)
     # @return (see LLM::Provider#complete)
     def complete(prompt, role = :user, **params)
-      req = Net::HTTP::Post.new ["/v1", "messages"].join("/")
+      params = {max_tokens: 1024, model: "claude-3-5-sonnet-20240620"}.merge!(params)
+      req = Net::HTTP::Post.new("/v1/messages", headers)
       messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
-      params = DEFAULT_PARAMS.merge(params)
-      body = {messages: format(messages)}.merge!(params)
-      req = preflight(req, body)
-      res = request(@http, req)
+      req.body = JSON.dump({messages: format(messages)}.merge!(params))
+      res = request(@http, req)
       Response::Completion.new(res).extend(response_parser)
     end
 
+    ##
+    # @return (see LLM::Provider#assistant_role)
+    def assistant_role
+      "assistant"
+    end
+
+    ##
+    # @return (see LLM::Provider#models)
+    def models
+      @models ||= load_models!("anthropic")
+    end
+
     private
 
-    def auth(req)
-      req["anthropic-version"] = "2023-06-01"
-      req["x-api-key"] = @secret
+    def headers
+      {
+        "Content-Type" => "application/json",
+        "x-api-key" => @secret,
+        "anthropic-version" => "2023-06-01"
+      }
     end
 
     def response_parser
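
Anthropic no longer calls the VoyageAI endpoint itself; embed now builds a VoyageAI provider and delegates to it, which is why the method gains a required token: keyword. A usage sketch (ENV variable names are illustrative, not from the gem):

    llm = LLM.anthropic(ENV["ANTHROPIC_SECRET"])
    # token: is the VoyageAI API key; remaining params (e.g. model:)
    # are forwarded to VoyageAI's /v1/embeddings endpoint.
    res = llm.embed("Hello, world", token: ENV["VOYAGEAI_SECRET"])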
data/lib/llm/providers/gemini/format.rb CHANGED
@@ -7,7 +7,13 @@ class LLM::Gemini
     #  The messages to format
     # @return [Array<Hash>]
     def format(messages)
-      messages.map { {role: _1.role, parts: [format_content(_1.content)]} }
+      messages.map do
+        if Hash === _1
+          {role: _1[:role], parts: [format_content(_1[:content])]}
+        else
+          {role: _1.role, parts: [format_content(_1.content)]}
+        end
+      end
     end
 
     private
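
The formatter now duck-types each message: plain hashes pass through alongside LLM::Message objects, and the same change appears in the Ollama and OpenAI formatters below. A minimal sketch of what this enables (llm is any provider instance):

    messages = [
      {role: "user", content: "What is 3 + 2?"},
      LLM::Message.new(llm.assistant_role, "5")
    ]
    # complete prepends params[:messages] before formatting:
    llm.complete("Now multiply it by 10", :user, messages: messages)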
data/lib/llm/providers/gemini.rb CHANGED
@@ -11,7 +11,6 @@ module LLM
     include Format
 
     HOST = "generativelanguage.googleapis.com"
-    DEFAULT_PARAMS = {model: "gemini-1.5-flash"}.freeze
 
     ##
     # @param secret (see LLM::Provider#initialize)
@@ -23,11 +22,10 @@ module LLM
     # @param input (see LLM::Provider#embed)
     # @return (see LLM::Provider#embed)
     def embed(input, **params)
-      path = ["/v1beta/models", "text-embedding-004"].join("/")
-      req = Net::HTTP::Post.new [path, "embedContent"].join(":")
-      body = {content: {parts: [{text: input}]}}
-      req = preflight(req, body)
-      res = request @http, req
+      path = ["/v1beta/models/text-embedding-004", "embedContent?key=#{@secret}"].join(":")
+      req = Net::HTTP::Post.new(path, headers)
+      req.body = JSON.dump({content: {parts: [{text: input}]}})
+      res = request(@http, req)
       Response::Embedding.new(res).extend(response_parser)
     end
 
@@ -37,20 +35,33 @@ module LLM
     # @param role (see LLM::Provider#complete)
     # @return (see LLM::Provider#complete)
     def complete(prompt, role = :user, **params)
-      params = DEFAULT_PARAMS.merge(params)
-      path = ["/v1beta/models", params.delete(:model)].join("/")
-      req = Net::HTTP::Post.new [path, "generateContent"].join(":")
+      params = {model: "gemini-1.5-flash"}.merge!(params)
+      path = ["/v1beta/models/#{params.delete(:model)}", "generateContent?key=#{@secret}"].join(":")
+      req = Net::HTTP::Post.new(path, headers)
       messages = [*(params.delete(:messages) || []), LLM::Message.new(role, prompt)]
-      body = {contents: format(messages)}
-      req = preflight(req, body)
-      res = request(@http, req)
+      req.body = JSON.dump({contents: format(messages)})
+      res = request(@http, req)
       Response::Completion.new(res).extend(response_parser)
     end
 
+    ##
+    # @return (see LLM::Provider#assistant_role)
+    def assistant_role
+      "model"
+    end
+
+    ##
+    # @return (see LLM::Provider#models)
+    def models
+      @models ||= load_models!("gemini")
+    end
+
     private
 
-    def auth(req)
-      req.path.replace [req.path, URI.encode_www_form(key: @secret)].join("?")
+    def headers
+      {
+        "Content-Type" => "application/json"
+      }
     end
 
     def response_parser
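
Gemini authentication moves from mutating the request path in auth to embedding the key as a query parameter when the path is built, so callers are unaffected. Note also that Gemini's assistant role is "model", not "assistant". A sketch (ENV name illustrative):

    llm = LLM.gemini(ENV["GEMINI_SECRET"])
    # model: replaces the "gemini-1.5-flash" default merged in complete
    res = llm.complete("Hello!", :user, model: "gemini-2.0-flash")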
data/lib/llm/providers/ollama/format.rb CHANGED
@@ -7,7 +7,13 @@ class LLM::Ollama
     #  The messages to format
     # @return [Array<Hash>]
     def format(messages)
-      messages.map { {role: _1.role, content: format_content(_1.content)} }
+      messages.map do
+        if Hash === _1
+          {role: _1[:role], content: format_content(_1[:content])}
+        else
+          {role: _1.role, content: format_content(_1.content)}
+        end
+      end
     end
 
     private
data/lib/llm/providers/ollama/response_parser.rb CHANGED
@@ -2,6 +2,19 @@
 
 class LLM::Ollama
   module ResponseParser
+    ##
+    # @param [Hash] body
+    #  The response body from the LLM provider
+    # @return [Hash]
+    def parse_embedding(body)
+      {
+        model: body["model"],
+        embeddings: body["data"].map { _1["embedding"] },
+        prompt_tokens: body.dig("usage", "prompt_tokens"),
+        total_tokens: body.dig("usage", "total_tokens")
+      }
+    end
+
     ##
     # @param [Hash] body
     #  The response body from the LLM provider
data/lib/llm/providers/ollama.rb CHANGED
@@ -11,7 +11,6 @@ module LLM
     include Format
 
     HOST = "localhost"
-    DEFAULT_PARAMS = {model: "llama3.2", stream: false}.freeze
 
     ##
     # @param secret (see LLM::Provider#initialize)
@@ -19,25 +18,50 @@ module LLM
       super(secret, host: HOST, port: 11434, ssl: false, **)
     end
 
+    ##
+    # @param input (see LLM::Provider#embed)
+    # @return (see LLM::Provider#embed)
+    def embed(input, **params)
+      params = {model: "llama3.2"}.merge!(params)
+      req = Net::HTTP::Post.new("/v1/embeddings", headers)
+      req.body = JSON.dump({input:}.merge!(params))
+      res = request(@http, req)
+      Response::Embedding.new(res).extend(response_parser)
+    end
+
     ##
     # @see https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-chat-completion Ollama docs
     # @param prompt (see LLM::Provider#complete)
     # @param role (see LLM::Provider#complete)
     # @return (see LLM::Provider#complete)
     def complete(prompt, role = :user, **params)
-      req = Net::HTTP::Post.new ["/api", "chat"].join("/")
+      params = {model: "llama3.2", stream: false}.merge!(params)
+      req = Net::HTTP::Post.new("/api/chat", headers)
       messages = [*(params.delete(:messages) || []), LLM::Message.new(role, prompt)]
-      params = DEFAULT_PARAMS.merge(params)
-      body = {messages: messages.map(&:to_h)}.merge!(params)
-      req = preflight(req, body)
-      res = request(@http, req)
+      req.body = JSON.dump({messages: messages.map(&:to_h)}.merge!(params))
+      res = request(@http, req)
       Response::Completion.new(res).extend(response_parser)
     end
 
+    ##
+    # @return (see LLM::Provider#assistant_role)
+    def assistant_role
+      "assistant"
+    end
+
+    ##
+    # @return (see LLM::Provider#models)
+    def models
+      @models ||= load_models!("ollama")
+    end
+
     private
 
-    def auth(req)
-      req["Authorization"] = "Bearer #{@secret}"
+    def headers
+      {
+        "Content-Type" => "application/json",
+        "Authorization" => "Bearer #{@secret}"
+      }
    end
 
     def response_parser
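
The new Ollama embed posts to the OpenAI-compatible /v1/embeddings route on localhost:11434 per the initializer above. A sketch, assuming a stock local install (which ignores the bearer token the headers method always sends):

    llm = LLM.ollama("token-not-checked-locally")
    res = llm.embed("Ruby is a programmer's best friend") # model defaults to "llama3.2"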
data/lib/llm/providers/openai/format.rb CHANGED
@@ -7,7 +7,13 @@ class LLM::OpenAI
     #  The messages to format
     # @return [Array<Hash>]
     def format(messages)
-      messages.map { {role: _1.role, content: format_content(_1.content)} }
+      messages.map do
+        if Hash === _1
+          {role: _1[:role], content: format_content(_1[:content])}
+        else
+          {role: _1.role, content: format_content(_1.content)}
+        end
+      end
     end
 
     private
data/lib/llm/providers/openai/response_parser.rb CHANGED
@@ -2,12 +2,14 @@
 
 class LLM::OpenAI
   module ResponseParser
+    ##
+    # @param [Hash] body
+    #  The response body from the LLM provider
+    # @return [Hash]
     def parse_embedding(body)
       {
         model: body["model"],
-        embeddings: body.dig("data").map do |data|
-          data["embedding"]
-        end,
+        embeddings: body["data"].map { _1["embedding"] },
         prompt_tokens: body.dig("usage", "prompt_tokens"),
         total_tokens: body.dig("usage", "total_tokens")
       }
data/lib/llm/providers/openai.rb CHANGED
@@ -11,7 +11,6 @@ module LLM
     include Format
 
     HOST = "api.openai.com"
-    DEFAULT_PARAMS = {model: "gpt-4o-mini"}.freeze
 
     ##
     # @param secret (see LLM::Provider#initialize)
@@ -23,10 +22,9 @@ module LLM
     # @param input (see LLM::Provider#embed)
     # @return (see LLM::Provider#embed)
     def embed(input, **params)
-      req = Net::HTTP::Post.new ["/v1", "embeddings"].join("/")
-      body = {input:, model: "text-embedding-3-small"}.merge!(params)
-      req = preflight(req, body)
-      res = request @http, req
+      req = Net::HTTP::Post.new("/v1/embeddings", headers)
+      req.body = JSON.dump({input:, model: "text-embedding-3-small"}.merge!(params))
+      res = request(@http, req)
       Response::Embedding.new(res).extend(response_parser)
     end
 
@@ -36,19 +34,31 @@ module LLM
     # @param role (see LLM::Provider#complete)
     # @return (see LLM::Provider#complete)
     def complete(prompt, role = :user, **params)
-      req = Net::HTTP::Post.new ["/v1", "chat", "completions"].join("/")
+      params = {model: "gpt-4o-mini"}.merge!(params)
+      req = Net::HTTP::Post.new("/v1/chat/completions", headers)
       messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
-      params = DEFAULT_PARAMS.merge(params)
-      body = {messages: format(messages)}.merge!(params)
-      req = preflight(req, body)
-      res = request(@http, req)
+      req.body = JSON.dump({messages: format(messages)}.merge!(params))
+      res = request(@http, req)
       Response::Completion.new(res).extend(response_parser)
     end
 
+    ##
+    # @return (see LLM::Provider#assistant_role)
+    def assistant_role
+      "assistant"
+    end
+
+    def models
+      @models ||= load_models!("openai")
+    end
+
     private
 
-    def auth(req)
-      req["Authorization"] = "Bearer #{@secret}"
+    def headers
+      {
+        "Content-Type" => "application/json",
+        "Authorization" => "Bearer #{@secret}"
+      }
     end
 
     def response_parser
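
With DEFAULT_PARAMS gone across all providers, defaults are merged per call and remain overridable; the new models method exposes the bundled model list. A sketch, assuming models returns a hash keyed by model id (a shape inferred from the YAML files below, not shown in this diff):

    llm = LLM.openai(ENV["OPENAI_SECRET"])
    llm.models.keys # => ["gpt-4o-mini", ...] (assumed shape)
    llm.complete("Hello", :user, model: "gpt-4o") # overrides the "gpt-4o-mini" default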
data/lib/llm/providers/voyageai/error_handler.rb ADDED
@@ -0,0 +1,32 @@
+# frozen_string_literal: true
+
+class LLM::VoyageAI
+  class ErrorHandler
+    ##
+    # @return [Net::HTTPResponse]
+    #  Non-2XX response from the server
+    attr_reader :res
+
+    ##
+    # @param [Net::HTTPResponse] res
+    #  The response from the server
+    # @return [LLM::VoyageAI::ErrorHandler]
+    def initialize(res)
+      @res = res
+    end
+
+    ##
+    # @raise [LLM::Error]
+    #  Raises a subclass of {LLM::Error LLM::Error}
+    def raise_error!
+      case res
+      when Net::HTTPUnauthorized
+        raise LLM::Error::Unauthorized.new { _1.response = res }, "Authentication error"
+      when Net::HTTPTooManyRequests
+        raise LLM::Error::RateLimit.new { _1.response = res }, "Too many requests"
+      else
+        raise LLM::Error::BadResponse.new { _1.response = res }, "Unexpected response"
+      end
+    end
+  end
+end
data/lib/llm/providers/voyageai/response_parser.rb ADDED
@@ -0,0 +1,13 @@
+# frozen_string_literal: true
+
+class LLM::VoyageAI
+  module ResponseParser
+    def parse_embedding(body)
+      {
+        model: body["model"],
+        embeddings: body["data"].map { _1["embedding"] },
+        total_tokens: body.dig("usage", "total_tokens")
+      }
+    end
+  end
+end
data/lib/llm/providers/voyageai.rb ADDED
@@ -0,0 +1,44 @@
+# frozen_string_literal: true
+
+module LLM
+  class VoyageAI < Provider
+    require_relative "voyageai/error_handler"
+    require_relative "voyageai/response_parser"
+    HOST = "api.voyageai.com"
+
+    ##
+    # @param secret (see LLM::Provider#initialize)
+    def initialize(secret, **)
+      super(secret, host: HOST, **)
+    end
+
+    ##
+    # Provides an embedding via VoyageAI per
+    # [Anthropic's recommendation](https://docs.anthropic.com/en/docs/build-with-claude/embeddings)
+    # @param input (see LLM::Provider#embed)
+    # @return (see LLM::Provider#embed)
+    def embed(input, **params)
+      req = Net::HTTP::Post.new("/v1/embeddings", headers)
+      req.body = JSON.dump({input:, model: "voyage-2"}.merge!(params))
+      res = request(@http, req)
+      Response::Embedding.new(res).extend(response_parser)
+    end
+
+    private
+
+    def headers
+      {
+        "Content-Type" => "application/json",
+        "Authorization" => "Bearer #{@secret}"
+      }
+    end
+
+    def response_parser
+      LLM::VoyageAI::ResponseParser
+    end
+
+    def error_handler
+      LLM::VoyageAI::ErrorHandler
+    end
+  end
+end
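
The VoyageAI provider is embedding-only: there is no complete, and its parser (above) reports total_tokens but no prompt_tokens. It can also be used directly, without going through LLM::Anthropic#embed; a sketch (ENV name illustrative):

    llm = LLM.voyageai(ENV["VOYAGEAI_SECRET"])
    res = llm.embed("Hello, world") # model defaults to "voyage-2"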
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module LLM
-  VERSION = "0.1.0"
+  VERSION = "0.2.1"
 end
data/lib/llm.rb CHANGED
@@ -6,9 +6,10 @@ module LLM
   require_relative "llm/message"
   require_relative "llm/response"
   require_relative "llm/file"
+  require_relative "llm/model"
   require_relative "llm/provider"
   require_relative "llm/conversation"
-  require_relative "llm/lazy_conversation"
+  require_relative "llm/message_queue"
   require_relative "llm/core_ext/ostruct"
 
   module_function
@@ -16,32 +17,41 @@ module LLM
   ##
   # @param secret (see LLM::Anthropic#initialize)
   # @return (see LLM::Anthropic#initialize)
-  def anthropic(secret, **)
+  def anthropic(secret, options = {})
     require_relative "llm/providers/anthropic" unless defined?(LLM::Anthropic)
-    LLM::Anthropic.new(secret, **)
+    require_relative "llm/providers/voyageai" unless defined?(LLM::VoyageAI)
+    LLM::Anthropic.new(secret, **options)
+  end
+
+  ##
+  # @param secret (see LLM::VoyageAI#initialize)
+  # @return (see LLM::VoyageAI#initialize)
+  def voyageai(secret, options = {})
+    require_relative "llm/providers/voyageai" unless defined?(LLM::VoyageAI)
+    LLM::VoyageAI.new(secret, **options)
   end
 
   ##
   # @param secret (see LLM::Gemini#initialize)
   # @return (see LLM::Gemini#initialize)
-  def gemini(secret, **)
+  def gemini(secret, options = {})
     require_relative "llm/providers/gemini" unless defined?(LLM::Gemini)
-    LLM::Gemini.new(secret, **)
+    LLM::Gemini.new(secret, **options)
   end
 
   ##
   # @param host (see LLM::Ollama#initialize)
   # @return (see LLM::Ollama#initialize)
-  def ollama(secret)
+  def ollama(secret, options = {})
     require_relative "llm/providers/ollama" unless defined?(LLM::Ollama)
-    LLM::Ollama.new(secret)
+    LLM::Ollama.new(secret, **options)
   end
 
   ##
   # @param secret (see LLM::OpenAI#initialize)
   # @return (see LLM::OpenAI#initialize)
-  def openai(secret, **)
+  def openai(secret, options = {})
     require_relative "llm/providers/openai" unless defined?(LLM::OpenAI)
-    LLM::OpenAI.new(secret, **)
+    LLM::OpenAI.new(secret, **options)
   end
 end
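
Each module-level constructor now takes an explicit options hash and splats it into the provider's initializer, where Ruby's keyword semantics let caller-supplied keys override the provider defaults (the trailing ** in e.g. Ollama's super(secret, host: HOST, port: 11434, ssl: false, **) wins over the literal keywords). A sketch (host value illustrative):

    llm = LLM.ollama("token", {host: "ollama.internal", port: 11434, ssl: false})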
data/llm.gemspec CHANGED
@@ -25,16 +25,21 @@ Gem::Specification.new do |spec|
     "README.md", "LICENSE.txt",
     "lib/*.rb", "lib/**/*.rb",
     "spec/*.rb", "spec/**/*.rb",
-    "llm.gemspec"
+    "share/llm/models/*.yml", "llm.gemspec"
   ]
   spec.require_paths = ["lib"]
 
   spec.add_runtime_dependency "net-http", "~> 0.6.0"
   spec.add_runtime_dependency "json"
+  spec.add_runtime_dependency "yaml"
 
   spec.add_development_dependency "webmock", "~> 3.24.0"
   spec.add_development_dependency "yard", "~> 0.9.37"
   spec.add_development_dependency "kramdown", "~> 2.4"
   spec.add_development_dependency "webrick", "~> 1.8"
   spec.add_development_dependency "test-cmd.rb", "~> 0.12.0"
+  spec.add_development_dependency "rake", "~> 13.0"
+  spec.add_development_dependency "rspec", "~> 3.0"
+  spec.add_development_dependency "standard", "~> 1.40"
+  spec.add_development_dependency "vcr", "~> 6.0"
 end
data/share/llm/models/anthropic.yml ADDED
@@ -0,0 +1,35 @@
+claude-3-7-sonnet-20250219:
+  name: Claude 3.7 Sonnet
+  parameters: Unknown
+  description: Most intelligent Claude model with extended thinking and high capability
+  to_param: claude-3-7-sonnet-20250219
+
+claude-3-5-sonnet-20241022:
+  name: Claude 3.5 Sonnet (v2)
+  parameters: Unknown
+  description: High intelligence and capability; upgraded from previous Sonnet
+  to_param: claude-3-5-sonnet-20241022
+
+claude-3-5-sonnet-20240620:
+  name: Claude 3.5 Sonnet
+  parameters: Unknown
+  description: Intelligent and capable general-purpose model
+  to_param: claude-3-5-sonnet-20240620
+
+claude-3-5-haiku-20241022:
+  name: Claude 3.5 Haiku
+  parameters: Unknown
+  description: Blazing fast model for low-latency text generation
+  to_param: claude-3-5-haiku-20241022
+
+claude-3-opus-20240229:
+  name: Claude 3 Opus
+  parameters: Unknown
+  description: Top-level intelligence, fluency, and reasoning for complex tasks
+  to_param: claude-3-opus-20240229
+
+claude-3-haiku-20240307:
+  name: Claude 3 Haiku
+  parameters: Unknown
+  description: Fastest and most compact Claude model for near-instant responsiveness
+  to_param: claude-3-haiku-20240307
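
Entries are keyed by the API model id, with name, parameters, description, and to_param fields; each provider's models method memoizes the parsed file. A consumption sketch, assuming entries surface as LLM::Model objects (per the new lib/llm/model.rb, not shown here) with those fields as readers:

    llm = LLM.anthropic(ENV["ANTHROPIC_SECRET"])
    model = llm.models["claude-3-5-sonnet-20240620"]
    model.to_param # => "claude-3-5-sonnet-20240620" (assumed reader)
    llm.complete("Hello", :user, model: model.to_param)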
data/share/llm/models/gemini.yml ADDED
@@ -0,0 +1,35 @@
+gemini-2.5-pro-exp-03-25:
+  name: Gemini
+  parameters: Unknown
+  description: Enhanced thinking and reasoning, multimodal understanding, advanced coding, and more
+  to_param: gemini-2.5-pro-exp-03-25
+
+gemini-2.0-flash:
+  name: Gemini
+  parameters: Unknown
+  description: Next generation features, speed, thinking, realtime streaming, and multimodal generation
+  to_param: gemini-2.0-flash
+
+gemini-2.0-flash-lite:
+  name: Gemini
+  parameters: Unknown
+  description: Cost efficiency and low latency
+  to_param: gemini-2.0-flash-lite
+
+gemini-1.5-flash:
+  name: Gemini
+  parameters: Unknown
+  description: Fast and versatile performance across a diverse variety of tasks
+  to_param: gemini-1.5-flash
+
+gemini-1.5-flash-8b:
+  name: Gemini
+  parameters: 8B
+  description: High volume and lower intelligence tasks
+  to_param: gemini-1.5-flash-8b
+
+gemini-1.5-pro:
+  name: Gemini
+  parameters: Unknown
+  description: Complex reasoning tasks requiring more intelligence
+  to_param: gemini-1.5-pro