spectre_ai 2.0.0 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: '03910c4dd38bf7a272fab91c0e9d1431d0f9bdc593abe62035271e9e07f22e89'
- data.tar.gz: 0f1e927a42785d2f4735e4adf9140efad6815247fca6ce6270ac10ef286ae217
+ metadata.gz: 2a0cd0a25dd4345c62af319d8669a3702ce2e505299c01fbf43e4a94cb3b3109
+ data.tar.gz: 544e5bb1462d2d9477601d6a581d455a30e5b0a5f107ec1264608c0f9b713307
  SHA512:
- metadata.gz: 48e634fedb903de30ff0acba0b1de5b725fccb2ac0882bf8890740100312c92ae48c6c182612554cdd6968b34df5f6c7958a62caf19492ae29a42d8e084e1458
- data.tar.gz: b7a382fb583431eff8715147df8f5251e30d528b0a77ecc6c2ecf68b324f57f76e9c6a2634da07f55159713d9cd58684e212b75fee7f5a7e20e56b437725dd55
+ metadata.gz: cdb1062a23c35d4df2ca354eeff42ebd28b5d2ddef3a710ee032917a5a1bad811645f61dc8d5e5914b2fc16cd319768a3bd6ec84f3e9bc4ed50c22af1a073855
+ data.tar.gz: ecca27c2bd3f7ada23f744dd93017b488fbf193652b1bbdf5c3f47110bfd7ae7c042a66398d53592bc3c59d79031c0f46987b12551ebc1b4f1923f215efa55ce
data/CHANGELOG.md CHANGED
@@ -280,3 +280,59 @@ Key Benefits:\
  ### Behavior Notes

  - Gemini OpenAI-compatible chat endpoint requires that the last message in `messages` has role 'user'. Spectre raises an ArgumentError if this requirement is not met to prevent 400 INVALID_ARGUMENT errors from the API.
+
+
+ # Changelog for Version 2.1.0
+
+ **Release Date:** [12th Nov 2025]
+
+ ### New Provider: OpenRouter
+
+ - Added Spectre::Openrouter provider with:
+   - Chat Completions via `https://openrouter.ai/api/v1/chat/completions` (OpenAI-compatible interface).
+   - Embeddings via `https://openrouter.ai/api/v1/embeddings`.
+   - Provider configuration: `Spectre.setup { |c| c.openrouter { |o| o.api_key = ENV['OPENROUTER_API_KEY']; o.referer = 'https://your.app' ; o.app_title = 'Your App' } }`.
+   - Optional headers supported: `HTTP-Referer` and `X-Title` (as recommended by OpenRouter).
+   - Finish reasons handled per OpenRouter docs: `stop`, `tool_calls`/`function_call`, `length`/`model_length`, `content_filter`, `error`.
+   - Refusal handling (raises an error if the model returns a refusal).
+
+ ### Structured Outputs (json_schema)
+
+ - OpenRouter completions support OpenAI-style `response_format: { type: 'json_schema', json_schema: ... }`.
+ - Note for schema authors: many OpenRouter-backed providers require a strict schema:
+   - Include a non-empty `required` array listing all keys in `properties`.
+   - Consider `strict: true` and `additionalProperties: false` for best adherence.
+
+ ### Tests
+
+ - Added RSpec tests for `Spectre::Openrouter::Completions` and `Spectre::Openrouter::Embeddings` covering:
+   - Success responses, error propagation, JSON parse errors.
+   - Finish reasons and refusal handling.
+   - Request body formation (max_tokens, tools, response_format.json_schema).
+
+ ### Breaking Changes
+
+ - Unified `max_tokens` option across providers:
+   - Now accepted only as a top-level argument: `... Completions.create(messages: ..., max_tokens: 256)`.
+   - Removed support for provider-scoped forms like `openai: { max_tokens: ... }`, `openrouter: { max_tokens: ... }`, `claude: { max_tokens: ... }`, `gemini: { max_tokens: ... }`.
+
+ ### Usage Examples
+
+ - OpenRouter (completions):
+ ```ruby
+ Spectre.setup do |c|
+   c.default_llm_provider = :openrouter
+   c.openrouter { |o| o.api_key = ENV['OPENROUTER_API_KEY'] }
+ end
+
+ Spectre::Openrouter::Completions.create(
+   messages: [ { role: 'user', content: 'Hello!' } ],
+   model: 'openai/gpt-4o-mini',
+   max_tokens: 256
+ )
+ ```
+
+ - OpenRouter (embeddings):
+ ```ruby
+ Spectre::Openrouter::Embeddings.create('some text', model: 'text-embedding-3-small')
+ ```
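
Editor's note: the Structured Outputs entry above only describes the schema requirements in prose. The sketch below shows what such a strict schema might look like when passed via `json_schema:`; the `name`/`strict`/`schema` wrapper is assumed from the OpenAI `response_format` convention (the gem forwards whatever hash it is given as `response_format.json_schema`), so treat the wrapper shape and field names as illustrative, not part of this release.

```ruby
# Editor's sketch (not shipped with the gem): a strict schema per the changelog's
# guidance — every property listed in `required`, additionalProperties: false.
contact_schema = {
  name: 'contact',                     # wrapper keys assumed from the OpenAI convention
  strict: true,
  schema: {
    type: 'object',
    properties: {
      name:  { type: 'string' },
      email: { type: 'string' }
    },
    required: %w[name email],
    additionalProperties: false
  }
}

Spectre::Openrouter::Completions.create(
  messages: [{ role: 'user', content: 'Extract the contact from: Jane <jane@example.com>' }],
  model: 'openai/gpt-4o-mini',
  json_schema: contact_schema,
  max_tokens: 256
)
```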
data/lib/spectre/claude/completions.rb CHANGED
@@ -21,7 +21,7 @@ module Spectre
  # @param json_schema [Hash, nil] Optional JSON Schema; when provided, it will be converted into a tool with input_schema and forced via tool_choice unless overridden
  # @param tools [Array<Hash>, nil] An optional array of tool definitions for function calling
  # @param tool_choice [Hash, nil] Optional tool_choice to force a specific tool use (e.g., { type: 'tool', name: 'record_summary' })
- # @param args [Hash, nil] optional arguments like read_timeout and open_timeout. For Claude, max_tokens can be passed in the claude hash.
+ # @param args [Hash, nil] optional arguments like read_timeout and open_timeout. Provide max_tokens at the top level only.
  # @return [Hash] The parsed response including any tool calls or content
  # @raise [APIKeyNotConfiguredError] If the API key is not set
  # @raise [RuntimeError] For general API errors or unexpected issues
@@ -43,7 +43,7 @@ module Spectre
  'anthropic-version' => ANTHROPIC_VERSION
  })

- max_tokens = args.dig(:claude, :max_tokens) || 1024
+ max_tokens = args[:max_tokens] || 1024
  request.body = generate_body(messages, model, json_schema, max_tokens, tools, tool_choice).to_json
  response = http.request(request)

data/lib/spectre/gemini/completions.rb CHANGED
@@ -18,7 +18,7 @@ module Spectre
  # @param model [String] The model to be used for generating completions, defaults to DEFAULT_MODEL
  # @param json_schema [Hash, nil] An optional JSON schema to enforce structured output (OpenAI-compatible "response_format")
  # @param tools [Array<Hash>, nil] An optional array of tool definitions for function calling
- # @param args [Hash, nil] optional arguments like read_timeout and open_timeout. For Gemini, max_tokens can be passed in the gemini hash.
+ # @param args [Hash, nil] optional arguments like read_timeout and open_timeout. Provide max_tokens at the top level only.
  # @return [Hash] The parsed response including any function calls or content
  # @raise [APIKeyNotConfiguredError] If the API key is not set
  # @raise [RuntimeError] For general API errors or unexpected issues
@@ -39,7 +39,7 @@ module Spectre
  'Authorization' => "Bearer #{api_key}"
  })

- max_tokens = args.dig(:gemini, :max_tokens)
+ max_tokens = args[:max_tokens]
  request.body = generate_body(messages, model, json_schema, max_tokens, tools).to_json
  response = http.request(request)

data/lib/spectre/openai/completions.rb CHANGED
@@ -17,7 +17,7 @@ module Spectre
  # @param model [String] The model to be used for generating completions, defaults to DEFAULT_MODEL
  # @param json_schema [Hash, nil] An optional JSON schema to enforce structured output
  # @param tools [Array<Hash>, nil] An optional array of tool definitions for function calling
- # @param args [Hash, nil] optional arguments like read_timeout and open_timeout. For OpenAI, max_tokens can be passed in the openai hash.
+ # @param args [Hash, nil] optional arguments like read_timeout and open_timeout. Provide max_tokens at the top level only.
  # @return [Hash] The parsed response including any function calls or content
  # @raise [APIKeyNotConfiguredError] If the API key is not set
  # @raise [RuntimeError] For general API errors or unexpected issues
@@ -38,7 +38,7 @@ module Spectre
  'Authorization' => "Bearer #{api_key}"
  })

- max_tokens = args.dig(:openai, :max_tokens)
+ max_tokens = args[:max_tokens]
  request.body = generate_body(messages, model, json_schema, max_tokens, tools).to_json
  response = http.request(request)

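
Editor's note: the Claude, Gemini and OpenAI hunks above all make the same change — `max_tokens` is now read from the top-level keyword arguments instead of a provider-scoped hash (Claude keeps its 1024 default when the option is omitted; Gemini and OpenAI send nothing). A hedged before/after sketch; the `Spectre::Claude::Completions` and `Spectre::Gemini::Completions` class names are assumed from the gem's provider layout, and only the OpenAI and OpenRouter paths are spelled out elsewhere in this diff.

```ruby
msgs = [{ role: 'user', content: 'Summarise this in one line.' }]

# Removed in 2.1.0: provider-scoped form (the nested value is no longer read)
# Spectre::Openai::Completions.create(messages: msgs, openai: { max_tokens: 512 })

# 2.1.0: one top-level keyword for every provider
Spectre::Openai::Completions.create(messages: msgs, max_tokens: 512)
Spectre::Gemini::Completions.create(messages: msgs, max_tokens: 512) # class name assumed
Spectre::Claude::Completions.create(messages: msgs)                  # falls back to 1024 per the hunk above
```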
data/lib/spectre/openrouter/completions.rb ADDED
@@ -0,0 +1,107 @@
+ # frozen_string_literal: true
+
+ require 'net/http'
+ require 'json'
+ require 'uri'
+
+ module Spectre
+   module Openrouter
+     class Completions
+       API_URL = 'https://openrouter.ai/api/v1/chat/completions'
+       DEFAULT_MODEL = 'openai/gpt-4o-mini'
+       DEFAULT_TIMEOUT = 60
+
+       # Generate a completion based on user messages and optional tools
+       #
+       # @param messages [Array<Hash>] The conversation messages, each with a role and content
+       # @param model [String] The model to be used for generating completions
+       # @param json_schema [Hash, nil] An optional JSON schema to enforce structured output (OpenAI-compatible)
+       # @param tools [Array<Hash>, nil] An optional array of tool definitions for function calling
+       # @param args [Hash, nil] optional arguments like read_timeout and open_timeout. Provide max_tokens at the top level only.
+       # @return [Hash] The parsed response including any tool calls or content
+       # @raise [APIKeyNotConfiguredError] If the API key is not set
+       # @raise [RuntimeError] For general API errors or unexpected issues
+       def self.create(messages:, model: DEFAULT_MODEL, json_schema: nil, tools: nil, **args)
+         cfg = Spectre.openrouter_configuration
+         api_key = cfg&.api_key
+         raise APIKeyNotConfiguredError, 'API key is not configured' unless api_key
+
+         validate_messages!(messages)
+
+         uri = URI(API_URL)
+         http = Net::HTTP.new(uri.host, uri.port)
+         http.use_ssl = true
+         http.read_timeout = args.fetch(:read_timeout, DEFAULT_TIMEOUT)
+         http.open_timeout = args.fetch(:open_timeout, DEFAULT_TIMEOUT)
+
+         headers = {
+           'Content-Type' => 'application/json',
+           'Authorization' => "Bearer #{api_key}"
+         }
+         headers['HTTP-Referer'] = cfg.referer if cfg.respond_to?(:referer) && cfg.referer
+         headers['X-Title'] = cfg.app_title if cfg.respond_to?(:app_title) && cfg.app_title
+
+         request = Net::HTTP::Post.new(uri.path, headers)
+
+         max_tokens = args[:max_tokens]
+         request.body = generate_body(messages, model, json_schema, max_tokens, tools).to_json
+         response = http.request(request)
+
+         unless response.is_a?(Net::HTTPSuccess)
+           raise "OpenRouter API Error: #{response.code} - #{response.message}: #{response.body}"
+         end
+
+         parsed_response = JSON.parse(response.body)
+         handle_response(parsed_response)
+       rescue JSON::ParserError => e
+         raise "JSON Parse Error: #{e.message}"
+       end
+
+       private
+
+       def self.validate_messages!(messages)
+         unless messages.is_a?(Array) && messages.all? { |msg| msg.is_a?(Hash) }
+           raise ArgumentError, 'Messages must be an array of message hashes.'
+         end
+         raise ArgumentError, 'Messages cannot be empty.' if messages.empty?
+       end
+
+       def self.generate_body(messages, model, json_schema, max_tokens, tools)
+         body = {
+           model: model,
+           messages: messages
+         }
+         body[:max_tokens] = max_tokens if max_tokens
+         body[:response_format] = { type: 'json_schema', json_schema: json_schema } if json_schema
+         body[:tools] = tools if tools
+         body
+       end
+
+       # Handle OpenRouter finish reasons
+       # https://openrouter.ai/docs/api-reference/overview#finish-reason
+       def self.handle_response(response)
+         message = response.dig('choices', 0, 'message') || {}
+         finish_reason = response.dig('choices', 0, 'finish_reason')
+
+         if message['refusal']
+           raise "Refusal: #{message['refusal']}"
+         end
+
+         case finish_reason
+         when 'stop'
+           return { content: message['content'] }
+         when 'tool_calls', 'function_call'
+           return { tool_calls: message['tool_calls'], content: message['content'] }
+         when 'length', 'model_length'
+           raise 'Incomplete response: The completion was cut off due to token limit.'
+         when 'content_filter'
+           raise "Content filtered: The model's output was blocked due to policy violations."
+         when 'error'
+           raise "Model returned finish_reason=error: #{response.inspect}"
+         else
+           raise "Unexpected finish_reason: #{finish_reason}"
+         end
+       end
+     end
+   end
+ end
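
Editor's note: as `handle_response` above shows, a successful call returns `{ content: ... }` on `finish_reason: 'stop'` and `{ tool_calls: ..., content: ... }` on `tool_calls`/`function_call`, while length, content-filter, error and refusal cases raise. Below is a brief caller-side sketch; the tool definition uses the OpenAI-style `tools` shape that OpenRouter mirrors, so its exact fields (and the `get_weather` tool itself) are hypothetical illustrations, not something this gem defines.

```ruby
# Editor's sketch: branching on the hash returned by Completions.create above.
weather_tool = {
  type: 'function',
  function: {
    name: 'get_weather',                       # hypothetical tool, for illustration only
    description: 'Look up the current weather for a city',
    parameters: {
      type: 'object',
      properties: { city: { type: 'string' } },
      required: ['city']
    }
  }
}

result = Spectre::Openrouter::Completions.create(
  messages: [{ role: 'user', content: 'What is the weather in Lisbon?' }],
  tools: [weather_tool]
)

if result[:tool_calls]
  # finish_reason was 'tool_calls' / 'function_call' — run the requested tool(s)
  result[:tool_calls].each { |call| puts call.inspect }
else
  # finish_reason was 'stop' — plain text answer
  puts result[:content]
end
```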
data/lib/spectre/openrouter/embeddings.rb ADDED
@@ -0,0 +1,54 @@
+ # frozen_string_literal: true
+
+ require 'net/http'
+ require 'json'
+ require 'uri'
+
+ module Spectre
+   module Openrouter
+     class Embeddings
+       API_URL = 'https://openrouter.ai/api/v1/embeddings'
+       DEFAULT_MODEL = 'text-embedding-3-small' # OpenRouter proxies OpenAI and others; user can override with provider/model
+       DEFAULT_TIMEOUT = 60
+
+       # Generate embeddings for a given text
+       #
+       # @param text [String] the text input for which embeddings are to be generated
+       # @param model [String] the model to be used for generating embeddings, defaults to DEFAULT_MODEL
+       # @param args [Hash] optional arguments like read_timeout and open_timeout
+       # @return [Array<Float>] the generated embedding vector
+       # @raise [APIKeyNotConfiguredError] if the API key is not set
+       # @raise [RuntimeError] for general API errors or unexpected issues
+       def self.create(text, model: DEFAULT_MODEL, **args)
+         cfg = Spectre.openrouter_configuration
+         api_key = cfg&.api_key
+         raise APIKeyNotConfiguredError, 'API key is not configured' unless api_key
+
+         uri = URI(API_URL)
+         http = Net::HTTP.new(uri.host, uri.port)
+         http.use_ssl = true
+         http.read_timeout = args.fetch(:read_timeout, DEFAULT_TIMEOUT)
+         http.open_timeout = args.fetch(:open_timeout, DEFAULT_TIMEOUT)
+
+         headers = {
+           'Content-Type' => 'application/json',
+           'Authorization' => "Bearer #{api_key}"
+         }
+         headers['HTTP-Referer'] = cfg.referer if cfg.respond_to?(:referer) && cfg.referer
+         headers['X-Title'] = cfg.app_title if cfg.respond_to?(:app_title) && cfg.app_title
+
+         request = Net::HTTP::Post.new(uri.path, headers)
+         request.body = { model: model, input: text }.to_json
+         response = http.request(request)
+
+         unless response.is_a?(Net::HTTPSuccess)
+           raise "OpenRouter API Error: #{response.code} - #{response.message}: #{response.body}"
+         end
+
+         JSON.parse(response.body).dig('data', 0, 'embedding')
+       rescue JSON::ParserError => e
+         raise "JSON Parse Error: #{e.message}"
+       end
+     end
+   end
+ end
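
Editor's note: the `**args` splat above is consumed only for `read_timeout` and `open_timeout` (both default to 60 seconds), and the method returns the raw embedding vector from `data[0].embedding`. A small usage sketch building on the changelog's example:

```ruby
# Editor's sketch: tightening the HTTP timeouts supported by the **args splat above.
vector = Spectre::Openrouter::Embeddings.create(
  'Spectre is a Ruby gem for working with LLM providers.',
  model: 'text-embedding-3-small',
  read_timeout: 30,
  open_timeout: 10
)
vector.first(5) # => the first few floats of the embedding vector
```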
data/lib/spectre/openrouter.rb ADDED
@@ -0,0 +1,9 @@
+ # frozen_string_literal: true
+
+ module Spectre
+   module Openrouter
+     # Require each specific client file here
+     require_relative 'openrouter/embeddings'
+     require_relative 'openrouter/completions'
+   end
+ end
data/lib/spectre/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module Spectre # :nodoc:all
- VERSION = "2.0.0"
+ VERSION = "2.1.0"
  end
data/lib/spectre.rb CHANGED
@@ -7,6 +7,7 @@ require "spectre/openai"
  require "spectre/ollama"
  require "spectre/claude"
  require "spectre/gemini"
+ require "spectre/openrouter"
  require "spectre/logging"
  require 'spectre/prompt'
  require 'spectre/errors'
@@ -16,8 +17,8 @@ module Spectre
  openai: Spectre::Openai,
  ollama: Spectre::Ollama,
  claude: Spectre::Claude,
- gemini: Spectre::Gemini
- # cohere: Spectre::Cohere,
+ gemini: Spectre::Gemini,
+ openrouter: Spectre::Openrouter
  }.freeze

  def self.included(base)
@@ -66,6 +67,11 @@ module Spectre
  yield @providers[:gemini] if block_given?
  end

+ def openrouter
+ @providers[:openrouter] ||= OpenrouterConfiguration.new
+ yield @providers[:openrouter] if block_given?
+ end
+
  def provider_configuration
  providers[default_llm_provider] || raise("No configuration found for provider: #{default_llm_provider}")
  end
@@ -87,6 +93,11 @@ module Spectre
  attr_accessor :api_key
  end

+ class OpenrouterConfiguration
+ # OpenRouter additionally recommends setting Referer and X-Title headers
+ attr_accessor :api_key, :referer, :app_title
+ end
+
  class << self
  attr_accessor :config

@@ -120,6 +131,10 @@ module Spectre
  config.providers[:gemini]
  end

+ def openrouter_configuration
+ config.providers[:openrouter]
+ end
+
  private

  def validate_llm_provider!
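
Editor's note: taken together, the data/lib/spectre.rb hunks register `:openrouter` in the provider map, add a `c.openrouter` configuration block, expose `Spectre.openrouter_configuration`, and introduce `OpenrouterConfiguration` with `api_key`, `referer` and `app_title`. A minimal end-to-end setup sketch (the referer and title values are placeholders):

```ruby
# Editor's sketch: wiring the OpenrouterConfiguration added above.
# Both optional attributes map onto the optional request headers.
Spectre.setup do |config|
  config.default_llm_provider = :openrouter
  config.openrouter do |o|
    o.api_key   = ENV['OPENROUTER_API_KEY']
    o.referer   = 'https://your.app'   # sent as HTTP-Referer when present
    o.app_title = 'Your App'           # sent as X-Title when present
  end
end

Spectre.openrouter_configuration.api_key # => the configured key
```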
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: spectre_ai
  version: !ruby/object:Gem::Version
- version: 2.0.0
+ version: 2.1.0
  platform: ruby
  authors:
  - Ilya Klapatok
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2025-09-24 00:00:00.000000000 Z
+ date: 2025-11-13 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: rspec-rails
@@ -67,6 +67,9 @@ files:
  - lib/spectre/openai.rb
  - lib/spectre/openai/completions.rb
  - lib/spectre/openai/embeddings.rb
+ - lib/spectre/openrouter.rb
+ - lib/spectre/openrouter/completions.rb
+ - lib/spectre/openrouter/embeddings.rb
  - lib/spectre/prompt.rb
  - lib/spectre/searchable.rb
  - lib/spectre/version.rb