dspy 0.30.1 → 0.31.1
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- checksums.yaml +4 -4
- data/README.md +51 -64
- data/lib/dspy/evals.rb +21 -2
- data/lib/dspy/lm/adapter_factory.rb +40 -17
- data/lib/dspy/lm/errors.rb +3 -0
- data/lib/dspy/lm/json_strategy.rb +24 -8
- data/lib/dspy/lm.rb +62 -19
- data/lib/dspy/mixins/type_coercion.rb +2 -0
- data/lib/dspy/module.rb +6 -6
- data/lib/dspy/prompt.rb +207 -36
- data/lib/dspy/re_act.rb +50 -17
- data/lib/dspy/schema/sorbet_json_schema.rb +5 -2
- data/lib/dspy/schema/sorbet_toon_adapter.rb +81 -0
- data/lib/dspy/structured_outputs_prompt.rb +5 -3
- data/lib/dspy/type_serializer.rb +2 -1
- data/lib/dspy/version.rb +1 -1
- metadata +14 -51
- data/lib/dspy/lm/adapters/anthropic_adapter.rb +0 -291
- data/lib/dspy/lm/adapters/gemini/schema_converter.rb +0 -186
- data/lib/dspy/lm/adapters/gemini_adapter.rb +0 -220
- data/lib/dspy/lm/adapters/ollama_adapter.rb +0 -73
- data/lib/dspy/lm/adapters/openai/schema_converter.rb +0 -359
- data/lib/dspy/lm/adapters/openai_adapter.rb +0 -188
- data/lib/dspy/lm/adapters/openrouter_adapter.rb +0 -68
data/lib/dspy/lm/adapters/openai_adapter.rb
@@ -1,188 +0,0 @@
-# frozen_string_literal: true
-
-require 'openai'
-require_relative 'openai/schema_converter'
-require_relative '../vision_models'
-
-module DSPy
-  class LM
-    class OpenAIAdapter < Adapter
-      def initialize(model:, api_key:, structured_outputs: false)
-        super(model: model, api_key: api_key)
-        validate_api_key!(api_key, 'openai')
-        @client = OpenAI::Client.new(api_key: api_key)
-        @structured_outputs_enabled = structured_outputs
-      end
-
-      def chat(messages:, signature: nil, response_format: nil, &block)
-        normalized_messages = normalize_messages(messages)
-
-        # Validate vision support if images are present
-        if contains_images?(normalized_messages)
-          VisionModels.validate_vision_support!('openai', model)
-          # Convert messages to OpenAI format with proper image handling
-          normalized_messages = format_multimodal_messages(normalized_messages)
-        end
-
-        # Handle O1 model restrictions - convert system messages to user messages
-        if o1_model?(model)
-          normalized_messages = handle_o1_messages(normalized_messages)
-        end
-
-        request_params = default_request_params.merge(
-          messages: normalized_messages
-        )
-
-        # Add temperature based on model capabilities
-        unless o1_model?(model)
-          temperature = case model
-                        when /^gpt-5/, /^gpt-4o/
-                          1.0 # GPT-5 and GPT-4o models only support default temperature of 1.0
-                        else
-                          0.0 # Near-deterministic for other models (0.0 no longer universally supported)
-                        end
-          request_params[:temperature] = temperature
-        end
-
-        # Add response format if provided by strategy
-        if response_format
-          request_params[:response_format] = response_format
-        elsif @structured_outputs_enabled && signature && supports_structured_outputs?
-          # Legacy behavior for backward compatibility
-          response_format = DSPy::LM::Adapters::OpenAI::SchemaConverter.to_openai_format(signature)
-          request_params[:response_format] = response_format
-        end
-
-        # Add streaming if block provided
-        if block_given?
-          request_params[:stream] = proc do |chunk, _bytesize|
-            block.call(chunk) if chunk.dig("choices", 0, "delta", "content")
-          end
-        end
-
-        begin
-          response = @client.chat.completions.create(**request_params)
-
-          if response.respond_to?(:error) && response.error
-            raise AdapterError, "OpenAI API error: #{response.error}"
-          end
-
-          choice = response.choices.first
-          message = choice.message
-          content = message.content
-          usage = response.usage
-
-          # Handle structured output refusals
-          if message.respond_to?(:refusal) && message.refusal
-            raise AdapterError, "OpenAI refused to generate output: #{message.refusal}"
-          end
-
-          # Convert usage data to typed struct
-          usage_struct = UsageFactory.create('openai', usage)
-
-          # Create typed metadata
-          metadata = ResponseMetadataFactory.create('openai', {
-            model: model,
-            response_id: response.id,
-            created: response.created,
-            structured_output: @structured_outputs_enabled && signature && supports_structured_outputs?,
-            system_fingerprint: response.system_fingerprint,
-            finish_reason: choice.finish_reason
-          })
-
-          Response.new(
-            content: content,
-            usage: usage_struct,
-            metadata: metadata
-          )
-        rescue => e
-          # Check for specific error types and messages
-          error_msg = e.message.to_s
-
-          # Try to parse error body if it looks like JSON
-          error_body = if error_msg.start_with?('{')
-            JSON.parse(error_msg) rescue nil
-          elsif e.respond_to?(:response) && e.response
-            e.response[:body] rescue nil
-          end
-
-          # Check for specific image-related errors
-          if error_msg.include?('image_parse_error') || error_msg.include?('unsupported image')
-            raise AdapterError, "Image processing failed: #{error_msg}. Ensure your image is a valid PNG, JPEG, GIF, or WebP format and under 5MB."
-          elsif error_msg.include?('rate') && error_msg.include?('limit')
-            raise AdapterError, "OpenAI rate limit exceeded: #{error_msg}. Please wait and try again."
-          elsif error_msg.include?('authentication') || error_msg.include?('API key') || error_msg.include?('Unauthorized')
-            raise AdapterError, "OpenAI authentication failed: #{error_msg}. Check your API key."
-          elsif error_body && error_body.dig('error', 'message')
-            raise AdapterError, "OpenAI API error: #{error_body.dig('error', 'message')}"
-          else
-            # Generic error handling
-            raise AdapterError, "OpenAI adapter error: #{e.message}"
-          end
-        end
-      end
-
-      protected
-
-      # Allow subclasses to override request params (add headers, etc)
-      def default_request_params
-        {
-          model: model
-        }
-      end
-
-      private
-
-      def supports_structured_outputs?
-        DSPy::LM::Adapters::OpenAI::SchemaConverter.supports_structured_outputs?(model)
-      end
-
-      def format_multimodal_messages(messages)
-        messages.map do |msg|
-          if msg[:content].is_a?(Array)
-            # Convert multimodal content to OpenAI format
-            formatted_content = msg[:content].map do |item|
-              case item[:type]
-              when 'text'
-                { type: 'text', text: item[:text] }
-              when 'image'
-                # Validate image compatibility before formatting
-                item[:image].validate_for_provider!('openai')
-                item[:image].to_openai_format
-              else
-                item
-              end
-            end
-
-            {
-              role: msg[:role],
-              content: formatted_content
-            }
-          else
-            msg
-          end
-        end
-      end
-
-      # Check if model is an O1 reasoning model (includes O1, O3, O4 series)
-      def o1_model?(model_name)
-        model_name.match?(/^o[134](-.*)?$/)
-      end
-
-      # Handle O1 model message restrictions
-      def handle_o1_messages(messages)
-        messages.map do |msg|
-          # Convert system messages to user messages for O1 models
-          if msg[:role] == 'system'
-            {
-              role: 'user',
-              content: "Instructions: #{msg[:content]}"
-            }
-          else
-            msg
-          end
-        end
-      end
-    end
-  end
-end
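For context when reviewing this removal: the deleted adapter exposed a plain constructor and a chat method. The snippet below is a minimal usage sketch based only on the signatures in the hunk above; the model id and message text are illustrative, not taken from the gem.

# Hypothetical usage of the removed adapter (signatures from the deleted file above;
# the model id and messages are illustrative).
adapter = DSPy::LM::OpenAIAdapter.new(
  model: 'gpt-4o-mini',            # illustrative model id
  api_key: ENV['OPENAI_API_KEY'],
  structured_outputs: true
)

response = adapter.chat(
  messages: [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'Summarize this diff in one sentence.' }
  ]
)

puts response.content  # Response struct built by the adapter (content, usage, metadata)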
data/lib/dspy/lm/adapters/openrouter_adapter.rb
@@ -1,68 +0,0 @@
-# frozen_string_literal: true
-
-require 'openai'
-
-module DSPy
-  class LM
-    class OpenrouterAdapter < OpenAIAdapter
-      BASE_URL = 'https://openrouter.ai/api/v1'
-
-      def initialize(model:, api_key: nil, structured_outputs: true, http_referrer: nil, x_title: nil)
-        # Don't call parent's initialize, do it manually to control client creation
-        @model = model
-        @api_key = api_key
-        @structured_outputs_enabled = structured_outputs
-
-        @http_referrer = http_referrer
-        @x_title = x_title
-
-        validate_configuration!
-
-        # Create client with custom base URL
-        @client = OpenAI::Client.new(
-          api_key: @api_key,
-          base_url: BASE_URL
-        )
-      end
-
-      def chat(messages:, signature: nil, response_format: nil, &block)
-        # For OpenRouter, we need to be more lenient with structured outputs
-        # as the model behind it may not fully support OpenAI's response_format spec
-        begin
-          super
-        rescue => e
-          # If structured output fails, retry with enhanced prompting
-          if @structured_outputs_enabled && signature && e.message.include?('response_format')
-            DSPy.logger.debug("OpenRouter structured output failed, falling back to enhanced prompting")
-            @structured_outputs_enabled = false
-            retry
-          else
-            raise
-          end
-        end
-      end
-
-      protected
-
-      # Add any OpenRouter-specific headers to all requests
-      def default_request_params
-        headers = {
-          'X-Title' => @x_title,
-          'HTTP-Referer' => @http_referrer
-        }.compact
-
-        upstream_params = super
-        upstream_params.merge!(request_options: { extra_headers: headers }) if headers.any?
-        upstream_params
-      end
-
-      private
-
-      def supports_structured_outputs?
-        # Different models behind OpenRouter may have different capabilities
-        # For now, we rely on whatever was passed to the constructor
-        @structured_outputs_enabled
-      end
-    end
-  end
-end
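Similarly, the removed OpenRouter adapter reused the OpenAI client against a custom base URL and accepted optional attribution headers. A minimal sketch follows, assuming the constructor keywords from the deleted file above; the referrer, title, and model id are illustrative.

# Hypothetical usage of the removed OpenRouter adapter (keywords from the deleted
# file above; header values and model id are illustrative).
adapter = DSPy::LM::OpenrouterAdapter.new(
  model: 'openai/gpt-4o-mini',          # illustrative OpenRouter model id
  api_key: ENV['OPENROUTER_API_KEY'],
  structured_outputs: true,
  http_referrer: 'https://example.com', # sent as the HTTP-Referer header
  x_title: 'Example App'                # sent as the X-Title header
)

response = adapter.chat(messages: [{ role: 'user', content: 'Hello!' }])
puts response.content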