dspy 0.30.0 → 0.31.0
This diff compares the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +68 -37
- data/lib/dspy/callbacks.rb +21 -2
- data/lib/dspy/context.rb +52 -1
- data/lib/dspy/evals.rb +21 -2
- data/lib/dspy/lm/adapter_factory.rb +40 -17
- data/lib/dspy/lm/errors.rb +3 -0
- data/lib/dspy/lm/json_strategy.rb +24 -8
- data/lib/dspy/lm.rb +62 -19
- data/lib/dspy/module.rb +213 -17
- data/lib/dspy/prompt.rb +94 -36
- data/lib/dspy/re_act.rb +50 -17
- data/lib/dspy/schema/sorbet_json_schema.rb +5 -2
- data/lib/dspy/schema/sorbet_toon_adapter.rb +80 -0
- data/lib/dspy/structured_outputs_prompt.rb +5 -3
- data/lib/dspy/type_serializer.rb +2 -1
- data/lib/dspy/version.rb +1 -1
- data/lib/dspy.rb +6 -0
- metadata +14 -51
- data/lib/dspy/lm/adapters/anthropic_adapter.rb +0 -291
- data/lib/dspy/lm/adapters/gemini/schema_converter.rb +0 -186
- data/lib/dspy/lm/adapters/gemini_adapter.rb +0 -220
- data/lib/dspy/lm/adapters/ollama_adapter.rb +0 -73
- data/lib/dspy/lm/adapters/openai/schema_converter.rb +0 -359
- data/lib/dspy/lm/adapters/openai_adapter.rb +0 -188
- data/lib/dspy/lm/adapters/openrouter_adapter.rb +0 -68
data/lib/dspy/lm/adapters/gemini/schema_converter.rb
@@ -1,186 +0,0 @@
-# frozen_string_literal: true
-
-require "sorbet-runtime"
-
-module DSPy
-  class LM
-    module Adapters
-      module Gemini
-        # Converts DSPy signatures to Gemini structured output format
-        class SchemaConverter
-          extend T::Sig
-
-          # Models that support structured outputs (JSON + Schema)
-          # Based on official Google documentation: https://ai.google.dev/gemini-api/docs/models/gemini
-          # Last updated: Oct 2025
-          # Note: Gemini 1.5 series deprecated Oct 2025
-          STRUCTURED_OUTPUT_MODELS = T.let([
-            # Gemini 2.0 series
-            "gemini-2.0-flash",
-            "gemini-2.0-flash-lite",
-            # Gemini 2.5 series (current)
-            "gemini-2.5-pro",
-            "gemini-2.5-flash",
-            "gemini-2.5-flash-lite",
-            "gemini-2.5-flash-image"
-          ].freeze, T::Array[String])
-
-          # Models that do not support structured outputs or are deprecated
-          UNSUPPORTED_MODELS = T.let([
-            # Legacy Gemini 1.0 series
-            "gemini-pro",
-            "gemini-1.0-pro-002",
-            "gemini-1.0-pro",
-            # Deprecated Gemini 1.5 series (removed Oct 2025)
-            "gemini-1.5-pro",
-            "gemini-1.5-pro-preview-0514",
-            "gemini-1.5-pro-preview-0409",
-            "gemini-1.5-flash",
-            "gemini-1.5-flash-8b"
-          ].freeze, T::Array[String])
-
-          sig { params(signature_class: T.class_of(DSPy::Signature)).returns(T::Hash[Symbol, T.untyped]) }
-          def self.to_gemini_format(signature_class)
-            # Get the output JSON schema from the signature class
-            output_schema = signature_class.output_json_schema
-
-            # Convert to Gemini format (OpenAPI 3.0 Schema subset - not related to OpenAI)
-            convert_dspy_schema_to_gemini(output_schema)
-          end
-
-          sig { params(model: String).returns(T::Boolean) }
-          def self.supports_structured_outputs?(model)
-            # Extract base model name without provider prefix
-            base_model = model.sub(/^gemini\//, "")
-
-            # Check if it's a supported model or a newer version
-            STRUCTURED_OUTPUT_MODELS.any? { |supported| base_model.start_with?(supported) }
-          end
-
-          sig { params(schema: T::Hash[Symbol, T.untyped]).returns(T::Array[String]) }
-          def self.validate_compatibility(schema)
-            issues = []
-
-            # Check for deeply nested objects (Gemini has depth limits)
-            depth = calculate_depth(schema)
-            if depth > 5
-              issues << "Schema depth (#{depth}) exceeds recommended limit of 5 levels"
-            end
-
-            issues
-          end
-
-          private
-
-          sig { params(dspy_schema: T::Hash[Symbol, T.untyped]).returns(T::Hash[Symbol, T.untyped]) }
-          def self.convert_dspy_schema_to_gemini(dspy_schema)
-            # For Gemini's responseJsonSchema, we need pure JSON Schema format
-            # Remove OpenAPI-specific fields like "$schema"
-            result = {
-              type: "object",
-              properties: {},
-              required: []
-            }
-
-            # Convert properties
-            properties = dspy_schema[:properties] || {}
-            properties.each do |prop_name, prop_schema|
-              result[:properties][prop_name] = convert_property_to_gemini(prop_schema)
-            end
-
-            # Set required fields
-            result[:required] = (dspy_schema[:required] || []).map(&:to_s)
-
-            result
-          end
-
-          sig { params(property_schema: T::Hash[Symbol, T.untyped]).returns(T::Hash[Symbol, T.untyped]) }
-          def self.convert_property_to_gemini(property_schema)
-            # Handle oneOf/anyOf schemas (union types) - Gemini supports these in responseJsonSchema
-            if property_schema[:oneOf]
-              return {
-                oneOf: property_schema[:oneOf].map { |schema| convert_property_to_gemini(schema) },
-                description: property_schema[:description]
-              }.compact
-            end
-
-            if property_schema[:anyOf]
-              return {
-                anyOf: property_schema[:anyOf].map { |schema| convert_property_to_gemini(schema) },
-                description: property_schema[:description]
-              }.compact
-            end
-
-            case property_schema[:type]
-            when "string"
-              result = { type: "string" }
-              # Gemini responseJsonSchema doesn't support const, so convert to single-value enum
-              # See: https://ai.google.dev/api/generate-content#FIELDS.response_json_schema
-              if property_schema[:const]
-                result[:enum] = [property_schema[:const]]
-              elsif property_schema[:enum]
-                result[:enum] = property_schema[:enum]
-              end
-              result
-            when "integer"
-              { type: "integer" }
-            when "number"
-              { type: "number" }
-            when "boolean"
-              { type: "boolean" }
-            when "array"
-              {
-                type: "array",
-                items: convert_property_to_gemini(property_schema[:items] || { type: "string" })
-              }
-            when "object"
-              result = { type: "object" }
-
-              if property_schema[:properties]
-                result[:properties] = {}
-                property_schema[:properties].each do |nested_prop, nested_schema|
-                  result[:properties][nested_prop] = convert_property_to_gemini(nested_schema)
-                end
-
-                # Set required fields for nested objects
-                if property_schema[:required]
-                  result[:required] = property_schema[:required].map(&:to_s)
-                end
-              end
-
-              result
-            else
-              # Default to string for unknown types
-              { type: "string" }
-            end
-          end
-
-          sig { params(schema: T::Hash[Symbol, T.untyped], current_depth: Integer).returns(Integer) }
-          def self.calculate_depth(schema, current_depth = 0)
-            return current_depth unless schema.is_a?(Hash)
-
-            max_depth = current_depth
-
-            # Check properties
-            if schema[:properties].is_a?(Hash)
-              schema[:properties].each_value do |prop|
-                if prop.is_a?(Hash)
-                  prop_depth = calculate_depth(prop, current_depth + 1)
-                  max_depth = [max_depth, prop_depth].max
-                end
-              end
-            end
-
-            # Check array items
-            if schema[:items].is_a?(Hash)
-              items_depth = calculate_depth(schema[:items], current_depth + 1)
-              max_depth = [max_depth, items_depth].max
-            end
-
-            max_depth
-          end
-        end
-      end
-    end
-  end
-end
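For context, a minimal sketch of how the removed converter was driven in 0.30.0. The `Classify` signature class and the resulting hash are hypothetical and abridged; the sketch illustrates the prefix-based model check and the const-to-enum rewrite documented in the code above.

    converter = DSPy::LM::Adapters::Gemini::SchemaConverter

    # Prefix match against STRUCTURED_OUTPUT_MODELS, after stripping "gemini/"
    converter.supports_structured_outputs?('gemini/gemini-2.5-flash') # => true
    converter.supports_structured_outputs?('gemini/gemini-1.5-pro')   # => false (deprecated)

    # Classify is a hypothetical DSPy::Signature subclass; its output_json_schema
    # feeds the conversion. A :const string property comes back as a single-value
    # :enum, since Gemini's responseJsonSchema does not support const.
    gemini_schema = converter.to_gemini_format(Classify)
    # => { type: "object",
    #      properties: { label: { type: "string", enum: ["spam"] } },
    #      required: ["label"] }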
data/lib/dspy/lm/adapters/gemini_adapter.rb
@@ -1,220 +0,0 @@
-# frozen_string_literal: true
-
-require 'gemini-ai'
-require 'json'
-require_relative '../vision_models'
-
-module DSPy
-  class LM
-    class GeminiAdapter < Adapter
-      def initialize(model:, api_key:, structured_outputs: false)
-        super(model: model, api_key: api_key)
-        validate_api_key!(api_key, 'gemini')
-
-        @structured_outputs_enabled = structured_outputs
-
-        # Disable streaming for VCR tests since SSE responses don't record properly
-        # But keep streaming enabled for SSEVCR tests (SSE-specific cassettes)
-        @use_streaming = true
-        begin
-          vcr_active = defined?(VCR) && VCR.current_cassette
-          ssevcr_active = defined?(SSEVCR) && SSEVCR.turned_on?
-
-          # Only disable streaming if regular VCR is active but SSEVCR is not
-          @use_streaming = false if vcr_active && !ssevcr_active
-        rescue
-          # If VCR/SSEVCR is not available or any error occurs, use streaming
-          @use_streaming = true
-        end
-
-        @client = Gemini.new(
-          credentials: {
-            service: 'generative-language-api',
-            api_key: api_key,
-            version: 'v1beta' # Use beta API version for structured outputs support
-          },
-          options: {
-            model: model,
-            server_sent_events: @use_streaming
-          }
-        )
-      end
-
-      def chat(messages:, signature: nil, **extra_params, &block)
-        normalized_messages = normalize_messages(messages)
-
-        # Validate vision support if images are present
-        if contains_images?(normalized_messages)
-          VisionModels.validate_vision_support!('gemini', model)
-          # Convert messages to Gemini format with proper image handling
-          normalized_messages = format_multimodal_messages(normalized_messages)
-        end
-
-        # Convert DSPy message format to Gemini format
-        gemini_messages = convert_messages_to_gemini_format(normalized_messages)
-
-        request_params = {
-          contents: gemini_messages
-        }.merge(extra_params)
-
-        begin
-          content = ""
-          final_response_data = nil
-
-          # Check if we're using streaming or not
-          if @use_streaming
-            # Streaming mode
-            @client.stream_generate_content(request_params) do |chunk|
-              # Handle case where chunk might be a string (from SSE VCR)
-              if chunk.is_a?(String)
-                begin
-                  chunk = JSON.parse(chunk)
-                rescue JSON::ParserError => e
-                  raise AdapterError, "Failed to parse Gemini streaming response: #{e.message}"
-                end
-              end
-
-              # Extract content from chunks
-              if chunk.dig('candidates', 0, 'content', 'parts')
-                chunk_text = extract_text_from_parts(chunk.dig('candidates', 0, 'content', 'parts'))
-                content += chunk_text
-
-                # Call block only if provided (for real streaming)
-                block.call(chunk) if block_given?
-              end
-
-              # Store final response data (usage, metadata) from last chunk
-              if chunk['usageMetadata'] || chunk.dig('candidates', 0, 'finishReason')
-                final_response_data = chunk
-              end
-            end
-          else
-            # Non-streaming mode (for VCR tests)
-            response = @client.generate_content(request_params)
-
-            # Extract content from single response
-            if response.dig('candidates', 0, 'content', 'parts')
-              content = extract_text_from_parts(response.dig('candidates', 0, 'content', 'parts'))
-            end
-
-            # Use response as final data
-            final_response_data = response
-          end
-
-          # Extract usage information from final chunk
-          usage_data = final_response_data&.dig('usageMetadata')
-          usage_struct = usage_data ? UsageFactory.create('gemini', usage_data) : nil
-
-          # Create metadata from final chunk
-          metadata = {
-            provider: 'gemini',
-            model: model,
-            finish_reason: final_response_data&.dig('candidates', 0, 'finishReason'),
-            safety_ratings: final_response_data&.dig('candidates', 0, 'safetyRatings'),
-            streaming: block_given?
-          }
-
-          # Create typed metadata
-          typed_metadata = ResponseMetadataFactory.create('gemini', metadata)
-
-          Response.new(
-            content: content,
-            usage: usage_struct,
-            metadata: typed_metadata
-          )
-        rescue => e
-          handle_gemini_error(e)
-        end
-      end
-
-      private
-
-      # Convert DSPy message format to Gemini format
-      def convert_messages_to_gemini_format(messages)
-        # Gemini expects contents array with role and parts
-        messages.map do |msg|
-          role = case msg[:role]
-          when 'system'
-            'user' # Gemini doesn't have explicit system role, merge with user
-          when 'assistant'
-            'model'
-          else
-            msg[:role]
-          end
-
-          if msg[:content].is_a?(Array)
-            # Multimodal content
-            parts = msg[:content].map do |item|
-              case item[:type]
-              when 'text'
-                { text: item[:text] }
-              when 'image'
-                item[:image].to_gemini_format
-              else
-                item
-              end
-            end
-
-            { role: role, parts: parts }
-          else
-            # Text-only content
-            { role: role, parts: [{ text: msg[:content] }] }
-          end
-        end
-      end
-
-      # Extract text content from Gemini parts array
-      def extract_text_from_parts(parts)
-        return "" unless parts.is_a?(Array)
-
-        parts.map { |part| part['text'] }.compact.join
-      end
-
-      # Format multimodal messages for Gemini
-      def format_multimodal_messages(messages)
-        messages.map do |msg|
-          if msg[:content].is_a?(Array)
-            # Convert multimodal content to Gemini format
-            formatted_content = msg[:content].map do |item|
-              case item[:type]
-              when 'text'
-                { type: 'text', text: item[:text] }
-              when 'image'
-                # Validate image compatibility before formatting
-                item[:image].validate_for_provider!('gemini')
-                item[:image].to_gemini_format
-              else
-                item
-              end
-            end
-
-            {
-              role: msg[:role],
-              content: formatted_content
-            }
-          else
-            msg
-          end
-        end
-      end
-
-      # Handle Gemini-specific errors
-      def handle_gemini_error(error)
-        error_msg = error.message.to_s
-
-        if error_msg.include?('API_KEY') || error_msg.include?('status 400') || error_msg.include?('status 401') || error_msg.include?('status 403')
-          raise AdapterError, "Gemini authentication failed: #{error_msg}. Check your API key."
-        elsif error_msg.include?('RATE_LIMIT') || error_msg.downcase.include?('quota') || error_msg.include?('status 429')
-          raise AdapterError, "Gemini rate limit exceeded: #{error_msg}. Please wait and try again."
-        elsif error_msg.include?('SAFETY') || error_msg.include?('blocked')
-          raise AdapterError, "Gemini content was blocked by safety filters: #{error_msg}"
-        elsif error_msg.include?('image') || error_msg.include?('media')
-          raise AdapterError, "Gemini image processing failed: #{error_msg}. Ensure your image is a valid format and under size limits."
-        else
-          # Generic error handling
-          raise AdapterError, "Gemini adapter error: #{error_msg}"
-        end
-      end
-    end
-  end
-end
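The removed adapter's main translation step was the role mapping in `convert_messages_to_gemini_format`. A sketch with invented input messages:

    # Invented input; shows how the removed adapter mapped DSPy roles to Gemini's.
    messages = [
      { role: 'system',    content: 'You are terse.' },   # Gemini has no system role
      { role: 'user',      content: 'Hi' },
      { role: 'assistant', content: 'Hello.' }
    ]

    # convert_messages_to_gemini_format(messages) produced:
    # [
    #   { role: 'user',  parts: [{ text: 'You are terse.' }] },  # system folded into user
    #   { role: 'user',  parts: [{ text: 'Hi' }] },
    #   { role: 'model', parts: [{ text: 'Hello.' }] }           # assistant becomes model
    # ]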
data/lib/dspy/lm/adapters/ollama_adapter.rb
@@ -1,73 +0,0 @@
-# frozen_string_literal: true
-
-require 'openai'
-
-module DSPy
-  class LM
-    class OllamaAdapter < OpenAIAdapter
-      DEFAULT_BASE_URL = 'http://localhost:11434/v1'
-
-      def initialize(model:, api_key: nil, base_url: nil, structured_outputs: true)
-        # Ollama doesn't require API key for local instances
-        # But may need it for remote/protected instances
-        api_key ||= 'ollama' # OpenAI client requires non-empty key
-        base_url ||= DEFAULT_BASE_URL
-
-        # Store base_url before calling super
-        @base_url = base_url
-
-        # Don't call parent's initialize, do it manually to control client creation
-        @model = model
-        @api_key = api_key
-        @structured_outputs_enabled = structured_outputs
-        validate_configuration!
-
-        # Create client with custom base URL
-        @client = OpenAI::Client.new(
-          api_key: @api_key,
-          base_url: @base_url
-        )
-      end
-
-      def chat(messages:, signature: nil, response_format: nil, &block)
-        # For Ollama, we need to be more lenient with structured outputs
-        # as it may not fully support OpenAI's response_format spec
-        begin
-          super
-        rescue => e
-          # If structured output fails, retry with enhanced prompting
-          if @structured_outputs_enabled && signature && e.message.include?('response_format')
-            DSPy.logger.debug("Ollama structured output failed, falling back to enhanced prompting")
-            @structured_outputs_enabled = false
-            retry
-          else
-            raise
-          end
-        end
-      end
-
-      private
-
-      def validate_configuration!
-        super
-        # Additional Ollama-specific validation could go here
-      end
-
-      def validate_api_key!(api_key, provider)
-        # For Ollama, API key is optional for local instances
-        # Only validate if it looks like a remote URL
-        if @base_url && !@base_url.include?('localhost') && !@base_url.include?('127.0.0.1')
-          super
-        end
-      end
-
-
-      # Ollama may have different model support for structured outputs
-      def supports_structured_outputs?
-        # For now, assume all Ollama models support basic JSON mode
-        # but may not support full OpenAI structured output spec
-        true
-      end
-    end
-  end
-end
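Before its removal, the adapter could be exercised as below (model name invented; the defaults fill in the local URL and placeholder key). On a `response_format` error it flipped `@structured_outputs_enabled` off and reissued the request via `retry`, falling back to enhanced prompting.

    # Sketch of the removed adapter's usage and fallback path (model name invented).
    adapter = DSPy::LM::OllamaAdapter.new(model: 'llama3.1')
    # base_url defaults to http://localhost:11434/v1, api_key to 'ollama'

    # If the endpoint rejects response_format, chat logs a debug line,
    # disables structured outputs, and retries the same request.
    response = adapter.chat(messages: [{ role: 'user', content: 'Hi' }])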