open_router_enhanced 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. checksums.yaml +7 -0
  2. data/.env.example +1 -0
  3. data/.rspec +3 -0
  4. data/.rubocop.yml +13 -0
  5. data/.rubocop_todo.yml +130 -0
  6. data/.ruby-version +1 -0
  7. data/CHANGELOG.md +41 -0
  8. data/CODE_OF_CONDUCT.md +84 -0
  9. data/CONTRIBUTING.md +384 -0
  10. data/Gemfile +22 -0
  11. data/Gemfile.lock +138 -0
  12. data/LICENSE.txt +21 -0
  13. data/MIGRATION.md +556 -0
  14. data/README.md +1660 -0
  15. data/Rakefile +334 -0
  16. data/SECURITY.md +150 -0
  17. data/VCR_CONFIGURATION.md +80 -0
  18. data/docs/model_selection.md +637 -0
  19. data/docs/observability.md +430 -0
  20. data/docs/prompt_templates.md +422 -0
  21. data/docs/streaming.md +467 -0
  22. data/docs/structured_outputs.md +466 -0
  23. data/docs/tools.md +1016 -0
  24. data/examples/basic_completion.rb +122 -0
  25. data/examples/model_selection_example.rb +141 -0
  26. data/examples/observability_example.rb +199 -0
  27. data/examples/prompt_template_example.rb +184 -0
  28. data/examples/smart_completion_example.rb +89 -0
  29. data/examples/streaming_example.rb +176 -0
  30. data/examples/structured_outputs_example.rb +191 -0
  31. data/examples/tool_calling_example.rb +149 -0
  32. data/lib/open_router/client.rb +552 -0
  33. data/lib/open_router/http.rb +118 -0
  34. data/lib/open_router/json_healer.rb +263 -0
  35. data/lib/open_router/model_registry.rb +378 -0
  36. data/lib/open_router/model_selector.rb +462 -0
  37. data/lib/open_router/prompt_template.rb +290 -0
  38. data/lib/open_router/response.rb +371 -0
  39. data/lib/open_router/schema.rb +288 -0
  40. data/lib/open_router/streaming_client.rb +210 -0
  41. data/lib/open_router/tool.rb +221 -0
  42. data/lib/open_router/tool_call.rb +180 -0
  43. data/lib/open_router/usage_tracker.rb +277 -0
  44. data/lib/open_router/version.rb +5 -0
  45. data/lib/open_router.rb +123 -0
  46. data/sig/open_router.rbs +20 -0
  47. metadata +186 -0
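
Each example in the hunks below assumes the gem is already installed and loaded. As a rough orientation sketch only (the gem name comes from the package metadata above; the "~> 1.0" pin and the Gemfile usage are illustrative and not part of this diff), setup would look something like:

    # Gemfile — illustrative version constraint for this package
    gem "open_router_enhanced", "~> 1.0"

    # each example script then loads the library under its lib/ name
    require "open_router"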
data/examples/basic_completion.rb
@@ -0,0 +1,122 @@
+ # frozen_string_literal: true
+
+ require "open_router"
+
+ # Basic completion example using OpenRouter Enhanced gem
+ #
+ # This example demonstrates:
+ # - Simple client initialization
+ # - Basic chat completion
+ # - Accessing response data
+ # - Cost tracking
+
+ # Configure the client
+ OpenRouter.configure do |config|
+   config.access_token = ENV["OPENROUTER_API_KEY"]
+   config.site_name = "Basic Completion Example"
+   config.site_url = "https://github.com/yourusername/open_router_enhanced"
+ end
+
+ # Initialize client
+ client = OpenRouter::Client.new
+
+ puts "=" * 60
+ puts "Basic Completion Example"
+ puts "=" * 60
+
+ # Simple completion
+ puts "\n1. Simple Chat Completion"
+ puts "-" * 60
+
+ messages = [
+   { role: "user", content: "What is the capital of France?" }
+ ]
+
+ response = client.complete(
+   messages,
+   model: "openai/gpt-4o-mini"
+ )
+
+ puts "Response: #{response.content}"
+ puts "Model: #{response.model}"
+ puts "Tokens used: #{response.total_tokens}"
+
+ # Multi-turn conversation
+ puts "\n2. Multi-turn Conversation"
+ puts "-" * 60
+
+ conversation = [
+   { role: "user", content: "Tell me a short joke about programming" }
+ ]
+
+ response = client.complete(
+   conversation,
+   model: "anthropic/claude-3-haiku"
+ )
+
+ puts "Assistant: #{response.content}"
+ conversation << { role: "assistant", content: response.content }
+
+ # Follow-up question
+ conversation << { role: "user", content: "Explain why that's funny" }
+
+ response = client.complete(
+   conversation,
+   model: "anthropic/claude-3-haiku"
+ )
+
+ puts "\nExplanation: #{response.content}"
+
+ # System message
+ puts "\n3. Using System Messages"
+ puts "-" * 60
+
+ messages = [
+   { role: "system", content: "You are a helpful but concise assistant. Keep responses under 50 words." },
+   { role: "user", content: "Explain quantum computing" }
+ ]
+
+ response = client.complete(
+   messages,
+   model: "openai/gpt-4o-mini",
+   extras: { max_tokens: 100 }
+ )
+
+ puts "Concise response: #{response.content}"
+ puts "Completion tokens: #{response.completion_tokens}"
+
+ # Error handling
+ puts "\n4. Error Handling"
+ puts "-" * 60
+
+ begin
+   client.complete(
+     [{ role: "user", content: "Hello!" }],
+     model: "invalid/model-name"
+   )
+ rescue OpenRouter::ServerError => e
+   puts "Caught error: #{e.message}"
+ end
+
+ # Response metadata
+ puts "\n5. Response Metadata"
+ puts "-" * 60
+
+ response = client.complete(
+   [{ role: "user", content: "Say 'Hello, World!'" }],
+   model: "openai/gpt-4o-mini"
+ )
+
+ puts "Response ID: #{response.id}"
+ puts "Model: #{response.model}"
+ puts "Provider: #{response.provider}" if response.provider
+ puts "Finish reason: #{response.finish_reason}"
+ puts "Created at: #{Time.at(response.created).strftime("%Y-%m-%d %H:%M:%S")}" if response.created
+ puts "Prompt tokens: #{response.prompt_tokens}"
+ puts "Completion tokens: #{response.completion_tokens}"
+ puts "Total tokens: #{response.total_tokens}"
+ puts "Cached tokens: #{response.cached_tokens}"
+
+ puts "\n#{"=" * 60}"
+ puts "Example completed successfully!"
+ puts "=" * 60
data/examples/model_selection_example.rb
@@ -0,0 +1,141 @@
+ #!/usr/bin/env ruby
+ # frozen_string_literal: true
+
+ # Example demonstrating the ModelSelector functionality
+ # Run this with: ruby -I lib examples/model_selection_example.rb
+
+ require "open_router"
+
+ # Configure OpenRouter (you would set your actual API key)
+ OpenRouter.configure do |config|
+   config.access_token = ENV["OPENROUTER_API_KEY"] || "your-api-key-here"
+   config.site_name = "ModelSelector Example"
+   config.site_url = "https://example.com"
+ end
+
+ puts "🤖 OpenRouter ModelSelector Examples"
+ puts "=" * 50
+
+ # Example 1: Basic cost optimization
+ puts "\n1. Basic cost optimization:"
+ selector = OpenRouter::ModelSelector.new
+ cheapest_model = selector.optimize_for(:cost).choose
+
+ if cheapest_model
+   puts " Cheapest model: #{cheapest_model}"
+   cost_info = OpenRouter::ModelRegistry.get_model_info(cheapest_model)
+   puts " Cost: $#{cost_info[:cost_per_1k_tokens][:input]} per 1k input tokens"
+ else
+   puts " No models available"
+ end
+
+ # Example 2: Find models with specific capabilities
+ puts "\n2. Models with function calling capability:"
+ function_models = OpenRouter::ModelSelector.new
+   .require(:function_calling)
+   .optimize_for(:cost)
+   .choose_with_fallbacks(limit: 3)
+
+ if function_models.any?
+   function_models.each_with_index do |model, i|
+     puts " #{i + 1}. #{model}"
+   end
+ else
+   puts " No models with function calling found"
+ end
+
+ # Example 3: Budget-constrained selection with multiple requirements
+ puts "\n3. Budget-constrained selection ($0.01 max, with vision):"
+ budget_model = OpenRouter::ModelSelector.new
+   .within_budget(max_cost: 0.01)
+   .require(:vision)
+   .optimize_for(:cost)
+   .choose
+
+ if budget_model
+   puts " Selected: #{budget_model}"
+   model_info = OpenRouter::ModelRegistry.get_model_info(budget_model)
+   puts " Capabilities: #{model_info[:capabilities].join(", ")}"
+   puts " Cost: $#{model_info[:cost_per_1k_tokens][:input]} per 1k input tokens"
+ else
+   puts " No models found within budget with vision capability"
+ end
+
+ # Example 4: Provider preferences
+ puts "\n4. Prefer specific providers:"
+ provider_model = OpenRouter::ModelSelector.new
+   .prefer_providers("anthropic", "openai")
+   .require(:function_calling)
+   .optimize_for(:cost)
+   .choose
+
+ if provider_model
+   puts " Selected: #{provider_model}"
+   provider = provider_model.split("/").first
+   puts " Provider: #{provider}"
+ else
+   puts " No models found from preferred providers"
+ end
+
+ # Example 5: Latest models with fallback
+ puts "\n5. Latest models (with graceful fallback):"
+ latest_model = OpenRouter::ModelSelector.new
+   .optimize_for(:latest)
+   .require(:function_calling)
+   .min_context(100_000)
+   .choose_with_fallback
+
+ if latest_model
+   puts " Selected: #{latest_model}"
+   model_info = OpenRouter::ModelRegistry.get_model_info(latest_model)
+   puts " Context length: #{model_info[:context_length]} tokens"
+   puts " Released: #{Time.at(model_info[:created_at])}"
+ else
+   puts " No suitable models found"
+ end
+
+ # Example 6: Complex chaining example
+ puts "\n6. Complex requirements with method chaining:"
+ complex_selector = OpenRouter::ModelSelector.new
+   .optimize_for(:performance)
+   .require(:function_calling, :structured_outputs)
+   .within_budget(max_cost: 0.05)
+   .avoid_patterns("*-free", "*-preview")
+   .prefer_providers("anthropic", "openai")
+
+ models = complex_selector.choose_with_fallbacks(limit: 2)
+ if models.any?
+   puts " Found #{models.length} suitable models:"
+   models.each_with_index do |model, i|
+     model_info = OpenRouter::ModelRegistry.get_model_info(model)
+     puts " #{i + 1}. #{model} (#{model_info[:performance_tier]} tier)"
+   end
+ else
+   puts " No models meet all requirements"
+ end
+
+ # Example 7: Cost estimation
+ puts "\n7. Cost estimation:"
+ if cheapest_model
+   estimated_cost = OpenRouter::ModelSelector.new.estimate_cost(
+     cheapest_model,
+     input_tokens: 2000,
+     output_tokens: 500
+   )
+   puts " Cost for 2000 input + 500 output tokens with #{cheapest_model}:"
+   puts " $#{estimated_cost.round(6)}"
+ end
+
+ # Example 8: Selection criteria inspection
+ puts "\n8. Selection criteria:"
+ criteria = OpenRouter::ModelSelector.new
+   .optimize_for(:cost)
+   .require(:function_calling)
+   .within_budget(max_cost: 0.02)
+   .selection_criteria
+
+ puts " Strategy: #{criteria[:strategy]}"
+ puts " Required capabilities: #{criteria[:requirements][:capabilities]}"
+ puts " Max cost: $#{criteria[:requirements][:max_input_cost]}"
+
+ puts "\n✅ ModelSelector examples completed!"
data/examples/observability_example.rb
@@ -0,0 +1,199 @@
+ # frozen_string_literal: true
+
+ require "open_router"
+
+ # Observability example using OpenRouter Enhanced gem
+ #
+ # This example demonstrates:
+ # - Usage tracking and analytics
+ # - Cost monitoring
+ # - Performance metrics
+ # - Callback system for observability
+ # - Export capabilities
+
+ # Configure the client
+ OpenRouter.configure do |config|
+   config.access_token = ENV["OPENROUTER_API_KEY"]
+   config.site_name = "Observability Example"
+   config.site_url = "https://github.com/yourusername/open_router_enhanced"
+ end
+
+ puts "=" * 60
+ puts "Observability & Usage Tracking Example"
+ puts "=" * 60
+
+ # Example 1: Basic usage tracking
+ puts "\n1. Basic Usage Tracking"
+ puts "-" * 60
+
+ client = OpenRouter::Client.new(track_usage: true)
+
+ # Make some requests
+ 3.times do |i|
+   response = client.complete(
+     [{ role: "user", content: "Count to #{i + 1}" }],
+     model: "openai/gpt-4o-mini"
+   )
+   puts "Request #{i + 1}: #{response.total_tokens} tokens"
+ end
+
+ # View usage summary
+ tracker = client.usage_tracker
+ puts "\nUsage Summary:"
+ puts " Total requests: #{tracker.request_count}"
+ puts " Total tokens: #{tracker.total_tokens}"
+ puts " Prompt tokens: #{tracker.total_prompt_tokens}"
+ puts " Completion tokens: #{tracker.total_completion_tokens}"
+ puts " Cached tokens: #{tracker.total_cached_tokens}"
+ puts " Total cost: $#{tracker.total_cost.round(4)}"
+ puts " Average tokens/request: #{tracker.average_tokens_per_request.round(0)}"
+ puts " Average cost/request: $#{tracker.average_cost_per_request.round(4)}"
+
+ # Example 2: Per-model breakdown
+ puts "\n2. Per-Model Usage Breakdown"
+ puts "-" * 60
+
+ client.usage_tracker.reset! # Start fresh
+
+ # Use different models
+ client.complete([{ role: "user", content: "Say hi" }], model: "openai/gpt-4o-mini")
+ client.complete([{ role: "user", content: "Say hi" }], model: "anthropic/claude-3-haiku")
+ client.complete([{ role: "user", content: "Say hi" }], model: "openai/gpt-4o-mini")
+
+ # View per-model stats
+ puts "\nModel usage breakdown:"
+ client.usage_tracker.model_usage.each do |model, stats|
+   puts "\n #{model}:"
+   puts " Requests: #{stats[:requests]}"
+   puts " Tokens: #{stats[:prompt_tokens] + stats[:completion_tokens]}"
+   puts " Cost: $#{stats[:cost].round(4)}"
+ end
+
+ puts "\nMost used model: #{client.usage_tracker.most_used_model}"
+ puts "Most expensive model: #{client.usage_tracker.most_expensive_model}"
+
+ # Example 3: Cache hit rate tracking
+ puts "\n3. Cache Hit Rate Tracking"
+ puts "-" * 60
+
+ client.usage_tracker.reset!
+
+ # Make repeated requests (OpenRouter may cache)
+ 3.times do
+   client.complete(
+     [{ role: "user", content: "What is 2+2?" }],
+     model: "openai/gpt-4o-mini"
+   )
+ end
+
+ puts "Cache hit rate: #{client.usage_tracker.cache_hit_rate.round(2)}%"
+ puts "Cached tokens: #{client.usage_tracker.total_cached_tokens}"
+
+ # Example 4: Performance metrics
+ puts "\n4. Performance Metrics"
+ puts "-" * 60
+
+ tracker = client.usage_tracker
+ duration = tracker.session_duration
+
+ puts "Session duration: #{duration.round(2)} seconds"
+ puts "Tokens per second: #{tracker.tokens_per_second.round(2)}"
+ puts "Average tokens per request: #{tracker.average_tokens_per_request.round(0)}"
+
+ # Example 5: Callback-based monitoring
+ puts "\n5. Callback-Based Monitoring"
+ puts "-" * 60
+
+ monitored_client = OpenRouter::Client.new(track_usage: true)
+
+ # Set up monitoring callbacks
+ monitored_client.on(:before_request) do |params|
+   puts "→ Request starting (model: #{params[:model]})"
+ end
+
+ monitored_client.on(:after_response) do |response|
+   tokens = response.total_tokens
+   cost = response.cost_estimate
+   puts "← Response received: #{tokens} tokens" + (cost ? ", $#{cost.round(4)}" : "")
+ end
+
+ monitored_client.on(:on_error) do |error|
+   puts "✗ Error occurred: #{error.message}"
+ end
+
+ # Make some requests
+ monitored_client.complete(
+   [{ role: "user", content: "Hello!" }],
+   model: "openai/gpt-4o-mini"
+ )
+
+ # Example 6: Cost tracking and budgets
+ puts "\n6. Cost Tracking & Budget Monitoring"
+ puts "-" * 60
+
+ budget_client = OpenRouter::Client.new(track_usage: true)
+ budget_limit = 0.10 # $0.10 budget
+
+ budget_client.on(:after_response) do |_response|
+   total_cost = budget_client.usage_tracker.total_cost
+
+   puts "⚠️ Warning: 80% of budget used ($#{total_cost.round(4)}/$#{budget_limit})" if total_cost > budget_limit * 0.8
+
+   puts "🛑 Budget exceeded! Total: $#{total_cost.round(4)}" if total_cost > budget_limit
+ end
+
+ # Make requests
+ 5.times do |i|
+   budget_client.complete(
+     [{ role: "user", content: "Short response #{i}" }],
+     model: "openai/gpt-4o-mini",
+     extras: { max_tokens: 10 }
+   )
+ end
+
+ # Example 7: Export usage data
+ puts "\n7. Export Usage Data"
+ puts "-" * 60
+
+ # Print detailed summary
+ puts "\nDetailed summary:"
+ client.usage_tracker.print_summary
+
+ # Export as CSV
+ csv_data = client.usage_tracker.export_csv
+ puts "\nCSV export (first 200 chars):"
+ puts "#{csv_data[0...200]}..."
+
+ # Get structured summary
+ summary = client.usage_tracker.summary
+ puts "\nStructured summary available with keys:"
+ puts summary.keys.inspect
+
+ # Example 8: Request history
+ puts "\n8. Request History"
+ puts "-" * 60
+
+ history_client = OpenRouter::Client.new(track_usage: true)
+
+ # Make some requests
+ history_client.complete([{ role: "user", content: "Test 1" }], model: "openai/gpt-4o-mini")
+ history_client.complete([{ role: "user", content: "Test 2" }], model: "anthropic/claude-3-haiku")
+ history_client.complete([{ role: "user", content: "Test 3" }], model: "openai/gpt-4o-mini")
+
+ # View recent history
+ recent = history_client.usage_tracker.history(limit: 5)
+ puts "\nRecent requests:"
+ recent.each_with_index do |entry, i|
+   puts " #{i + 1}. #{entry[:model]} - #{entry[:prompt_tokens] + entry[:completion_tokens]} tokens at #{entry[:timestamp].strftime("%H:%M:%S")}"
+ end
+
+ puts "\n#{"=" * 60}"
+ puts "Observability examples completed!"
+ puts "=" * 60
+ puts "\nKey Takeaways:"
+ puts " • Use track_usage: true to enable automatic tracking"
+ puts " • Access tracker via client.usage_tracker"
+ puts " • Set up callbacks for real-time monitoring"
+ puts " • Export data as CSV for external analysis"
+ puts " • Monitor costs and set budget alerts"
+ puts "=" * 60
data/examples/prompt_template_example.rb
@@ -0,0 +1,184 @@
+ #!/usr/bin/env ruby
+ # frozen_string_literal: true
+
+ # Example demonstrating the PromptTemplate system
+ # Run this with: ruby -I lib examples/prompt_template_example.rb
+
+ require "open_router"
+
+ puts "🎯 Prompt Template Examples"
+ puts "=" * 60
+
+ # Create a client
+ client = OpenRouter::Client.new(access_token: ENV["OPENROUTER_API_KEY"])
+
+ # Example 1: Simple template
+ puts "\n1. Simple Template:"
+ puts "-" * 40
+
+ translation_template = OpenRouter::PromptTemplate.new(
+   template: "Translate the following {language_from} text to {language_to}:\n\n'{text}'",
+   input_variables: %i[language_from language_to text]
+ )
+
+ prompt = translation_template.format(
+   language_from: "English",
+   language_to: "French",
+   text: "Hello, how are you today?"
+ )
+
+ puts prompt
+ puts "\nFormatted prompt ready for OpenRouter API!"
+
+ # Example 2: Few-shot template for consistent formatting
+ puts "\n2. Few-Shot Template (Learning from Examples):"
+ puts "-" * 40
+
+ sentiment_template = OpenRouter::PromptTemplate.new(
+   prefix: "Classify the sentiment of these messages as 'positive', 'negative', or 'neutral'.",
+   examples: [
+     { text: "I love this product! It's amazing!", sentiment: "positive" },
+     { text: "This is terrible, waste of money.", sentiment: "negative" },
+     { text: "It works as expected.", sentiment: "neutral" }
+   ],
+   example_template: "Text: {text}\nSentiment: {sentiment}",
+   suffix: "Text: {input}\nSentiment:",
+   input_variables: [:input]
+ )
+
+ prompt = sentiment_template.format(input: "The service was okay, nothing special.")
+ puts prompt
+
+ # Example 3: Chat-style template with role markers
+ puts "\n3. Chat-Style Template (Multi-role conversation):"
+ puts "-" * 40
+
+ chat_template = OpenRouter::PromptTemplate.new(
+   template: <<~TEMPLATE,
+     System: You are a helpful coding assistant specializing in {language}.
+     Always provide clear explanations and working code examples.
+
+     User: {question}
+   TEMPLATE
+   input_variables: %i[language question]
+ )
+
+ # Convert to messages array for OpenRouter API
+ messages = chat_template.to_messages(
+   language: "Ruby",
+   question: "How do I read a JSON file and parse it?"
+ )
+
+ puts "Messages array for API:"
+ puts messages.inspect
+
+ # Example 4: Using DSL for template creation
+ puts "\n4. DSL-Style Template Creation:"
+ puts "-" * 40
+
+ code_review_template = OpenRouter::PromptTemplate.build do
+   template <<~PROMPT
+     Review this {language} code for:
+     - Code quality and best practices
+     - Potential bugs or issues
+     - Performance considerations
+     - Suggestions for improvement
+
+     Code to review:
+     ```{language}
+     {code}
+     ```
+
+     Provide your review in a structured format.
+   PROMPT
+   variables :language, :code
+ end
+
+ review_prompt = code_review_template.format(
+   language: "ruby",
+   code: "def add(a, b)\n return a + b\nend"
+ )
+
+ puts review_prompt
+
+ # Example 5: Partial templates (pre-filling some variables)
+ puts "\n5. Partial Templates (Pre-filled Variables):"
+ puts "-" * 40
+
+ qa_template = OpenRouter::PromptTemplate.new(
+   template: "Context: {context}\n\nQuestion: {question}\n\nAnswer:",
+   input_variables: %i[context question]
+ )
+
+ # Create a partial with context pre-filled
+ science_qa = qa_template.partial(
+   context: "Water boils at 100°C (212°F) at sea level atmospheric pressure."
+ )
+
+ # Now only need to provide the question
+ prompt1 = science_qa.format(question: "What is the boiling point of water in Celsius?")
+ prompt2 = science_qa.format(question: "How does altitude affect boiling point?")
+
+ puts "First question:\n#{prompt1}\n"
+ puts "Second question:\n#{prompt2}"
+
+ # Example 6: Factory methods for common patterns
+ puts "\n6. Factory Methods (Convenient Creation):"
+ puts "-" * 40
+
+ # Simple template
+ simple = OpenRouter::Prompt.template(
+   "Summarize this text in {word_count} words:\n\n{text}",
+   variables: %i[word_count text]
+ )
+
+ # Few-shot template
+ translation = OpenRouter::Prompt.few_shot(
+   prefix: "Translate from English to Spanish:",
+   examples: [
+     { english: "Hello", spanish: "Hola" },
+     { english: "Goodbye", spanish: "Adiós" }
+   ],
+   example_template: "{english} → {spanish}",
+   suffix: "{input} →",
+   variables: [:input]
+ )
+
+ puts simple.format(word_count: 50, text: "Long text here...")
+ puts "\n"
+ puts translation.format(input: "Thank you")
+
+ # Example 7: Integration with OpenRouter Client (if API key is set)
+ puts "\n7. Using with OpenRouter Client:"
+ puts "-" * 40
+
+ if ENV["OPENROUTER_API_KEY"]
+   story_template = OpenRouter::PromptTemplate.new(
+     template: "Write a short story about {character} who {plot}. Make it {tone}.",
+     input_variables: %i[character plot tone]
+   )
+
+   messages = story_template.to_messages(
+     character: "a robot",
+     plot: "discovers emotions",
+     tone: "heartwarming"
+   )
+
+   begin
+     response = client.complete(messages, model: "openai/gpt-4o-mini")
+     puts "AI Response:\n#{response.content}"
+   rescue StandardError
+     puts "API call would be made with messages: #{messages.inspect}"
+     puts "(Set OPENROUTER_API_KEY to test real API calls)"
+   end
+ else
+   puts "Set OPENROUTER_API_KEY environment variable to test with real API"
+ end
+
+ puts "\n✅ Prompt template examples completed!"
+ puts "\n💡 Key Benefits:"
+ puts " - Consistent prompt formatting"
+ puts " - Variable validation"
+ puts " - Few-shot learning support"
+ puts " - Easy chat message formatting"
+ puts " - Reusable templates with partials"