open_router_enhanced 1.2.2 → 1.2.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.rubocop_todo.yml +33 -15
- data/CHANGELOG.md +12 -0
- data/Gemfile.lock +1 -1
- data/docs/tools.md +234 -1
- data/examples/dynamic_model_switching_example.rb +328 -0
- data/examples/real_world_schemas_example.rb +262 -0
- data/examples/responses_api_example.rb +324 -0
- data/examples/tool_loop_example.rb +317 -0
- data/lib/open_router/streaming_client.rb +8 -4
- data/lib/open_router/version.rb +1 -1
- metadata +6 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 8974bb5aa236326b1c938307f3ba59724b95dfbad012fa2151624d4e9247d2d4
+  data.tar.gz: ce35a08da2c0d6a6b41ad65ff4a1120be661a4eb7cf0f896f880c4fd90be044b
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b2add01adf57714a7b16848cac438f6cc3df98ea7dc2c0e6436e3f78dd2cdc8889c3ab05b3646686645c290001d6c13fd08a9c059157f2b05af1df1ae649c5c1
+  data.tar.gz: c54028cfbdfef0aa67658bbea76205ee278cb902850fe315f8a1a38523db80e0efcb7c191512fc4a6f144dd8ca621b5b4600d68b269d6073ec9885c730c3f0d1
data/.rubocop_todo.yml
CHANGED
@@ -1,14 +1,12 @@
 # This configuration was generated by
 # `rubocop --auto-gen-config`
-# on 2025-
+# on 2025-12-25 09:19:33 UTC using RuboCop version 1.82.1.
 # The point is for the user to remove these configuration records
 # one by one as the offenses are removed from the code base.
 # Note that changes in the inspected code, or installation of new
 # versions of RuboCop, may require this file to be generated again.

 # Offense count: 1
-# Configuration parameters: Severity, Include.
-# Include: **/*.gemspec
 Gemspec/RequiredRubyVersion:
   Exclude:
     - 'open_router_enhanced.gemspec'
@@ -20,42 +18,42 @@ Lint/ShadowedException:

 # Offense count: 19
 # This cop supports safe autocorrection (--autocorrect).
-# Configuration parameters:
+# Configuration parameters: IgnoreEmptyBlocks, AllowUnusedKeywordArguments.
 Lint/UnusedBlockArgument:
   Exclude:
     - 'spec/force_structured_output_spec.rb'
     - 'spec/integration/structured_output_flow_spec.rb'

-# Offense count:
+# Offense count: 26
 # Configuration parameters: AllowedMethods, AllowedPatterns, CountRepeatedAttributes.
 Metrics/AbcSize:
   Max: 69

-# Offense count:
+# Offense count: 193
 # Configuration parameters: CountComments, CountAsOne, AllowedMethods, AllowedPatterns.
 # AllowedMethods: refine
 Metrics/BlockLength:
   Max: 464

-# Offense count:
+# Offense count: 6
 # Configuration parameters: CountComments, CountAsOne.
 Metrics/ClassLength:
-  Max:
+  Max: 260

 # Offense count: 16
 # Configuration parameters: AllowedMethods, AllowedPatterns.
 Metrics/CyclomaticComplexity:
   Max: 21

-# Offense count:
+# Offense count: 48
 # Configuration parameters: CountComments, CountAsOne, AllowedMethods, AllowedPatterns.
 Metrics/MethodLength:
-  Max:
+  Max: 40

 # Offense count: 6
 # Configuration parameters: CountKeywordArgs, MaxOptionalParameters.
 Metrics/ParameterLists:
-  Max:
+  Max: 9

 # Offense count: 13
 # Configuration parameters: AllowedMethods, AllowedPatterns.
@@ -87,7 +85,13 @@ Naming/VariableNumber:
   Exclude:
     - 'spec/vcr/prompt_template_spec.rb'

-# Offense count:
+# Offense count: 2
+Security/Eval:
+  Exclude:
+    - 'examples/responses_api_example.rb'
+    - 'examples/tool_loop_example.rb'
+
+# Offense count: 8
 # Configuration parameters: AllowedConstants.
 Style/Documentation:
   Exclude:
@@ -100,7 +104,21 @@ Style/Documentation:
     - 'lib/open_router/response.rb'
     - 'lib/open_router/schema.rb'
     - 'lib/open_router/tool.rb'
-
+
+# Offense count: 5
+# This cop supports safe autocorrection (--autocorrect).
+# Configuration parameters: EnforcedStyle.
+# SupportedStyles: format, sprintf, percent
+Style/FormatString:
+  Exclude:
+    - 'examples/dynamic_model_switching_example.rb'
+    - 'examples/real_world_schemas_example.rb'
+
+# Offense count: 2
+# This cop supports unsafe autocorrection (--autocorrect-all).
+Style/IdenticalConditionalBranches:
+  Exclude:
+    - 'examples/responses_api_example.rb'

 # Offense count: 8
 # This cop supports safe autocorrection (--autocorrect).
@@ -122,9 +140,9 @@ Style/OptionalBooleanParameter:
   Exclude:
     - 'lib/open_router/schema.rb'

-# Offense count:
+# Offense count: 40
 # This cop supports safe autocorrection (--autocorrect).
-# Configuration parameters: AllowHeredoc, AllowURI, AllowQualifiedName, URISchemes,
+# Configuration parameters: AllowHeredoc, AllowURI, AllowQualifiedName, URISchemes, AllowRBSInlineAnnotation, AllowCopDirectives, AllowedPatterns, SplitStrings.
 # URISchemes: http, https
 Layout/LineLength:
   Max: 210
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,17 @@
 ## [Unreleased]

+## [1.2.2] - 2025-12-25
+
+### Fixed
+- Fixed SSL certificate verification error in `ModelRegistry` by switching from `Net::HTTP` to `Faraday` for consistent HTTP handling across the gem
+
+### Added
+- New examples in `examples/` directory:
+  - `real_world_schemas_example.rb` - Practical structured data extraction scenarios
+  - `tool_loop_example.rb` - Complete Chat Completions API tool calling workflow
+  - `responses_api_example.rb` - Responses API with multi-turn tool loops
+  - `dynamic_model_switching_example.rb` - Runtime model selection and capability detection
+
 ## [1.2.1] - 2025-12-24

 ### Fixed
data/Gemfile.lock
CHANGED
data/docs/tools.md
CHANGED
@@ -999,7 +999,7 @@ end
 api_tool = OpenRouter::Tool.define do
   name "external_api"
   description "Call external REST APIs"
-
+
   parameters do
     string :endpoint, required: true,
            description: "API endpoint URL"
@@ -1013,4 +1013,237 @@ api_tool = OpenRouter::Tool.define do
            description: "Request timeout in seconds"
   end
 end
+```
+
+### Model Delegation Tool (AI-as-a-Tool)
+
+A powerful pattern where one model can delegate tasks to a different, specialized model and incorporate the results. This enables "multi-agent" workflows where you route specific subtasks to the best model for the job.
+
+```ruby
+# Define a tool that calls a different model
+specialist_tool = OpenRouter::Tool.define do
+  name "consult_specialist"
+  description "Consult a specialist AI model for specific tasks like code review, math, or creative writing. Use this when a task would benefit from a specialized model's expertise."
+
+  parameters do
+    string :task_type, required: true,
+           enum: ["code_review", "math_reasoning", "creative_writing", "analysis"],
+           description: "Type of task to delegate"
+    string :prompt, required: true,
+           description: "The specific question or task for the specialist"
+    string :context,
+           description: "Additional context to provide to the specialist"
+  end
+end
+
+# Tool executor that routes to different models
+class ModelDelegationExecutor
+  MODEL_ROUTING = {
+    "code_review" => "anthropic/claude-sonnet-4",
+    "math_reasoning" => "deepseek/deepseek-r1",
+    "creative_writing" => "anthropic/claude-sonnet-4",
+    "analysis" => "openai/gpt-4o"
+  }.freeze
+
+  def initialize(client)
+    @client = client
+  end
+
+  def execute(tool_call)
+    args = tool_call.arguments
+    task_type = args["task_type"]
+    prompt = args["prompt"]
+    context = args["context"]
+
+    # Select the appropriate specialist model
+    specialist_model = MODEL_ROUTING[task_type]
+
+    # Build the specialist prompt
+    specialist_messages = [
+      {
+        role: "system",
+        content: system_prompt_for(task_type)
+      },
+      {
+        role: "user",
+        content: context ? "Context: #{context}\n\nTask: #{prompt}" : prompt
+      }
+    ]
+
+    # Call the specialist model
+    specialist_response = @client.complete(
+      specialist_messages,
+      model: specialist_model
+    )
+
+    {
+      specialist_model: specialist_model,
+      task_type: task_type,
+      response: specialist_response.content
+    }
+  end
+
+  private
+
+  def system_prompt_for(task_type)
+    case task_type
+    when "code_review"
+      "You are an expert code reviewer. Analyze the code for bugs, security issues, and improvements."
+    when "math_reasoning"
+      "You are a mathematics expert. Solve problems step by step with clear explanations."
+    when "creative_writing"
+      "You are a creative writing expert. Help with storytelling, prose, and narrative."
+    when "analysis"
+      "You are an analytical expert. Provide thorough, well-reasoned analysis."
+    end
+  end
+end
+
+# Usage in a tool loop
+client = OpenRouter::Client.new
+executor = ModelDelegationExecutor.new(client)
+
+messages = [
+  { role: "user", content: "Review this Ruby code and also help me solve: what's the integral of x^2?" }
+]
+
+# Primary model (orchestrator)
+response = client.complete(
+  messages,
+  model: "openai/gpt-4o-mini", # Fast, cheap orchestrator
+  tools: [specialist_tool],
+  tool_choice: "auto"
+)
+
+if response.has_tool_calls?
+  messages << response.to_message
+
+  response.tool_calls.each do |tool_call|
+    puts "Delegating #{tool_call.arguments['task_type']} to specialist..."
+
+    result = executor.execute(tool_call)
+    puts "Specialist (#{result[:specialist_model]}) responded"
+
+    messages << tool_call.to_result_message(result)
+  end
+
+  # Get final synthesized response from orchestrator
+  final_response = client.complete(
+    messages,
+    model: "openai/gpt-4o-mini",
+    tools: [specialist_tool]
+  )
+
+  puts final_response.content
+end
+```
+
+#### Advanced: Multi-Model Reasoning Pipeline
+
+```ruby
+# Chain multiple specialist consultations for complex tasks
+class ReasoningPipeline
+  def initialize(client)
+    @client = client
+  end
+
+  def solve_complex_problem(problem)
+    # Step 1: Break down the problem with a reasoning model
+    breakdown = consult_model(
+      model: "deepseek/deepseek-r1",
+      system: "Break this problem into smaller, solvable steps.",
+      prompt: problem
+    )
+
+    # Step 2: Solve each step with appropriate specialists
+    solutions = breakdown[:steps].map do |step|
+      model = select_model_for_step(step)
+      consult_model(
+        model: model,
+        system: "Solve this specific step thoroughly.",
+        prompt: step
+      )
+    end
+
+    # Step 3: Synthesize with a capable general model
+    consult_model(
+      model: "anthropic/claude-sonnet-4",
+      system: "Synthesize these solutions into a coherent final answer.",
+      prompt: "Problem: #{problem}\n\nStep solutions:\n#{solutions.join("\n\n")}"
+    )
+  end
+
+  private
+
+  def consult_model(model:, system:, prompt:)
+    response = @client.complete(
+      [
+        { role: "system", content: system },
+        { role: "user", content: prompt }
+      ],
+      model: model
+    )
+    response.content
+  end
+
+  def select_model_for_step(step)
+    # Route based on step content
+    case step
+    when /code|programming|function/i
+      "anthropic/claude-sonnet-4"
+    when /math|calculate|equation/i
+      "deepseek/deepseek-r1"
+    when /research|analyze|compare/i
+      "openai/gpt-4o"
+    else
+      "openai/gpt-4o-mini"
+    end
+  end
+end
+```
+
+#### Cost-Aware Model Routing
+
+```ruby
+# Route based on task complexity to optimize cost
+class CostAwareRouter
+  MODELS_BY_TIER = {
+    cheap: "openai/gpt-4o-mini",
+    standard: "openai/gpt-4o",
+    premium: "anthropic/claude-sonnet-4",
+    reasoning: "deepseek/deepseek-r1"
+  }.freeze
+
+  def initialize(client)
+    @client = client
+  end
+
+  def route_task(task, complexity: :auto)
+    tier = complexity == :auto ? estimate_complexity(task) : complexity
+    model = MODELS_BY_TIER[tier]
+
+    @client.complete(
+      [{ role: "user", content: task }],
+      model: model
+    )
+  end
+
+  private
+
+  def estimate_complexity(task)
+    # Quick heuristics for complexity
+    word_count = task.split.size
+
+    case
+    when task.match?(/prove|derive|analyze deeply|comprehensive/i)
+      :reasoning
+    when task.match?(/code|debug|security|architecture/i)
+      :premium
+    when word_count > 200 || task.match?(/compare|evaluate|synthesize/i)
+      :standard
+    else
+      :cheap
+    end
+  end
+end
 ```