legion-llm 0.9.17 → 0.9.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: ce44e3d928a848ab67e5cd50574c7454ff3490a455c1d040c7089641e1091e5e
- data.tar.gz: ef3eaa05c9340b08f94af99c7b4f35334cef2a1d8dd09aeafb3535532840b4ce
+ metadata.gz: 4743dd41922fbca3818f72bb48d353314ed2895ce0981e779ac29315c8ffea3b
+ data.tar.gz: a235df9596b11ddfd94ef5f075a9785f4bce0ae9c849db8b0b5845bde83af4ac
  SHA512:
- metadata.gz: 1dc635c864ac647911bc6d55a34209f10273471b63683ab4eaa7dc69fdee7d3047c6b028b9d5180688b0b7ed4624c89fa90e5a7745f2462cb30371fe84607a11
- data.tar.gz: ddd2e32d57a9a56d1fff22c4b7e423145d743183efd7044290482612b704b4c787db9790d28733df53e69a90ed57e4af1d7f6bef89f069ad3a25fafbe09b63ae
+ metadata.gz: 1c02e4859ef4bd824e854275fcbb1eadfe243b13477c9af9a9f2f3c484579eefa10bc70d0b1735c85b433b476ca9a8dd69b5fa788cdeafc651dcc370f71cfc40
+ data.tar.gz: 9f3ae0f1adba6bbe56653f0afce38c0eaa0dd4121b02279f5d9053be84682774f07401e346a855320c1bc006929d8ca184c88896098cd52697869c9b8d9f4630
data/CHANGELOG.md CHANGED
@@ -1,5 +1,21 @@
  # Legion LLM Changelog
 
+ ## [0.9.19] - 2026-05-11
+
+ ### Added
+ - `GET /api/llm/tiers` — full RESTful tier hierarchy endpoint with sub-routes: `/:tier`, `/:tier/providers`, `/:tier/providers/:provider`, `/:tier/providers/:provider/instances`, `/:tier/providers/:provider/instances/:instance`, `/:tier/providers/:provider/instances/:instance/models`, `/:tier/providers/:provider/models`. Returns tier availability, provider health, instance details, and model listings in a structured tree.
+ - `POST /api/llm/inference` now accepts `tier` parameter in request body, passed through to the routing pipeline via `Request.extra[:tier]`. Supports values: `local`, `fleet`, `openai_compat`, `cloud`, `frontier`.
+ - Request log for `/api/llm/inference` now includes `requested_tier` field.
+
+ ### Changed
+ - `GET /api/llm/offerings` response restructured from flat array to grouped hash: `tier → provider → instance → [offerings]`. Individual offering lookup (`GET /api/llm/offerings/:id`) unchanged.
+
+ ## [0.9.18] - 2026-05-11
+
+ ### Fixed
+ - `NativeResponseAdapter` now coerces tool_calls from the Hash-keyed-by-name format (returned by OpenAI-compatible providers via lex-llm) into a flat Array of Hashes, preventing TypeError crashes in `step_tool_calls`, `response_tool_calls`, and the native tool loop when streaming tool-use responses from vllm/ollama.
+ - `LexLLMAdapter#normalize_messages` merges enriched system content with an existing system message at index 0 instead of prepending a duplicate, preventing vllm "System message must be at the beginning" rejections during gaia narrator ticks.
+
  ## [0.9.17] - 2026-05-11
 
  ### Fixed
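
A minimal sketch of calling the inference endpoint with the new `tier` field described above, assuming a locally reachable legion-llm HTTP API (the host, port, and payload values are illustrative, not part of the gem):

    require 'net/http'
    require 'json'

    # Assumed local deployment; adjust host/port for your install.
    uri = URI('http://localhost:8080/api/llm/inference')
    payload = {
      messages: [{ role: 'user', content: 'hello' }],
      tier: 'local' # forwarded to the router via Request.extra[:tier]
    }
    response = Net::HTTP.post(uri, JSON.generate(payload), 'Content-Type' => 'application/json')
    puts response.body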
@@ -23,6 +23,7 @@ module Legion
  requested_tools = body[:requested_tools] || []
  model = body[:model]
  provider = body[:provider]
+ tier = body[:tier]
  caller_context = body[:caller]
  conversation_id = body[:conversation_id]
  request_id = body[:request_id] || SecureRandom.uuid
@@ -88,12 +89,16 @@ module Legion
  "[llm][api][inference] action=accepted request_id=#{request_id} " \
  "conversation_id=#{conversation_id || 'none'} caller=#{caller_summary} " \
  "messages=#{messages.size} client_tools=#{tools.size} requested_tools=#{Array(requested_tools).size} " \
- "requested_provider=#{provider || 'auto'} requested_model=#{model || 'auto'} stream=#{streaming}"
+ "requested_tier=#{tier || 'auto'} requested_provider=#{provider || 'auto'} " \
+ "requested_model=#{model || 'auto'} stream=#{streaming}"
  )
 
  require 'legion/llm/inference/request' unless defined?(Legion::LLM::Inference::Request)
  require 'legion/llm/inference/executor' unless defined?(Legion::LLM::Inference::Executor)
 
+ extra = {}
+ extra[:tier] = tier.to_sym if tier
+
  pipeline_request = Legion::LLM::Inference::Request.build(
  id: request_id,
  messages: messages,
@@ -104,7 +109,8 @@ module Legion
  conversation_id: conversation_id,
  metadata: { requested_tools: requested_tools },
  stream: streaming,
- cache: { strategy: :default, cacheable: true }
+ cache: { strategy: :default, cacheable: true },
+ extra: extra
  )
 
  setup_ms = ((::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - route_t0) * 1000).round
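
With this change, the accepted-request log line carries the requested tier. Reconstructed from the format string above with illustrative field values:

    [llm][api][inference] action=accepted request_id=2b1f... conversation_id=none caller=api messages=2 client_tools=0 requested_tools=0 requested_tier=local requested_provider=auto requested_model=auto stream=false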
@@ -17,11 +17,12 @@ module Legion
  require_llm!
 
  filters = Legion::LLM::API::Native::Offerings.request_filters(params)
- offerings = Legion::LLM::Inventory.offerings(filters)
+ raw_offerings = Legion::LLM::Inventory.offerings(filters)
+ grouped = Legion::LLM::API::Native::Offerings.group_offerings(raw_offerings)
 
  json_response({
- offerings: offerings,
- summary: Legion::LLM::API::Native::Offerings.summary(offerings, filters)
+ offerings: grouped,
+ summary: Legion::LLM::API::Native::Offerings.summary(raw_offerings)
  })
  rescue StandardError => e
  handle_exception(e, level: :error, handled: true, operation: 'llm.api.offerings.list')
@@ -59,15 +60,46 @@ module Legion
  }
  end
 
- def self.summary(offerings, filters)
+ def self.group_offerings(offerings)
+ grouped = {}
+
+ offerings.each do |offering|
+ tier = (offering[:tier] || :unknown).to_s
+ provider = (offering[:provider_family] || :unknown).to_s
+ instance = (offering[:instance_id] || offering[:provider_instance] || :default).to_s
+
+ grouped[tier] ||= {}
+ grouped[tier][provider] ||= {}
+ grouped[tier][provider][instance] ||= []
+ grouped[tier][provider][instance] << compact_offering(offering)
+ end
+
+ grouped
+ end
+
+ def self.compact_offering(offering)
  {
- total: offerings.size,
- operation: filters[:type]&.to_s,
- models: offerings.map { |offering| offering[:model] }.uniq.size,
- providers: offerings.map { |offering| offering[:provider_family] }.uniq.size,
- instances: offerings.map { |offering| offering[:instance_id] }.uniq.size
+ id: offering[:offering_id] || offering[:id],
+ model: offering[:model].to_s,
+ type: offering[:type].to_s,
+ model_family: offering[:model_family]&.to_s,
+ capabilities: Array(offering[:capabilities]).map(&:to_s),
+ limits: offering[:limits] || {},
+ enabled: offering[:enabled] != false,
+ cost: offering[:cost] || {},
+ health: offering[:health] || {}
  }.compact
  end
+
+ def self.summary(offerings)
+ {
+ total: offerings.size,
+ tiers: offerings.map { |o| (o[:tier] || :unknown).to_s }.uniq.size,
+ providers: offerings.map { |o| (o[:provider_family] || :unknown).to_s }.uniq.size,
+ instances: offerings.map { |o| (o[:instance_id] || :default).to_s }.uniq.size,
+ models: offerings.map { |o| o[:model] }.uniq.size
+ }
+ end
  end
  end
  end
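
A self-contained sketch of the tier → provider → instance nesting that `group_offerings` produces; the offering hashes below are made up for illustration:

    offerings = [
      { tier: :local, provider_family: :ollama, instance_id: 'ollama-1', model: 'llama3' },
      { tier: :cloud, provider_family: :openai, instance_id: 'default', model: 'gpt-4o' }
    ]

    grouped = {}
    offerings.each do |o|
      tier = (o[:tier] || :unknown).to_s
      provider = (o[:provider_family] || :unknown).to_s
      instance = (o[:instance_id] || :default).to_s
      ((grouped[tier] ||= {})[provider] ||= {})[instance] ||= []
      grouped[tier][provider][instance] << o[:model]
    end
    # grouped == { "local" => { "ollama" => { "ollama-1" => ["llama3"] } },
    #              "cloud" => { "openai" => { "default" => ["gpt-4o"] } } }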
@@ -0,0 +1,242 @@
+ # frozen_string_literal: true
+
+ require 'legion/logging/helper'
+
+ module Legion
+ module LLM
+ module API
+ module Native
+ module Tiers
+ extend Legion::Logging::Helper
+
+ def self.registered(app) # rubocop:disable Metrics/MethodLength,Metrics/AbcSize
+ log.debug('[llm][api][tiers] registering tier routes')
+
+ app.get '/api/llm/tiers' do
+ require_llm!
+
+ tiers_data = Legion::LLM::API::Native::Tiers.build_tiers_tree
+ json_response({
+ tiers: tiers_data,
+ priority: Legion::LLM::API::Native::Tiers.tier_priority,
+ privacy_mode: Legion::LLM::API::Native::Tiers.privacy_mode?
+ })
+ rescue StandardError => e
+ handle_exception(e, level: :error, handled: true, operation: 'llm.api.tiers.list')
+ json_error('tiers_error', e.message, status_code: 500)
+ end
+
+ app.get '/api/llm/tiers/:tier' do
+ require_llm!
+
+ tier_name = params[:tier].to_s
+ tiers_data = Legion::LLM::API::Native::Tiers.build_tiers_tree
+ tier = tiers_data[tier_name]
+ halt json_error('tier_not_found', "Tier '#{tier_name}' not found", status_code: 404) unless tier
+
+ json_response({ tier: tier_name, **tier })
+ rescue StandardError => e
+ handle_exception(e, level: :error, handled: true, operation: 'llm.api.tiers.get')
+ json_error('tiers_error', e.message, status_code: 500)
+ end
+
+ app.get '/api/llm/tiers/:tier/providers' do
+ require_llm!
+
+ tier_name = params[:tier].to_s
+ tiers_data = Legion::LLM::API::Native::Tiers.build_tiers_tree
+ tier = tiers_data[tier_name]
+ halt json_error('tier_not_found', "Tier '#{tier_name}' not found", status_code: 404) unless tier
+
+ json_response({ tier: tier_name, providers: tier[:providers] })
+ rescue StandardError => e
+ handle_exception(e, level: :error, handled: true, operation: 'llm.api.tiers.providers')
+ json_error('tiers_error', e.message, status_code: 500)
+ end
+
+ app.get '/api/llm/tiers/:tier/providers/:provider' do
+ require_llm!
+
+ tier_name = params[:tier].to_s
+ provider_name = params[:provider].to_s
+ tiers_data = Legion::LLM::API::Native::Tiers.build_tiers_tree
+ tier = tiers_data[tier_name]
+ halt json_error('tier_not_found', "Tier '#{tier_name}' not found", status_code: 404) unless tier
+
+ provider = tier.dig(:providers, provider_name)
+ halt json_error('provider_not_found', "Provider '#{provider_name}' not found in tier '#{tier_name}'", status_code: 404) unless provider
+
+ json_response({ tier: tier_name, provider: provider_name, **provider })
+ rescue StandardError => e
+ handle_exception(e, level: :error, handled: true, operation: 'llm.api.tiers.provider')
+ json_error('tiers_error', e.message, status_code: 500)
+ end
+
+ app.get '/api/llm/tiers/:tier/providers/:provider/instances' do
+ require_llm!
+
+ tier_name = params[:tier].to_s
+ provider_name = params[:provider].to_s
+ tiers_data = Legion::LLM::API::Native::Tiers.build_tiers_tree
+ tier = tiers_data[tier_name]
+ halt json_error('tier_not_found', "Tier '#{tier_name}' not found", status_code: 404) unless tier
+
+ provider = tier.dig(:providers, provider_name)
+ halt json_error('provider_not_found', "Provider '#{provider_name}' not found in tier '#{tier_name}'", status_code: 404) unless provider
+
+ json_response({ tier: tier_name, provider: provider_name, instances: provider[:instances] })
+ rescue StandardError => e
+ handle_exception(e, level: :error, handled: true, operation: 'llm.api.tiers.instances')
+ json_error('tiers_error', e.message, status_code: 500)
+ end
+
+ app.get '/api/llm/tiers/:tier/providers/:provider/instances/:instance' do
+ require_llm!
+
+ tier_name = params[:tier].to_s
+ provider_name = params[:provider].to_s
+ instance_name = params[:instance].to_s
+ tiers_data = Legion::LLM::API::Native::Tiers.build_tiers_tree
+ tier = tiers_data[tier_name]
+ halt json_error('tier_not_found', "Tier '#{tier_name}' not found", status_code: 404) unless tier
+
+ provider = tier.dig(:providers, provider_name)
+ halt json_error('provider_not_found', "Provider '#{provider_name}' not found in tier '#{tier_name}'", status_code: 404) unless provider
+
+ instance = provider.dig(:instances, instance_name)
+ halt json_error('instance_not_found', "Instance '#{instance_name}' not found", status_code: 404) unless instance
+
+ json_response({ tier: tier_name, provider: provider_name, instance: instance_name, **instance })
+ rescue StandardError => e
+ handle_exception(e, level: :error, handled: true, operation: 'llm.api.tiers.instance')
+ json_error('tiers_error', e.message, status_code: 500)
+ end
+
+ app.get '/api/llm/tiers/:tier/providers/:provider/instances/:instance/models' do
+ require_llm!
+
+ tier_name = params[:tier].to_s
+ provider_name = params[:provider].to_s
+ instance_name = params[:instance].to_s
+ tiers_data = Legion::LLM::API::Native::Tiers.build_tiers_tree
+ tier = tiers_data[tier_name]
+ halt json_error('tier_not_found', "Tier '#{tier_name}' not found", status_code: 404) unless tier
+
+ provider = tier.dig(:providers, provider_name)
+ halt json_error('provider_not_found', "Provider '#{provider_name}' not found in tier '#{tier_name}'", status_code: 404) unless provider
+
+ instance = provider.dig(:instances, instance_name)
+ halt json_error('instance_not_found', "Instance '#{instance_name}' not found", status_code: 404) unless instance
+
+ json_response({ tier: tier_name, provider: provider_name, instance: instance_name, models: instance[:models] })
+ rescue StandardError => e
+ handle_exception(e, level: :error, handled: true, operation: 'llm.api.tiers.instance_models')
+ json_error('tiers_error', e.message, status_code: 500)
+ end
+
+ app.get '/api/llm/tiers/:tier/providers/:provider/models' do
+ require_llm!
+
+ tier_name = params[:tier].to_s
+ provider_name = params[:provider].to_s
+ tiers_data = Legion::LLM::API::Native::Tiers.build_tiers_tree
+ tier = tiers_data[tier_name]
+ halt json_error('tier_not_found', "Tier '#{tier_name}' not found", status_code: 404) unless tier
+
+ provider = tier.dig(:providers, provider_name)
+ halt json_error('provider_not_found', "Provider '#{provider_name}' not found in tier '#{tier_name}'", status_code: 404) unless provider
+
+ all_models = provider[:instances].values.flat_map { |inst| inst[:models] }
+ seen = {}
+ unique_models = all_models.select { |m| seen[m[:id]] ? false : (seen[m[:id]] = true) }
+
+ json_response({ tier: tier_name, provider: provider_name, models: unique_models })
+ rescue StandardError => e
+ handle_exception(e, level: :error, handled: true, operation: 'llm.api.tiers.provider_models')
+ json_error('tiers_error', e.message, status_code: 500)
+ end
+
+ log.debug('[llm][api][tiers] tier routes registered')
+ end
+
+ def self.tier_priority
+ routing_config = Legion::LLM::Settings.value(:routing) || {}
+ Array(routing_config[:tier_priority] || %w[local fleet openai_compat cloud frontier])
+ end
+
+ def self.privacy_mode?
+ return false unless defined?(Legion::LLM::Router)
+
+ Legion::LLM::Router.respond_to?(:privacy_mode?) && Legion::LLM::Router.privacy_mode?
+ end
+
+ def self.tier_available?(tier_sym)
+ return true unless defined?(Legion::LLM::Router) && Legion::LLM::Router.respond_to?(:tier_available?)
+
+ Legion::LLM::Router.tier_available?(tier_sym)
+ end
+
+ def self.build_tiers_tree
+ offerings = Legion::LLM::Inventory.offerings({})
+ grouped = {}
+
+ offerings.each do |offering|
+ tier_name = (offering[:tier] || :unknown).to_s
+ provider_name = (offering[:provider_family] || :unknown).to_s
+ instance_name = (offering[:instance_id] || offering[:provider_instance] || :default).to_s
+
+ grouped[tier_name] ||= { available: tier_available?(tier_name.to_sym), providers: {} }
+ grouped[tier_name][:providers][provider_name] ||= { instances: {} }
+ grouped[tier_name][:providers][provider_name][:instances][instance_name] ||= {
+ health: offering_instance_health(provider_name, instance_name),
+ capabilities: [],
+ models: []
+ }
+
+ inst = grouped[tier_name][:providers][provider_name][:instances][instance_name]
+ inst[:capabilities] = (inst[:capabilities] + Array(offering[:capabilities])).uniq.sort
+ inst[:models] << build_model_entry(offering)
+ end
+
+ # Sort tiers by priority order
+ priority = tier_priority
+ sorted = {}
+ priority.each { |t| sorted[t] = grouped.delete(t) if grouped.key?(t) }
+ grouped.each { |t, v| sorted[t] = v }
+
+ # Ensure all priority tiers appear even if empty
+ priority.each do |t|
+ sorted[t] ||= { available: tier_available?(t.to_sym), providers: {} }
+ end
+
+ sorted
+ end
+
+ def self.build_model_entry(offering)
+ {
+ id: offering[:model].to_s,
+ offering_id: offering[:offering_id] || offering[:id],
+ type: offering[:type].to_s,
+ capabilities: Array(offering[:capabilities]).map(&:to_s),
+ limits: offering[:limits] || {},
+ enabled: offering[:enabled] != false,
+ cost: offering[:cost] || {},
+ model_family: offering[:model_family]&.to_s
+ }.compact
+ end
+
+ def self.offering_instance_health(provider_name, instance_name)
+ return 'unknown' unless defined?(Legion::LLM::Router) && Legion::LLM::Router.respond_to?(:health_tracker)
+
+ tracker = Legion::LLM::Router.health_tracker
+ return 'unknown' unless tracker
+
+ tracker.circuit_state(provider_name.to_sym, instance: instance_name.to_sym).to_s
+ rescue StandardError
+ 'unknown'
+ end
+ end
+ end
+ end
+ end
+ end
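
A sketch of consuming the tree returned by `GET /api/llm/tiers`, assuming the same local deployment as above and that the JSON mirrors the structure built by `build_tiers_tree`:

    require 'net/http'
    require 'json'

    body = Net::HTTP.get(URI('http://localhost:8080/api/llm/tiers')) # assumed host/port
    doc = JSON.parse(body)
    doc['tiers'].each do |tier, info|
      next unless info['available']
      info['providers'].each do |provider, provider_data|
        provider_data['instances'].each do |instance, instance_data|
          models = instance_data['models'].map { |m| m['id'] }.join(', ')
          puts "#{tier}/#{provider}/#{instance} [#{instance_data['health']}]: #{models}"
        end
      end
    end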
@@ -9,6 +9,7 @@ require_relative 'api/native/models'
  require_relative 'api/native/offerings'
  require_relative 'api/native/instances'
  require_relative 'api/native/routing'
+ require_relative 'api/native/tiers'
  require_relative 'api/translators/openai_request'
  require_relative 'api/translators/openai_response'
  require_relative 'api/openai/chat_completions'
@@ -36,6 +37,7 @@ module Legion
  Native::Offerings.registered(app)
  Native::Instances.registered(app)
  Native::Routing.registered(app)
+ Native::Tiers.registered(app)
  OpenAI::ChatCompletions.registered(app)
  OpenAI::Models.registered(app)
  OpenAI::Embeddings.registered(app)
@@ -2,11 +2,7 @@
 
  require 'legion/logging/helper'
 
- begin
- require 'legion/extensions/llm/responses/thinking_extractor'
- rescue LoadError
- nil
- end
+ require 'legion/extensions/llm/responses/thinking_extractor'
 
  module Legion
  module LLM
@@ -37,7 +33,7 @@ module Legion
  @content = extracted[:result].to_s
  @model = result_hash[:model]
  @metadata = extracted[:metadata] || {}
- @tool_calls = result_hash[:tool_calls] || []
+ @tool_calls = self.class.coerce_tool_calls(result_hash[:tool_calls])
  @stop_reason = result_hash[:stop_reason]
  @thinking = extracted[:thinking]
  usage = self.class.coerce_usage(result_hash[:usage])
@@ -73,7 +69,7 @@ module Legion
  cache_write_tokens: raw.respond_to?(:cache_creation_tokens) ? raw.cache_creation_tokens.to_i : 0
  ),
  metadata: raw.respond_to?(:metadata) && raw.metadata.is_a?(Hash) ? raw.metadata : {},
- tool_calls: raw.respond_to?(:tool_calls) ? raw.tool_calls : [],
+ tool_calls: raw.respond_to?(:tool_calls) ? coerce_tool_calls(raw.tool_calls) : [],
  stop_reason: raw.respond_to?(:stop_reason) ? raw.stop_reason : nil,
  thinking: raw.respond_to?(:thinking) ? raw.thinking : nil
  }.compact
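
The `coerce_tool_calls` helper added in the next hunk flattens the Hash-keyed-by-name shape into the Array the tool loop expects; an illustrative before/after with made-up values:

    raw = {
      'get_weather' => { 'name' => 'get_weather', 'arguments' => { 'city' => 'Berlin' } },
      'get_time' => { 'name' => 'get_time', 'arguments' => {} }
    }
    # No top-level :name/:function key, so the Hash is treated as keyed-by-name
    # and its values become a flat Array of tool-call Hashes:
    coerced = raw.values
    # coerced == [{ 'name' => 'get_weather', 'arguments' => { 'city' => 'Berlin' } },
    #             { 'name' => 'get_time', 'arguments' => {} }]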
@@ -107,6 +103,29 @@ module Legion
  )
  end
 
+ def self.coerce_tool_calls(raw)
+ return [] if raw.nil?
+ return raw if raw.is_a?(Array)
+
+ return raw.values.filter_map { |entry| coerce_single_tool_call(entry) } if raw.is_a?(Hash) && !single_tool_call_hash?(raw)
+
+ [coerce_single_tool_call(raw)].compact
+ end
+
+ def self.single_tool_call_hash?(hash)
+ hash.key?(:name) || hash.key?('name') || hash.key?(:function) || hash.key?('function')
+ end
+
+ def self.coerce_single_tool_call(entry)
+ if entry.respond_to?(:id) && entry.respond_to?(:name)
+ return { id: entry.id, name: entry.name, arguments: entry.respond_to?(:arguments) ? entry.arguments : {} }
+ end
+
+ return entry if entry.is_a?(Hash)
+
+ nil
+ end
+
  def self.merge_thinking_payloads(existing, extracted)
  return existing || extracted unless existing && extracted
 
@@ -150,7 +150,7 @@ module Legion
  def normalize_messages(messages, system: nil)
  message_class = lex_llm_namespace::Message
  raw_messages = Array(messages)
- raw_messages = [{ role: :system, content: system }] + raw_messages if present_system?(system)
+ raw_messages = prepend_or_merge_system(raw_messages, system) if present_system?(system)
 
  raw_messages.map do |message|
  next message if message.is_a?(message_class)
@@ -165,6 +165,22 @@ module Legion
  end
  end
 
+ def prepend_or_merge_system(raw_messages, system)
+ first = raw_messages.first
+ first_role = if first.is_a?(Hash)
+ first[:role] || first['role']
+ elsif first.respond_to?(:role)
+ first.role
+ end
+ if first_role.to_s == 'system'
+ existing_content = first.is_a?(Hash) ? (first[:content] || first['content']) : first.content
+ merged = { role: :system, content: "#{system}\n\n#{existing_content}" }
+ [merged] + raw_messages[1..]
+ else
+ [{ role: :system, content: system }] + raw_messages
+ end
+ end
+
  def present_system?(system)
  return false if system.nil?
  return false if system.respond_to?(:empty?) && system.empty?
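
An illustrative before/after for the system-merge fix in `prepend_or_merge_system` (the messages and content below are made up):

    messages = [
      { role: :system, content: 'You are the narrator.' },
      { role: :user, content: 'Begin the tick.' }
    ]
    system = 'Enriched context: current world state.'

    # 0.9.17 prepended a second system message; 0.9.18+ merges into index 0 instead:
    merged = [{ role: :system, content: "#{system}\n\n#{messages.first[:content]}" }] + messages[1..]
    # merged.first[:content] == "Enriched context: current world state.\n\nYou are the narrator."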
@@ -2,6 +2,6 @@
 
  module Legion
  module LLM
- VERSION = '0.9.17'
+ VERSION = '0.9.19'
  end
  end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: legion-llm
  version: !ruby/object:Gem::Version
- version: 0.9.17
+ version: 0.9.19
  platform: ruby
  authors:
  - Esity
@@ -200,6 +200,7 @@ files:
  - lib/legion/llm/api/native/offerings.rb
  - lib/legion/llm/api/native/providers.rb
  - lib/legion/llm/api/native/routing.rb
+ - lib/legion/llm/api/native/tiers.rb
  - lib/legion/llm/api/openai/chat_completions.rb
  - lib/legion/llm/api/openai/embeddings.rb
  - lib/legion/llm/api/openai/models.rb