lex-llm 0.1.1 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +4 -0
- data/lib/lex_llm/provider/open_ai_compatible.rb +219 -0
- data/lib/lex_llm/version.rb +1 -1
- data/lib/lex_llm.rb +1 -0
- metadata +2 -1
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA256:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: bba21b096146376e92e11aa2a001e7f13cd91e453ecd2907c44cc1c39dcbe3a1
|
|
4
|
+
data.tar.gz: ad320998555a6e6f8eda82b0dac1bc376415def07c15228400c60f0638a82b1c
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: 7b657bf222fa57ad9bd887d6e964a9bb1664c732b894dc760f33fe5396b44c136a19ae96d859036c63449f307f6eb613ef27862ace25fd634a95dafb9c3b5c96
|
|
7
|
+
data.tar.gz: f1799aba3ec971591f68e7cfe6ad144742acd82582af1e44de251d3ada828773e64f2b617be9482f7fc31c15e2f2a22861630d5383e614565465d6ae050e9e5e
|
data/CHANGELOG.md
CHANGED
|
@@ -1,5 +1,9 @@
|
|
|
1
1
|
# Changelog
|
|
2
2
|
|
|
3
|
+
## 0.1.2 - 2026-04-27
|
|
4
|
+
|
|
5
|
+
- Add a shared OpenAI-compatible provider adapter for `lex-llm-openai`, `lex-llm-vllm`, `lex-llm-mlx`, and other compatible servers.
|
|
6
|
+
|
|
3
7
|
## 0.1.1 - 2026-04-27
|
|
4
8
|
|
|
5
9
|
- Remove fork-carried concrete provider implementations and VCR-backed provider specs from the base gem.
|
|
# frozen_string_literal: true

module LexLLM
  class Provider
    # Shared request/response adapter for servers speaking the OpenAI
    # chat-completions wire protocol (OpenAI itself, vLLM, MLX, and other
    # compatible backends). Including providers gain the standard endpoint
    # paths plus payload renderers and response parsers; everything below
    # `private` is an implementation detail of the including provider.
    module OpenAICompatible
      def completion_url = '/v1/chat/completions'
      def stream_url = completion_url
      def models_url = '/v1/models'
      def moderation_url = '/v1/moderations'
      def embedding_url(**) = '/v1/embeddings'
      def transcription_url = '/v1/audio/transcriptions'

      # Any input image or mask implies an edit request; otherwise we
      # target the plain generation endpoint.
      def images_url(with:, mask:)
        if with || mask
          '/v1/images/edits'
        else
          '/v1/images/generations'
        end
      end

      private

      # Build the chat-completion request body. nil-valued keys are
      # compacted away so optional features are simply absent from the JSON.
      def render_payload(messages, tools:, temperature:, model:, stream:, schema:, thinking:, tool_prefs:) # rubocop:disable Metrics/ParameterLists
        body = {
          model: model.id,
          messages: format_openai_messages(messages),
          temperature: temperature,
          stream: stream,
          tools: format_openai_tools(tools),
          tool_choice: openai_tool_choice(tool_prefs),
          response_format: openai_response_format(schema),
          reasoning_effort: openai_reasoning_effort(thinking)
        }
        body.compact
      end

      # Translate LexLLM messages into OpenAI wire-format message hashes,
      # dropping keys that do not apply to a given message.
      def format_openai_messages(messages)
        messages.map do |msg|
          entry = {
            role: msg.role.to_s,
            content: openai_content(msg.content),
            tool_call_id: msg.tool_call_id,
            tool_calls: format_openai_tool_calls(msg.tool_calls)
          }
          entry.compact
        end
      end

      # Raw content formats itself; content without attachments collapses to
      # plain text; attachments trigger the multi-part representation.
      def openai_content(content)
        if content.is_a?(LexLLM::Content::Raw)
          content.format
        elsif !content.respond_to?(:attachments)
          content
        elsif content.attachments.empty?
          content.text.to_s
        else
          openai_content_parts(content)
        end
      end

      # Multi-part content: optional leading text part followed by one
      # image_url part per image attachment (non-image attachments skipped).
      def openai_content_parts(content)
        text_part = ({ type: 'text', text: content.text.to_s } if content.text)
        image_parts = content.attachments.filter_map do |attachment|
          { type: 'image_url', image_url: { url: attachment.for_llm } } if attachment.image?
        end
        [text_part, *image_parts].compact
      end

      # Serialize outbound tool calls; nil when the message carries none.
      def format_openai_tool_calls(tool_calls)
        return nil unless tool_calls&.any?

        tool_calls.each_value.map do |call|
          serialized_args = Legion::JSON.generate(call.arguments || {})
          { id: call.id, type: 'function', function: { name: call.name, arguments: serialized_args } }
        end
      end

      # Advertise available tools in OpenAI function format; nil when empty.
      # A tool without a schema gets an empty object schema.
      def format_openai_tools(tools)
        return nil if tools.empty?

        tools.each_value.map do |tool|
          schema = tool.params_schema || { type: 'object', properties: {} }
          { type: 'function', function: { name: tool.name, description: tool.description, parameters: schema } }
        end
      end

      # Map the caller's tool preference onto OpenAI's tool_choice: the
      # well-known modes pass through as strings, anything else is treated
      # as a specific function name to force.
      def openai_tool_choice(tool_prefs)
        return nil unless tool_prefs

        choice = tool_prefs[:choice] || tool_prefs['choice']
        return nil unless choice
        return choice.to_s if %i[auto none required].include?(choice.to_sym)

        { type: 'function', function: { name: choice.to_s } }
      end

      # Wrap a structured-output schema in OpenAI's json_schema envelope.
      def openai_response_format(schema)
        return nil unless schema

        schema_body = schema.respond_to?(:to_h) ? schema.to_h : schema
        { type: 'json_schema', json_schema: schema_body }
      end

      # Extract the reasoning-effort hint; only hash-shaped thinking
      # options are recognized.
      def openai_reasoning_effort(thinking)
        thinking[:effort] || thinking['effort'] if thinking.is_a?(Hash)
      end

      # Convert a non-streaming completion response into a LexLLM::Message,
      # reading the first choice only.
      def parse_completion_response(response)
        body = response.body
        first_choice = Array(body['choices']).first || {}
        msg = first_choice['message'] || {}
        usage = body['usage'] || {}

        LexLLM::Message.new(
          role: :assistant,
          content: msg['content'],
          model_id: body['model'],
          tool_calls: parse_tool_calls(msg['tool_calls']),
          input_tokens: usage['prompt_tokens'],
          output_tokens: usage['completion_tokens'],
          reasoning_tokens: usage.dig('completion_tokens_details', 'reasoning_tokens'),
          raw: body
        )
      end

      # Convert one streaming SSE data object into a LexLLM::Chunk,
      # reading the first choice's delta.
      def build_chunk(data)
        first_choice = Array(data['choices']).first || {}
        delta = first_choice['delta'] || {}
        usage = data['usage'] || {}

        LexLLM::Chunk.new(
          role: :assistant,
          content: delta['content'],
          model_id: data['model'],
          tool_calls: parse_tool_calls(delta['tool_calls']),
          input_tokens: usage['prompt_tokens'],
          output_tokens: usage['completion_tokens'],
          raw: data
        )
      end

      # Parse inbound tool calls into a hash keyed by function name.
      # NOTE(review): keying by name means parallel calls to the same
      # function overwrite each other — confirm this matches the
      # LexLLM::Message tool_calls contract.
      def parse_tool_calls(tool_calls)
        return nil unless tool_calls&.any?

        tool_calls.each_with_object({}) do |call, acc|
          function = call.fetch('function', {})
          name = function.fetch('name')
          acc[name.to_sym] = LexLLM::ToolCall.new(
            id: call['id'] || name,
            name: name,
            arguments: parse_tool_arguments(function['arguments'])
          )
        end
      end

      # Tool arguments may arrive as nil/empty, a pre-parsed hash, or a JSON
      # string; malformed JSON degrades to an empty hash rather than raising.
      def parse_tool_arguments(raw)
        return {} if raw.nil? || raw == ''
        return raw if raw.is_a?(Hash)

        begin
          Legion::JSON.parse(raw, symbolize_names: false)
        rescue Legion::JSON::ParseError
          {}
        end
      end

      # Map the /v1/models listing onto LexLLM::Model::Info records; the
      # raw entry is preserved as metadata.
      def parse_list_models_response(response, provider, _capabilities)
        entries = response.body.fetch('data', [])
        entries.map do |entry|
          LexLLM::Model::Info.new(
            id: entry.fetch('id'),
            name: entry['id'],
            provider: provider,
            created_at: entry['created'],
            metadata: entry
          )
        end
      end

      # Embedding request body; dimensions is optional and compacted away.
      def render_embedding_payload(text, model:, dimensions:)
        base = { model: model, input: text, dimensions: dimensions }
        base.compact
      end

      # Single-input requests unwrap to one vector; array inputs keep the
      # full vector list.
      def parse_embedding_response(response, model:, text:)
        body = response.body
        vectors = body.fetch('data', []).map { |item| item['embedding'] }
        vectors = vectors.first unless text.is_a?(Array)
        usage = body['usage'] || {}

        LexLLM::Embedding.new(vectors: vectors, model: model, input_tokens: usage['prompt_tokens'].to_i)
      end

      # Moderation request body; model is optional and compacted away.
      def render_moderation_payload(input, model:)
        { model: model, input: input }.compact
      end

      # Fall back to the requested model name when the server omits one.
      def parse_moderation_response(response, model:)
        body = response.body
        LexLLM::Moderation.new(
          id: body['id'],
          model: body['model'] || model,
          results: body.fetch('results', [])
        )
      end

      # Image request body; caller-supplied params override the base keys
      # before nils are compacted away.
      def render_image_payload(prompt, model:, size:, with:, mask:, params:) # rubocop:disable Metrics/ParameterLists
        base = { model: model, prompt: prompt, size: size, image: with, mask: mask }
        base.merge(params).compact
      end

      # Read the first returned image (URL or base64 payload).
      def parse_image_response(response, model:)
        body = response.body
        image = body.fetch('data', []).first || {}
        LexLLM::Image.new(
          url: image['url'],
          data: image['b64_json'],
          revised_prompt: image['revised_prompt'],
          model_id: model,
          usage: body['usage'] || {}
        )
      end

      # Transcription request body; extra options merge in before nils are
      # compacted away.
      def render_transcription_payload(file_part, model:, language:, **options)
        base = { model: model, file: file_part, language: language }
        base.merge(options).compact
      end

      def parse_transcription_response(response, model:)
        body = response.body
        LexLLM::Transcription.new(
          text: body['text'],
          model: model,
          language: body['language'],
          duration: body['duration'],
          segments: body['segments']
        )
      end
    end
  end
end
|
data/lib/lex_llm/version.rb
CHANGED
data/lib/lex_llm.rb
CHANGED
metadata
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
|
2
2
|
name: lex-llm
|
|
3
3
|
version: !ruby/object:Gem::Version
|
|
4
|
-
version: 0.1.1
|
|
4
|
+
version: 0.1.2
|
|
5
5
|
platform: ruby
|
|
6
6
|
authors:
|
|
7
7
|
- LegionIO
|
|
@@ -305,6 +305,7 @@ files:
|
|
|
305
305
|
- lib/lex_llm/models_schema.json
|
|
306
306
|
- lib/lex_llm/moderation.rb
|
|
307
307
|
- lib/lex_llm/provider.rb
|
|
308
|
+
- lib/lex_llm/provider/open_ai_compatible.rb
|
|
308
309
|
- lib/lex_llm/railtie.rb
|
|
309
310
|
- lib/lex_llm/routing.rb
|
|
310
311
|
- lib/lex_llm/routing/lane_key.rb
|