legion-llm 0.8.26 → 0.8.27

This diff shows the changes between publicly released versions of the package as they appear in its public registry, and is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 942f34663b8d915ee982996b5b2e63e26a7edf79a7aac17f8ce71ed1829dff01
-  data.tar.gz: dd78dd3bd79c9f1cf19d170f4ee2905fc92865dd3e21b107856c973eaf752fb5
+  metadata.gz: 2cccf9351fd9f4db59b1548197bf7b78c5947e85183535e86ede5c3359d71b89
+  data.tar.gz: 14e3a1b5b6648bea618941f84473e63aeccee7edc9520366d09dde8d27b00a7b
 SHA512:
-  metadata.gz: bfc1f55dce2a3eda78b5b6ab2405b6ce5d4e58fa841a81bb304af3bbe9a5b52851023c845d898713cfa87d9e292cd5fd1545464a7e0937eadde6f8668595ccc2
-  data.tar.gz: 4cad8eb9c6b6cfc79c1ffce687b7fddbb7b47d4e22ec9bca424f2dbb061ed83fff97d4ab2bbec441d3b319922316a238b057752210f1d7908e0d7169380485e9
+  metadata.gz: 31ec279fcb498e5cc3308bcefcb6adc94915c36867967fd08aa3f0422d4c583f83bc0ace1db9a47ecd45371a0ce0c82542fea7015c1474f5b7386409d789e5e0
+  data.tar.gz: '083bd8e581399a574b313a31784eacca4424fadb131f82cd17a5a7e840420da14ec3e5f589efa88b7be6be8a76916439c88bbf936a6d13c9a9435bd8fd04245c'
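To verify these digests locally, a minimal Ruby sketch, assuming the gem has first been downloaded into the current directory with `gem fetch legion-llm -v 0.8.27` (the `.gem` filename is the standard fetch output):

require 'digest'
require 'rubygems/package'

# A .gem file is a plain tar archive whose entries include data.tar.gz and
# metadata.gz; checksums.yaml records their digests. Recompute SHA256 here
# and compare against the values in the diff above.
File.open('legion-llm-0.8.27.gem', 'rb') do |io|
  Gem::Package::TarReader.new(io) do |tar|
    tar.each do |entry|
      next unless %w[data.tar.gz metadata.gz].include?(entry.full_name)

      puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
    end
  end
end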
data/CHANGELOG.md CHANGED
@@ -1,5 +1,16 @@
 # Legion LLM Changelog
 
+## [0.8.27] - 2026-04-24
+
+### Fixed
+- vLLM provider sent the `developer` message role (an OpenAI convention), which Qwen's chat template rejects. Added a `Vllm::Chat` module that overrides `format_messages` and `format_role` so the `system` role is always sent.
+- vLLM provider called `OpenAI::Chat.render_payload` as a module function, without a provider instance as context, causing a `NoMethodError` on `openai_use_system_role`. Rewritten to call `super` from instance-method overrides.
+- Audit events included the full conversation history in every message, so payloads grew quadratically with conversation length. History is now capped at the last 20 messages (configurable via `compliance.audit_max_messages`); the full conversation remains reconstructable via `conversation_id`.
+
+### Added
+- vLLM `chat_template_kwargs` with `enable_thinking` is now sent on every request, so vLLM returns reasoning in the `reasoning` response field instead of inline `<think>` tags.
+- `providers.vllm.enable_thinking` setting (default: `true`) controls whether thinking is enabled for vLLM requests; a per-request `thinking` param overrides it.
+
 ## [0.8.26] - 2026-04-24
 
 ### Added
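The interplay between the new setting and the per-request param is easiest to see in isolation. A minimal sketch of the resolution rule described above; `resolve_enable_thinking` is a hypothetical stand-in for the logic that actually lives in `Vllm::Chat#render_payload` (shown in the provider diff further down):

# Hypothetical helper mirroring the 0.8.27 resolution rule: a per-request
# `thinking` param wins; otherwise fall back to providers.vllm.enable_thinking.
def resolve_enable_thinking(thinking, setting_default: true)
  return setting_default if thinking.nil?

  thinking ? true : false
end

resolve_enable_thinking(nil)                         # => true  (setting default)
resolve_enable_thinking(false)                       # => false (per-request override)
resolve_enable_thinking(nil, setting_default: false) # => false (setting disabled)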
@@ -40,7 +40,7 @@ module Legion
           timeline: compact_timeline(response.timeline),
           classification: response.classification,
           tracing: response.tracing,
-          messages: request.messages,
+          messages: current_turn_messages(request.messages),
           response_content: msg_content,
           tools_used: tools_data,
           timestamp: Time.now,
@@ -109,6 +109,23 @@ module Legion
         end
       end
 
+      def current_turn_messages(messages)
+        return messages unless messages.is_a?(Array)
+
+        max = audit_max_messages
+        return messages if messages.size <= max
+
+        messages.last(max)
+      end
+
+      def audit_max_messages
+        return 20 unless defined?(Legion::Settings)
+
+        Legion::Settings[:llm].dig(:compliance, :audit_max_messages) || 20
+      rescue StandardError
+        20
+      end
+
       def build_message_context(response:, **)
         {
           request_id: response.request_id,
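The cap is a plain trailing window over the message array. A standalone sketch of the behavior, using a hypothetical 25-message conversation and the default cap of 20:

# 25 messages: one system prompt followed by 24 conversation turns.
messages = Array.new(25) { |i| { role: i.zero? ? 'system' : 'user', content: "turn #{i}" } }

max    = 20
capped = messages.size <= max ? messages : messages.last(max)

capped.size            # => 20
capped.first[:content] # => "turn 5" -- the five oldest messages are dropped

Note that the window is purely positional, so a leading system prompt falls out of the audit payload once the conversation grows long enough; full reconstruction relies on `conversation_id`, as the changelog entry states.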
@@ -3,6 +3,47 @@
 module RubyLLM
   module Providers
     class Vllm < OpenAI
+      module Chat
+        def format_role(role)
+          role.to_s
+        end
+
+        def format_messages(messages)
+          messages.map do |msg|
+            {
+              role: format_role(msg.role),
+              content: OpenAI::Media.format_content(msg.content),
+              tool_calls: format_tool_calls(msg.tool_calls),
+              tool_call_id: msg.tool_call_id
+            }.compact.merge(OpenAI::Chat.format_thinking(msg))
+          end
+        end
+
+        def render_payload(messages, tools:, temperature:, model:, stream: false, schema: nil,
+                           thinking: nil, tool_prefs: nil)
+          payload = super
+          enable = if thinking.nil?
+                     vllm_thinking_default
+                   else
+                     thinking ? true : false
+                   end
+          payload[:chat_template_kwargs] = { enable_thinking: enable }
+          payload
+        end
+
+        private
+
+        def vllm_thinking_default
+          return true unless defined?(Legion::Settings)
+
+          Legion::Settings[:llm].dig(:providers, :vllm, :enable_thinking) != false
+        rescue StandardError
+          true
+        end
+      end
+
+      include Vllm::Chat
+
       def api_base
         @config.vllm_api_base
       end
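The net effect on the wire format is that every chat completion request now carries a top-level `chat_template_kwargs` key. A hypothetical payload after the override runs (the model name and messages are placeholders; only the `chat_template_kwargs` entry and the preserved `system` role come from the diff above):

require 'json'

payload = {
  model: 'qwen3.6-27b',
  messages: [
    { role: 'system', content: 'You are helpful.' }, # stays 'system', never 'developer'
    { role: 'user',   content: 'Hello' }
  ],
  chat_template_kwargs: { enable_thinking: true }    # added on every request
}

puts JSON.pretty_generate(payload)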
@@ -377,10 +377,11 @@ module Legion
           base_url: 'http://localhost:11434'
         },
         vllm: {
-          enabled: false,
-          default_model: 'qwen3.6-27b',
-          base_url: 'http://localhost:8000/v1',
-          api_key: nil
+          enabled: false,
+          default_model: 'qwen3.6-27b',
+          base_url: 'http://localhost:8000/v1',
+          api_key: nil,
+          enable_thinking: true
         }
       }
     end
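Taken together with the audit change, a deployment can tune both new knobs through the same nested settings hash. A sketch of the relevant fragment (the key paths match the `dig` lookups in the code above; how this hash gets merged into `Legion::Settings` depends on the deployment):

llm_settings = {
  llm: {
    compliance: {
      audit_max_messages: 10   # tighter audit window than the default 20
    },
    providers: {
      vllm: {
        enabled: true,
        base_url: 'http://localhost:8000/v1',
        enable_thinking: false # disable thinking unless a request passes thinking: true
      }
    }
  }
}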
@@ -2,6 +2,6 @@
 
 module Legion
   module LLM
-    VERSION = '0.8.26'
+    VERSION = '0.8.27'
   end
 end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: legion-llm
 version: !ruby/object:Gem::Version
-  version: 0.8.26
+  version: 0.8.27
 platform: ruby
 authors:
 - Esity