red-candle 1.3.0 → 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,4 +1,4 @@
- use magnus::{class, function, method, prelude::*, Error, Module, RArray, RHash, RModule, TryConvert};
+ use magnus::{function, method, prelude::*, Error, Module, RArray, RHash, RModule, Ruby, TryConvert};
  use crate::tokenizer::{TokenizerWrapper as InnerTokenizer, loader::TokenizerLoader};
  use crate::ruby::Result;

@@ -9,136 +9,151 @@ pub struct Tokenizer(pub InnerTokenizer);
  impl Tokenizer {
      /// Create a new tokenizer from a file path
      pub fn from_file(path: String) -> Result<Self> {
+         let ruby = Ruby::get().unwrap();
          let tokenizer = TokenizerLoader::from_file(&path)
-             .map_err(|e| Error::new(magnus::exception::runtime_error(), e.to_string()))?;
+             .map_err(|e| Error::new(ruby.exception_runtime_error(), e.to_string()))?;
          Ok(Self(InnerTokenizer::new(tokenizer)))
      }

      /// Create a new tokenizer from HuggingFace model ID
      pub fn from_pretrained(model_id: String) -> Result<Self> {
+         let ruby = Ruby::get().unwrap();
+         let runtime_error = ruby.exception_runtime_error();
          // Use tokio runtime for async operations
          let rt = tokio::runtime::Runtime::new()
-             .map_err(|e| Error::new(magnus::exception::runtime_error(), format!("Failed to create runtime: {}", e)))?;
-
+             .map_err(|e| Error::new(runtime_error, format!("Failed to create runtime: {}", e)))?;
+
          let tokenizer = rt.block_on(async {
              TokenizerLoader::from_hf_hub(&model_id, None).await
          })
-         .map_err(|e| Error::new(magnus::exception::runtime_error(), e.to_string()))?;
-
+         .map_err(|e| Error::new(runtime_error, e.to_string()))?;
+
          Ok(Self(InnerTokenizer::new(tokenizer)))
      }

      /// Encode text into token IDs
      pub fn encode(&self, text: String, add_special_tokens: Option<bool>) -> Result<RArray> {
+         let ruby = Ruby::get().unwrap();
          let add_special = add_special_tokens.unwrap_or(true);
          let token_ids = self.0.encode(&text, add_special)
-             .map_err(|e| Error::new(magnus::exception::runtime_error(), e.to_string()))?;
-
-         Ok(RArray::from_vec(token_ids.into_iter().map(|id| id as i64).collect()))
+             .map_err(|e| Error::new(ruby.exception_runtime_error(), e.to_string()))?;
+
+         Ok(ruby.ary_from_vec(token_ids.into_iter().map(|id| id as i64).collect()))
      }
-
+
      /// Encode text into token strings (words/subwords)
      pub fn encode_to_tokens(&self, text: String, add_special_tokens: Option<bool>) -> Result<RArray> {
+         let ruby = Ruby::get().unwrap();
+         let runtime_error = ruby.exception_runtime_error();
          let add_special = add_special_tokens.unwrap_or(true);
          let token_ids = self.0.encode(&text, add_special)
-             .map_err(|e| Error::new(magnus::exception::runtime_error(), e.to_string()))?;
-
+             .map_err(|e| Error::new(runtime_error, e.to_string()))?;
+
          let mut tokens = Vec::new();
          for id in token_ids {
              let token = self.0.token_to_piece(id)
-                 .map_err(|e| Error::new(magnus::exception::runtime_error(), e.to_string()))?;
+                 .map_err(|e| Error::new(runtime_error, e.to_string()))?;
              tokens.push(token);
          }
-
-         Ok(RArray::from_vec(tokens))
+
+         Ok(ruby.ary_from_vec(tokens))
      }

      /// Encode multiple texts in batch
      pub fn encode_batch(&self, texts: RArray, add_special_tokens: Option<bool>) -> Result<RArray> {
+         let ruby = Ruby::get().unwrap();
+         let runtime_error = ruby.exception_runtime_error();
          let texts: Vec<String> = texts.to_vec()?;
          let add_special = add_special_tokens.unwrap_or(true);
-
+
          let token_ids_batch = self.0.encode_batch(texts, add_special)
-             .map_err(|e| Error::new(magnus::exception::runtime_error(), e.to_string()))?;
-
-         let result = RArray::new();
+             .map_err(|e| Error::new(runtime_error, e.to_string()))?;
+
+         let result = ruby.ary_new();
          for token_ids in token_ids_batch {
-             result.push(RArray::from_vec(token_ids.into_iter().map(|id| id as i64).collect()))?;
+             result.push(ruby.ary_from_vec(token_ids.into_iter().map(|id| id as i64).collect()))?;
          }
-
+
          Ok(result)
      }
-
+
      /// Encode multiple texts in batch, returning token strings
      pub fn encode_batch_to_tokens(&self, texts: RArray, add_special_tokens: Option<bool>) -> Result<RArray> {
+         let ruby = Ruby::get().unwrap();
+         let runtime_error = ruby.exception_runtime_error();
          let texts: Vec<String> = texts.to_vec()?;
          let add_special = add_special_tokens.unwrap_or(true);
-
+
          let token_ids_batch = self.0.encode_batch(texts, add_special)
-             .map_err(|e| Error::new(magnus::exception::runtime_error(), e.to_string()))?;
-
-         let result = RArray::new();
+             .map_err(|e| Error::new(runtime_error, e.to_string()))?;
+
+         let result = ruby.ary_new();
          for token_ids in token_ids_batch {
              let mut tokens = Vec::new();
              for id in token_ids {
                  let token = self.0.token_to_piece(id)
-                     .map_err(|e| Error::new(magnus::exception::runtime_error(), e.to_string()))?;
+                     .map_err(|e| Error::new(runtime_error, e.to_string()))?;
                  tokens.push(token);
              }
-             result.push(RArray::from_vec(tokens))?;
+             result.push(ruby.ary_from_vec(tokens))?;
          }
-
+
          Ok(result)
      }

      /// Encode text and return both token IDs and token strings
      pub fn encode_with_tokens(&self, text: String, add_special_tokens: Option<bool>) -> Result<RHash> {
+         let ruby = Ruby::get().unwrap();
+         let runtime_error = ruby.exception_runtime_error();
          let add_special = add_special_tokens.unwrap_or(true);
          let token_ids = self.0.encode(&text, add_special)
-             .map_err(|e| Error::new(magnus::exception::runtime_error(), e.to_string()))?;
-
+             .map_err(|e| Error::new(runtime_error, e.to_string()))?;
+
          let mut tokens = Vec::new();
          for &id in &token_ids {
              let token = self.0.token_to_piece(id)
-                 .map_err(|e| Error::new(magnus::exception::runtime_error(), e.to_string()))?;
+                 .map_err(|e| Error::new(runtime_error, e.to_string()))?;
              tokens.push(token);
          }
-
-         let hash = RHash::new();
-         hash.aset(magnus::Symbol::new("ids"), RArray::from_vec(token_ids.into_iter().map(|id| id as i64).collect()))?;
-         hash.aset(magnus::Symbol::new("tokens"), RArray::from_vec(tokens))?;
-
+
+         let hash = ruby.hash_new();
+         hash.aset(ruby.to_symbol("ids"), ruby.ary_from_vec(token_ids.into_iter().map(|id| id as i64).collect()))?;
+         hash.aset(ruby.to_symbol("tokens"), ruby.ary_from_vec(tokens))?;
+
          Ok(hash)
      }
-
+
      /// Decode token IDs back to text
      pub fn decode(&self, token_ids: RArray, skip_special_tokens: Option<bool>) -> Result<String> {
+         let ruby = Ruby::get().unwrap();
          let token_ids: Vec<i64> = token_ids.to_vec()?;
          let token_ids: Vec<u32> = token_ids.into_iter()
              .map(|id| id as u32)
              .collect();
          let skip_special = skip_special_tokens.unwrap_or(true);
-
+
          self.0.decode(&token_ids, skip_special)
-             .map_err(|e| Error::new(magnus::exception::runtime_error(), e.to_string()))
+             .map_err(|e| Error::new(ruby.exception_runtime_error(), e.to_string()))
      }

      /// Get the string representation of a single token ID
      pub fn id_to_token(&self, token_id: i64) -> Result<String> {
+         let ruby = Ruby::get().unwrap();
          self.0.token_to_piece(token_id as u32)
-             .map_err(|e| Error::new(magnus::exception::runtime_error(), e.to_string()))
+             .map_err(|e| Error::new(ruby.exception_runtime_error(), e.to_string()))
      }

      /// Get the vocabulary as a hash of token string to ID
      pub fn get_vocab(&self, with_added_tokens: Option<bool>) -> Result<RHash> {
+         let ruby = Ruby::get().unwrap();
          let with_added = with_added_tokens.unwrap_or(true);
          let vocab = self.0.inner().get_vocab(with_added);
-
-         let hash = RHash::new();
+
+         let hash = ruby.hash_new();
          for (token, id) in vocab {
              hash.aset(token, id as i64)?;
          }
-
+
          Ok(hash)
      }

@@ -151,23 +166,24 @@ impl Tokenizer {
      /// Enable padding - returns a new tokenizer with padding enabled
      pub fn with_padding(&self, kwargs: RHash) -> Result<Self> {
          use tokenizers::{PaddingParams, PaddingStrategy, PaddingDirection};
-
+         let ruby = Ruby::get().unwrap();
+
          let mut params = PaddingParams::default();
-
+
          // Extract parameters from kwargs
-         if let Some(length) = kwargs.get(magnus::Symbol::new("length")) {
+         if let Some(length) = kwargs.get(ruby.to_symbol("length")) {
              if let Ok(len) = usize::try_convert(length) {
                  params.strategy = PaddingStrategy::Fixed(len);
              }
          }
-
-         if let Some(max_length) = kwargs.get(magnus::Symbol::new("max_length")) {
+
+         if let Some(max_length) = kwargs.get(ruby.to_symbol("max_length")) {
              if let Ok(_) = usize::try_convert(max_length) {
                  params.strategy = PaddingStrategy::BatchLongest;
              }
          }
-
-         if let Some(direction) = kwargs.get(magnus::Symbol::new("direction")) {
+
+         if let Some(direction) = kwargs.get(ruby.to_symbol("direction")) {
              if let Ok(dir) = String::try_convert(direction) {
                  params.direction = match dir.as_str() {
                      "right" => PaddingDirection::Right,
@@ -176,19 +192,19 @@ impl Tokenizer {
                  };
              }
          }
-
-         if let Some(pad_id) = kwargs.get(magnus::Symbol::new("pad_id")) {
+
+         if let Some(pad_id) = kwargs.get(ruby.to_symbol("pad_id")) {
              if let Ok(id) = u32::try_convert(pad_id) {
                  params.pad_id = id;
              }
          }
-
-         if let Some(pad_token) = kwargs.get(magnus::Symbol::new("pad_token")) {
+
+         if let Some(pad_token) = kwargs.get(ruby.to_symbol("pad_token")) {
              if let Ok(token) = String::try_convert(pad_token) {
                  params.pad_token = token;
              }
          }
-
+
          let mut new_tokenizer = self.0.clone();
          let _ = new_tokenizer.inner_mut().with_padding(Some(params));
          Ok(Self(new_tokenizer))
@@ -197,14 +213,14 @@ impl Tokenizer {
      /// Enable truncation - returns a new tokenizer with truncation enabled
      pub fn with_truncation(&self, max_length: usize) -> Result<Self> {
          use tokenizers::{TruncationParams, TruncationStrategy, TruncationDirection};
-
+
          let params = TruncationParams {
              max_length,
              strategy: TruncationStrategy::LongestFirst,
              stride: 0,
              direction: TruncationDirection::Right,
          };
-
+
          let mut new_tokenizer = self.0.clone();
          let _ = new_tokenizer.inner_mut().with_truncation(Some(params));
          Ok(Self(new_tokenizer))
@@ -212,8 +228,9 @@ impl Tokenizer {

      /// Get special tokens information
      pub fn get_special_tokens(&self) -> Result<RHash> {
-         let hash = RHash::new();
-
+         let ruby = Ruby::get().unwrap();
+         let hash = ruby.hash_new();
+
          // Common special tokens
          let special_tokens = vec![
              ("[CLS]", "cls_token"),
@@ -224,50 +241,49 @@ impl Tokenizer {
              ("<s>", "bos_token"),
              ("</s>", "eos_token"),
          ];
-
+
          let vocab = self.0.inner().get_vocab(true);
-
+
          for (token, name) in special_tokens {
              if let Some(id) = vocab.get(token) {
                  hash.aset(name, *id as i64)?;
              }
          }
-
+
          Ok(hash)
      }

      /// Get tokenizer options as a hash
      pub fn options(&self) -> Result<RHash> {
-         let hash = RHash::new();
-
+         let ruby = Ruby::get().unwrap();
+         let hash = ruby.hash_new();
+
          // Get vocab size
          hash.aset("vocab_size", self.vocab_size(Some(true)))?;
          hash.aset("vocab_size_base", self.vocab_size(Some(false)))?;
-
+
          // Get special tokens info
          let special_tokens = self.get_special_tokens()?;
          hash.aset("special_tokens", special_tokens)?;
-
+
          // Get padding/truncation info if available
          let inner_tokenizer = self.0.inner();
-
+
          // Check if padding is enabled
          if let Some(_padding) = inner_tokenizer.get_padding() {
-             let padding_info = RHash::new();
+             let padding_info = ruby.hash_new();
              padding_info.aset("enabled", true)?;
-             // Note: We can't easily extract all padding params from the tokenizers library
-             // but we can indicate it's enabled
              hash.aset("padding", padding_info)?;
          }
-
-         // Check if truncation is enabled
+
+         // Check if truncation is enabled
          if let Some(truncation) = inner_tokenizer.get_truncation() {
-             let truncation_info = RHash::new();
+             let truncation_info = ruby.hash_new();
              truncation_info.aset("enabled", true)?;
              truncation_info.aset("max_length", truncation.max_length)?;
              hash.aset("truncation", truncation_info)?;
          }
-
+
          Ok(hash)
      }

@@ -278,13 +294,13 @@ impl Tokenizer {
              .ok()
              .map(|h| h.len())
              .unwrap_or(0);
-
+
          let mut parts = vec![format!("#<Candle::Tokenizer vocab_size={}", vocab_size)];
-
+
          if special_tokens > 0 {
              parts.push(format!("special_tokens={}", special_tokens));
          }
-
+
          // Check for padding/truncation
          let inner_tokenizer = self.0.inner();
          if inner_tokenizer.get_padding().is_some() {
@@ -293,18 +309,19 @@ impl Tokenizer {
          if let Some(truncation) = inner_tokenizer.get_truncation() {
              parts.push(format!("truncation={}", truncation.max_length));
          }
-
+
          parts.join(" ") + ">"
      }
  }

  pub fn init(rb_candle: RModule) -> Result<()> {
-     let tokenizer_class = rb_candle.define_class("Tokenizer", class::object())?;
-
+     let ruby = Ruby::get().unwrap();
+     let tokenizer_class = rb_candle.define_class("Tokenizer", ruby.class_object())?;
+
      // Class methods
      tokenizer_class.define_singleton_method("from_file", function!(Tokenizer::from_file, 1))?;
      tokenizer_class.define_singleton_method("from_pretrained", function!(Tokenizer::from_pretrained, 1))?;
-
+
      // Instance methods
      tokenizer_class.define_method("encode", method!(Tokenizer::encode, 2))?;
      tokenizer_class.define_method("encode_to_tokens", method!(Tokenizer::encode_to_tokens, 2))?;
@@ -321,6 +338,6 @@ pub fn init(rb_candle: RModule) -> Result<()> {
      tokenizer_class.define_method("options", method!(Tokenizer::options, 0))?;
      tokenizer_class.define_method("inspect", method!(Tokenizer::inspect, 0))?;
      tokenizer_class.define_method("to_s", method!(Tokenizer::inspect, 0))?;
-
+
      Ok(())
- }
+ }
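The tokenizer bindings above replace magnus's global helpers (magnus::exception::runtime_error(), RArray::from_vec, RHash::new(), magnus::Symbol::new, class::object()) with methods on a magnus Ruby handle obtained per call via Ruby::get().unwrap(); the Ruby-facing API is unchanged. A minimal usage sketch against the methods registered in init above, not taken from the gem's docs: the model ID is illustrative, and the optional flags are passed explicitly since the bindings declare fixed arities.

    require "candle"

    # Illustrative model ID; any Hugging Face repo with a tokenizer.json should work.
    tokenizer = Candle::Tokenizer.from_pretrained("bert-base-uncased")

    ids    = tokenizer.encode("Hello world", true)             # token IDs, with special tokens
    tokens = tokenizer.encode_to_tokens("Hello world", true)   # token strings
    both   = tokenizer.encode_with_tokens("Hello world", true) # { ids: [...], tokens: [...] }
    text   = tokenizer.decode(ids, true)                       # back to text, skipping special tokens

    # with_padding and with_truncation return new tokenizer instances.
    padded = tokenizer.with_padding(length: 128)
    puts padded.inspect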
data/lib/candle/llm.rb CHANGED
@@ -32,16 +32,90 @@ module Candle
        end
      end
      # Create a structured constraint from a JSON schema
+     # Uses the model's vocabulary with proper byte encoding handling
      def constraint_from_schema(schema)
        schema_str = schema.is_a?(String) ? schema : JSON.generate(schema)
-       StructuredConstraint.from_schema(schema_str, tokenizer)
+
+       # Extract the tokenizer source model ID for proper vocabulary loading
+       tokenizer_model = tokenizer_source_model
+       if tokenizer_model
+         begin
+           StructuredConstraint.from_schema_with_model(schema_str, tokenizer_model)
+         rescue RuntimeError => e
+           # Fall back to legacy method if from_pretrained fails
+           # (e.g., tokenizer doesn't have EOS token in expected format)
+           if e.message.include?("UnsupportedTokenizer")
+             StructuredConstraint.from_schema(schema_str, tokenizer)
+           else
+             raise
+           end
+         end
+       else
+         # Fall back to legacy method if we can't determine the model
+         StructuredConstraint.from_schema(schema_str, tokenizer)
+       end
      end
-
+
      # Create a structured constraint from a regex pattern
+     # Uses the model's vocabulary with proper byte encoding handling
      def constraint_from_regex(pattern)
        pattern_str = pattern.is_a?(Regexp) ? pattern.source : pattern.to_s
-       StructuredConstraint.from_regex(pattern_str, tokenizer)
+
+       # Extract the tokenizer source model ID for proper vocabulary loading
+       tokenizer_model = tokenizer_source_model
+       if tokenizer_model
+         begin
+           StructuredConstraint.from_regex_with_model(pattern_str, tokenizer_model)
+         rescue RuntimeError => e
+           # Fall back to legacy method if from_pretrained fails
+           if e.message.include?("UnsupportedTokenizer")
+             StructuredConstraint.from_regex(pattern_str, tokenizer)
+           else
+             raise
+           end
+         end
+       else
+         # Fall back to legacy method if we can't determine the model
+         StructuredConstraint.from_regex(pattern_str, tokenizer)
+       end
      end
+
+     private
+
+     # Get the model ID to use for vocabulary loading
+     # This handles GGUF models by extracting the tokenizer source
+     def tokenizer_source_model
+       opts = options rescue {}
+
+       # For GGUF models, use the tokenizer source if available
+       if opts["tokenizer_source"]
+         return opts["tokenizer_source"]
+       end
+
+       # For regular models, use the base model ID
+       if opts["base_model"]
+         return opts["base_model"]
+       end
+
+       # Try model_id but strip GGUF parts
+       model = opts["model_id"] || (model_id rescue nil)
+       return nil unless model
+
+       # Remove GGUF file suffix if present
+       if model.include?("@")
+         model = model.split("@").first
+       end
+
+       # For GGUF repos, try to guess the tokenizer source
+       if model.downcase.include?("gguf")
+         guessed = self.class.guess_tokenizer(model)
+         return guessed if guessed && guessed != model
+       end
+
+       model
+     end
+
+     public

      # Generate with regex constraint
      def generate_regex(prompt, pattern:, stop_on_match: true, **options)
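constraint_from_schema and constraint_from_regex now resolve a source model ID through the new private tokenizer_source_model helper (preferring options["tokenizer_source"], then options["base_model"], stripping anything after "@" in a model ID, and guessing a tokenizer for GGUF repos), then build the constraint via from_schema_with_model / from_regex_with_model, falling back to the legacy tokenizer-based constructors when an UnsupportedTokenizer error surfaces. A hedged sketch of the caller-facing behavior; the model ID and schema are illustrative, and the loading call assumes the gem's usual from_pretrained entry point:

    require "candle"

    # Illustrative model ID.
    llm = Candle::LLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")

    # Built from the resolved source model's vocabulary when possible;
    # falls back to the legacy tokenizer-based path transparently.
    constraint = llm.constraint_from_schema({
      type: "object",
      properties: { answer: { type: "string" } },
      required: ["answer"]
    })

    # Regexp or string patterns are both accepted.
    phone_constraint = llm.constraint_from_regex(/\d{3}-\d{3}-\d{4}/)

    # generate_regex (context line above) drives constrained generation directly.
    phone = llm.generate_regex("Call me at ", pattern: /\d{3}-\d{3}-\d{4}/)

Only constraint construction changed; how a constraint is passed to generation is untouched by this diff.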
@@ -1,5 +1,5 @@
  # :nocov:
  module Candle
-   VERSION = "1.3.0"
+   VERSION = "1.4.0"
  end
  # :nocov:
metadata CHANGED
@@ -1,15 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: red-candle
  version: !ruby/object:Gem::Version
-   version: 1.3.0
+   version: 1.4.0
  platform: ruby
  authors:
  - Christopher Petersen
  - kojix2
- autorequire:
  bindir: bin
  cert_chain: []
- date: 2025-09-13 00:00:00.000000000 Z
+ date: 1980-01-02 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: rb_sys
@@ -25,6 +24,34 @@ dependencies:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
+ - !ruby/object:Gem::Dependency
+   name: logger
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
+ - !ruby/object:Gem::Dependency
+   name: benchmark
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
  - !ruby/object:Gem::Dependency
    name: minitest
    requirement: !ruby/object:Gem::Requirement
@@ -225,7 +252,6 @@ homepage: https://github.com/scientist-labs/red-candle
  licenses:
  - MIT
  metadata: {}
- post_install_message:
  rdoc_options: []
  require_paths:
  - lib
@@ -241,8 +267,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
      version: '3.3'
  requirements:
  - Rust >= 1.85
- rubygems_version: 3.3.3
- signing_key:
+ rubygems_version: 4.0.3
  specification_version: 4
  summary: Ruby gem for running state-of-the-art language models locally. Access LLMs,
    embeddings, rerankers, and NER models directly from Ruby using Rust-powered Candle