red-candle 1.0.0.pre.5 → 1.0.0.pre.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,496 @@
+ use candle_core::{DType, Device, Result as CandleResult, Tensor};
+ use candle_core::quantized::gguf_file;
+ use candle_transformers::models::quantized_llama::ModelWeights as QuantizedLlamaModel;
+ use candle_transformers::models::quantized_gemma3::ModelWeights as QuantizedGemmaModel;
+ use hf_hub::api::tokio::{Api, ApiRepo};
+ use tokenizers::Tokenizer;
+ use std::io::Seek;
+
+ use crate::llm::{GenerationConfig, TextGeneration, TextGenerator, TokenizerWrapper};
+
+ /// Unified GGUF model that can load any GGUF file and detect the architecture
+ #[derive(Debug)]
+ pub struct QuantizedGGUF {
+     model: ModelType,
+     tokenizer: TokenizerWrapper,
+     device: Device,
+     model_id: String,
+     eos_token_id: u32,
+     architecture: String,
+     _chat_template: Option<String>,
+ }
+
+ #[derive(Debug)]
+ enum ModelType {
+     Llama(QuantizedLlamaModel),
+     Gemma(QuantizedGemmaModel),
+     // Mistral uses Llama loader due to tensor naming compatibility
+ }
+
+ impl QuantizedGGUF {
+     /// Load a quantized model from a GGUF file
+     pub async fn from_pretrained(model_id: &str, device: Device, tokenizer_source: Option<&str>) -> CandleResult<Self> {
+         // Check if user specified an exact GGUF filename
+         let (actual_model_id, gguf_file) = if let Some(pos) = model_id.find('@') {
+             let (id, filename) = model_id.split_at(pos);
+             (id, Some(&filename[1..]))
+         } else {
+             (model_id, None)
+         };
+
+         let api = Api::new()
+             .map_err(|e| candle_core::Error::Msg(format!("Failed to create HF API: {}", e)))?;
+
+         let repo = api.model(actual_model_id.to_string());
+
+         // Download GGUF file
+         let gguf_filename = if let Some(filename) = gguf_file {
+             // User specified exact filename
+             repo.get(filename).await
+                 .map_err(|e| candle_core::Error::Msg(format!("Failed to download GGUF file '{}': {}", filename, e)))?
+                 .to_string_lossy().to_string()
+         } else {
+             // Let the Ruby side handle the search; require an explicit filename here
+             return Err(candle_core::Error::Msg(
+                 "Please specify a GGUF filename using gguf_file parameter".to_string()
+             ));
+         };
+
+         // Read GGUF metadata to determine architecture
+         let mut file = std::fs::File::open(&gguf_filename)?;
+         let content = gguf_file::Content::read(&mut file)?;
+
+         // Detect architecture from metadata
+         let architecture = Self::detect_architecture(&content, actual_model_id)?;
+
+         // For Gemma 3 models, we might need to adjust the architecture
+         let architecture = if actual_model_id.contains("gemma-3") || actual_model_id.contains("gemma3") {
+             "gemma3".to_string()
+         } else {
+             architecture
+         };
+
+         // Download tokenizer - either from specified source or with fallback
+         let tokenizer_filename = if let Some(source) = tokenizer_source {
+             Self::download_tokenizer_from_source(&api, source).await?
+         } else {
+             Self::download_tokenizer(&api, &repo, actual_model_id, &architecture).await?
+         };
+         let tokenizer = Tokenizer::from_file(tokenizer_filename)
+             .map_err(|e| candle_core::Error::Msg(format!("Failed to load tokenizer: {}", e)))?;
+
+         // Determine EOS token based on architecture and model
+         let eos_token_id = Self::determine_eos_token(&tokenizer, &architecture, actual_model_id);
+
+         // Load the appropriate model based on architecture
+         file.seek(std::io::SeekFrom::Start(0))?;
+         let content = gguf_file::Content::read(&mut file)?;
+
+         let model = match architecture.as_str() {
+             "llama" | "mistral" => {
+                 // Both use the same GGUF format with llama.cpp tensor names
+                 let model = QuantizedLlamaModel::from_gguf(content, &mut file, &device)?;
+                 ModelType::Llama(model)
+             }
+             "gemma" | "gemma2" | "gemma3" => {
+                 // Try Gemma-specific loader first, fall back to Llama if it fails
+                 match QuantizedGemmaModel::from_gguf(content, &mut file, &device) {
+                     Ok(model) => ModelType::Gemma(model),
+                     Err(e) if e.to_string().contains("gemma3.attention.head_count") => {
+                         // This might be an older Gemma GGUF that uses llama format
+                         // Note: Some Gemma GGUF files may not be compatible
+                         file.seek(std::io::SeekFrom::Start(0))?;
+                         let content = gguf_file::Content::read(&mut file)?;
+                         let model = QuantizedLlamaModel::from_gguf(content, &mut file, &device)?;
+                         ModelType::Llama(model)
+                     }
+                     Err(e) => return Err(e),
+                 }
+             }
+             _ => {
+                 return Err(candle_core::Error::Msg(format!(
+                     "Unsupported architecture: {}. Supported: llama, mistral, gemma",
+                     architecture
+                 )));
+             }
+         };
+
+         // Detect chat template (for now, use defaults based on architecture)
+         let chat_template = Self::detect_chat_template(&tokenizer, &architecture, actual_model_id);
+
+         Ok(Self {
+             model,
+             tokenizer: TokenizerWrapper::new(tokenizer),
+             device,
+             model_id: actual_model_id.to_string(),
+             eos_token_id,
+             architecture: architecture.clone(),
+             _chat_template: chat_template,
+         })
+     }
+
+     /// Detect architecture from GGUF metadata or model name
+     fn detect_architecture(content: &gguf_file::Content, model_id: &str) -> CandleResult<String> {
+         // First try to get from metadata
+         if let Some(gguf_file::Value::String(arch)) = content.metadata.get("general.architecture") {
+             return Ok(arch.clone());
+         }
+
+         // Fallback to model name detection
+         let model_lower = model_id.to_lowercase();
+         if model_lower.contains("llama") || model_lower.contains("tinyllama") {
+             Ok("llama".to_string())
+         } else if model_lower.contains("mistral") {
+             Ok("mistral".to_string())
+         } else if model_lower.contains("gemma") {
+             Ok("gemma".to_string())
+         } else {
+             Err(candle_core::Error::Msg(
+                 "Could not determine model architecture from metadata or name".to_string()
+             ))
+         }
+     }
+
+     /// Download tokenizer from a specific source
+     async fn download_tokenizer_from_source(
+         api: &Api,
+         source: &str
+     ) -> CandleResult<std::path::PathBuf> {
+         // Check if it's a local file path
+         if source.ends_with(".json") && std::path::Path::new(source).exists() {
+             return Ok(std::path::PathBuf::from(source));
+         }
+
+         // Otherwise treat it as a HuggingFace repo
+         let repo = api.model(source.to_string());
+
+         // Try tokenizer.json first
+         if let Ok(path) = repo.get("tokenizer.json").await {
+             return Ok(path);
+         }
+
+         // Try tokenizer.model (for models that use sentencepiece)
+         if let Ok(path) = repo.get("tokenizer.model").await {
+             return Ok(path);
+         }
+
+         Err(candle_core::Error::Msg(format!(
+             "Failed to find tokenizer in specified source: {}",
+             source
+         )))
+     }
+
+     /// Download tokenizer with architecture-specific fallbacks
+     async fn download_tokenizer(
+         _api: &Api,
+         repo: &ApiRepo,
+         model_id: &str,
+         _architecture: &str
+     ) -> CandleResult<std::path::PathBuf> {
+         // First try to get tokenizer.json from the GGUF repo
+         if let Ok(path) = repo.get("tokenizer.json").await {
+             return Ok(path);
+         }
+
+         // Try tokenizer.model (for models that use sentencepiece)
+         if let Ok(path) = repo.get("tokenizer.model").await {
+             return Ok(path);
+         }
+
+         // If no tokenizer found in GGUF repo, return error
+         // Ruby will handle the fallback logic
+         Err(candle_core::Error::Msg(format!(
+             "No tokenizer found in GGUF repository {}. Please specify a tokenizer source.",
+             model_id
+         )))
+     }
+
+     /// Determine EOS token based on architecture and model
+     fn determine_eos_token(tokenizer: &Tokenizer, architecture: &str, model_id: &str) -> u32 {
+         let vocab = tokenizer.get_vocab(true);
+
+         match architecture {
+             "llama" | "mistral" => {
+                 // Check if it's Llama 3
+                 if model_id.contains("Llama-3") || model_id.contains("llama-3") {
+                     vocab.get("<|eot_id|>")
+                         .or_else(|| vocab.get("<|end_of_text|>"))
+                         .copied()
+                         .unwrap_or(128009)
+                 } else {
+                     // Llama 2 and Mistral
+                     vocab.get("</s>")
+                         .copied()
+                         .unwrap_or(2)
+                 }
+             }
+             "gemma" => {
+                 vocab.get("<eos>")
+                     .or_else(|| vocab.get("<end_of_turn>"))
+                     .copied()
+                     .unwrap_or(1)
+             }
+             _ => 2, // Default
+         }
+     }
+
+     /// Detect chat template based on model
+     fn detect_chat_template(_tokenizer: &Tokenizer, _architecture: &str, _model_id: &str) -> Option<String> {
+         // For now, return None and handle templates in apply_chat_template
+         // In the future, this could read from tokenizer config
+         None
+     }
+
+     /// Apply chat template based on detected architecture
+     pub fn apply_chat_template(&self, messages: &[serde_json::Value]) -> CandleResult<String> {
+         // Check model name since Mistral GGUF reports as llama architecture
+         let model_lower = self.model_id.to_lowercase();
+
+         if model_lower.contains("mistral") {
+             self.apply_mistral_template(messages)
+         } else if model_lower.contains("gemma") {
+             // Always use Gemma template for Gemma models, regardless of loader used
+             self.apply_gemma_template(messages)
+         } else {
+             match self.architecture.as_str() {
+                 "llama" => {
+                     if self.model_id.contains("Llama-3") || self.model_id.contains("llama-3") {
+                         self.apply_llama3_template(messages)
+                     } else {
+                         self.apply_llama2_template(messages)
+                     }
+                 }
+                 "gemma" => {
+                     self.apply_gemma_template(messages)
+                 }
+                 _ => Ok(self.apply_generic_template(messages))
+             }
+         }
+     }
+
+     fn apply_llama2_template(&self, messages: &[serde_json::Value]) -> CandleResult<String> {
+         let mut prompt = String::new();
+         let mut system_message = String::new();
+
+         for (i, message) in messages.iter().enumerate() {
+             let role = message["role"].as_str().unwrap_or("");
+             let content = message["content"].as_str().unwrap_or("");
+
+             match role {
+                 "system" => {
+                     system_message = content.to_string();
+                 }
+                 "user" => {
+                     if i == 1 || (i == 0 && system_message.is_empty()) {
+                         if !system_message.is_empty() {
+                             prompt.push_str(&format!("<s>[INST] <<SYS>>\n{}\n<</SYS>>\n\n{} [/INST]", system_message, content));
+                         } else {
+                             prompt.push_str(&format!("<s>[INST] {} [/INST]", content));
+                         }
+                     } else {
+                         prompt.push_str(&format!(" [INST] {} [/INST]", content));
+                     }
+                 }
+                 "assistant" => {
+                     prompt.push_str(&format!(" {} </s>", content));
+                 }
+                 _ => {}
+             }
+         }
+
+         Ok(prompt)
+     }
+
+     fn apply_llama3_template(&self, messages: &[serde_json::Value]) -> CandleResult<String> {
+         let mut prompt = String::new();
+         prompt.push_str("<|begin_of_text|>");
+
+         for message in messages {
+             let role = message["role"].as_str().unwrap_or("");
+             let content = message["content"].as_str().unwrap_or("");
+             prompt.push_str(&format!("<|start_header_id|>{}<|end_header_id|>\n\n{}<|eot_id|>", role, content));
+         }
+
+         prompt.push_str("<|start_header_id|>assistant<|end_header_id|>\n\n");
+         Ok(prompt)
+     }
+
+     fn apply_mistral_template(&self, messages: &[serde_json::Value]) -> CandleResult<String> {
+         let mut prompt = String::new();
+
+         for message in messages {
+             let role = message["role"].as_str().unwrap_or("");
+             let content = message["content"].as_str().unwrap_or("");
+
+             match role {
+                 "user" => prompt.push_str(&format!("[INST] {} [/INST]", content)),
+                 "assistant" => prompt.push_str(&format!(" {}</s>", content)),
+                 "system" => prompt.push_str(&format!("[INST] {} [/INST]\n", content)),
+                 _ => {}
+             }
+         }
+
+         Ok(prompt)
+     }
+
+     fn apply_gemma_template(&self, messages: &[serde_json::Value]) -> CandleResult<String> {
+         let mut prompt = String::new();
+
+         for message in messages {
+             let role = message["role"].as_str().unwrap_or("");
+             let content = message["content"].as_str().unwrap_or("");
+
+             match role {
+                 "system" => {
+                     prompt.push_str(&format!("<start_of_turn>user\nSystem: {}\n", content));
+                 }
+                 "user" => {
+                     if !prompt.contains("<start_of_turn>user") || prompt.ends_with("<end_of_turn>\n") {
+                         prompt.push_str("<start_of_turn>user\n");
+                     }
+                     prompt.push_str(&format!("{}<end_of_turn>\n", content));
+                 }
+                 "assistant" | "model" => {
+                     prompt.push_str(&format!("<start_of_turn>model\n{}<end_of_turn>\n", content));
+                 }
+                 _ => {}
+             }
+         }
+
+         prompt.push_str("<start_of_turn>model\n");
+         Ok(prompt)
+     }
+
+     fn apply_generic_template(&self, messages: &[serde_json::Value]) -> String {
+         let mut prompt = String::new();
+
+         for message in messages {
+             let role = message["role"].as_str().unwrap_or("");
+             let content = message["content"].as_str().unwrap_or("");
+             prompt.push_str(&format!("{}: {}\n", role, content));
+         }
+
+         prompt.push_str("assistant: ");
+         prompt
+     }
+
+     /// Clear the KV cache between generations
+     pub fn clear_kv_cache(&mut self) {
+         // Quantized models manage cache internally
+     }
+
+     fn generate_tokens(
+         &mut self,
+         prompt_tokens: Vec<u32>,
+         config: &GenerationConfig,
+         mut callback: Option<impl FnMut(&str)>,
+     ) -> CandleResult<Vec<u32>> {
+         let mut text_gen = TextGeneration::from_config(config);
+         text_gen.set_eos_token_id(self.eos_token_id);
+         text_gen.set_tokens(prompt_tokens.clone());
+
+         let mut all_tokens = prompt_tokens.clone();
+         let start_gen = all_tokens.len();
+
+         for index in 0..config.max_length {
+             let context_size = if index > 0 { 1 } else { all_tokens.len() };
+             let start_pos = all_tokens.len().saturating_sub(context_size);
+             let ctxt = &all_tokens[start_pos..];
+
+             let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?;
+             let input = input.contiguous()?;
+
+             let logits = match &mut self.model {
+                 ModelType::Llama(model) => model.forward(&input, start_pos)?,
+                 ModelType::Gemma(model) => model.forward(&input, start_pos)?,
+             };
+
+             let logits = logits.squeeze(0)?;
+             let logits = if logits.dims().len() == 2 {
+                 let seq_len = logits.dim(0)?;
+                 logits.narrow(0, seq_len - 1, 1)?.squeeze(0)?
+             } else {
+                 logits
+             };
+
+             let logits = logits.to_dtype(DType::F32)?;
+
+             let next_token = text_gen.sample_next_token(
+                 &logits,
+                 Some((config.repetition_penalty, config.repetition_penalty_last_n)),
+             )?;
+
+             all_tokens.push(next_token);
+
+             // Stream callback
+             if let Some(ref mut cb) = callback {
+                 if config.debug_tokens {
+                     // In debug mode, only show debug tokens
+                     let token_piece = self.tokenizer.token_to_piece(next_token)?;
+                     cb(&format!("[{}:{}]", next_token, token_piece));
+                 } else {
+                     // Normal mode: use incremental decoding for proper text
+                     let decoded_text = self.tokenizer.decode_incremental(&all_tokens, all_tokens.len() - 1)?;
+                     cb(&decoded_text);
+                 }
+             }
+
+             // Check stop conditions
+             if text_gen.should_stop(next_token, config.max_length) {
+                 break;
+             }
+
+             // Check stop sequences
+             let generated_text = self.tokenizer.decode(&all_tokens[start_gen..], true)?;
+             if text_gen.check_stop_sequences(&generated_text, &config.stop_sequences) {
+                 break;
+             }
+         }
+
+         Ok(if config.include_prompt {
+             all_tokens
+         } else {
+             all_tokens[start_gen..].to_vec()
+         })
+     }
+ }
+
+ impl TextGenerator for QuantizedGGUF {
+     fn generate(
+         &mut self,
+         prompt: &str,
+         config: &GenerationConfig,
+     ) -> CandleResult<String> {
+         let prompt_tokens = self.tokenizer.encode(prompt, true)?;
+         let output_tokens = self.generate_tokens(prompt_tokens, config, None::<fn(&str)>)?;
+
+         if config.debug_tokens {
+             self.tokenizer.format_tokens_with_debug(&output_tokens)
+         } else {
+             self.tokenizer.decode(&output_tokens, true)
+         }
+     }
+
+     fn generate_stream(
+         &mut self,
+         prompt: &str,
+         config: &GenerationConfig,
+         mut callback: impl FnMut(&str),
+     ) -> CandleResult<String> {
+         let prompt_tokens = self.tokenizer.encode(prompt, true)?;
+         let output_tokens = self.generate_tokens(prompt_tokens, config, Some(&mut callback))?;
+         self.tokenizer.decode(&output_tokens, true)
+     }
+
+     fn model_name(&self) -> &str {
+         &self.model_id
+     }
+
+     fn device(&self) -> &Device {
+         &self.device
+     }
+
+     fn clear_cache(&mut self) {
+         // Quantized models manage cache internally
+     }
+ }
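
Editor's note: the new QuantizedGGUF loader above accepts a "repo@filename" model id to pin an exact GGUF file, plus an optional tokenizer source for repos that ship only the GGUF weights. The following is a minimal usage sketch, not part of the diff; the module path, the GenerationConfig::default() constructor, and the repo/file names are illustrative assumptions.

use candle_core::Device;
use serde_json::json;
// Assumed imports; in the gem these types live under the crate's llm module,
// and `generate` comes from the TextGenerator trait shown above.

async fn demo() -> candle_core::Result<()> {
    // "repo@file" pins an exact GGUF file inside the Hugging Face repo (hypothetical names).
    let mut model = QuantizedGGUF::from_pretrained(
        "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF@tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
        Device::Cpu,
        Some("TinyLlama/TinyLlama-1.1B-Chat-v1.0"), // tokenizer fallback source
    )
    .await?;

    // Build an architecture-appropriate prompt from chat messages.
    let messages = [json!({"role": "user", "content": "Hello!"})];
    let prompt = model.apply_chat_template(&messages)?;

    // GenerationConfig::default() is an assumed constructor.
    let config = GenerationConfig::default();
    let reply = model.generate(&prompt, &config)?;
    println!("{reply}");
    Ok(())
}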
@@ -1,13 +1,10 @@
  use candle_core::{Result as CandleResult, Tensor};
  use candle_transformers::generation::LogitsProcessor;
- use rand::{rngs::StdRng, SeedableRng};

  use super::GenerationConfig;

  /// Helper struct for text generation process
  pub struct TextGeneration {
-     #[allow(dead_code)]
-     rng: StdRng,
      logits_processor: LogitsProcessor,
      tokens: Vec<u32>,
      eos_token_id: Option<u32>,
@@ -25,7 +22,6 @@ impl TextGeneration {
          let logits_processor = LogitsProcessor::new(seed, temperature, top_p);

          Self {
-             rng: StdRng::seed_from_u64(seed),
              logits_processor,
              tokens: Vec::new(),
              eos_token_id: None,
@@ -367,7 +367,6 @@ impl EmbeddingModel {
      }
  }

- #[allow(dead_code)]
  fn normalize_l2(v: &CoreTensor) -> Result<CoreTensor, candle_core::Error> {
      v.broadcast_div(&v.sqr()?.sum_keepdim(1)?.sqrt()?)
  }
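
Editor's note: the normalize_l2 helper retained above divides each row of a tensor by its L2 norm, i.e. it maps each row v to v / ||v||_2. A small standalone sketch of the same computation with candle tensors, assuming CoreTensor aliases candle_core::Tensor:

use candle_core::{Device, Tensor};

fn main() -> candle_core::Result<()> {
    // Two rows with L2 norms 5 and 10; after normalization each row has unit norm.
    let v = Tensor::new(&[[3f32, 4.0], [6.0, 8.0]], &Device::Cpu)?;
    let normalized = v.broadcast_div(&v.sqr()?.sum_keepdim(1)?.sqrt()?)?;
    println!("{normalized}"); // both rows become [0.6, 0.8]
    Ok(())
}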