llama_cpp 0.3.6 → 0.3.8

This diff reflects the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
@@ -86,7 +86,20 @@ extern "C" {
 
     typedef void (*llama_progress_callback)(float progress, void *ctx);
 
-    struct llama_context_params {
+    enum llama_log_level {
+        LLAMA_LOG_LEVEL_ERROR = 2,
+        LLAMA_LOG_LEVEL_WARN  = 3,
+        LLAMA_LOG_LEVEL_INFO  = 4
+    };
+
+    // Signature for logging events
+    // Note that text includes the new line character at the end for most events.
+    // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
+    // if it exists.
+    // It might not exist for progress report where '.' is output repeatedly.
+    typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data);
+
+    struct llama_context_params {
         uint32_t seed;    // RNG seed, -1 for random
         int32_t  n_ctx;   // text context
         int32_t  n_batch; // prompt processing batch size
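
For reference, a handler matching the new llama_log_callback typedef might look like the sketch below. The function name my_log_callback and the level tags are illustrative, not part of the library; the newline handling follows the header comment above, which notes that most messages arrive with a trailing '\n' while repeated progress dots may not.

    #include <stdio.h>
    #include <string.h>
    #include "llama.h"

    /* Illustrative callback matching the llama_log_callback typedef.
     * Per the header comment, most messages end with '\n', so it is
     * stripped here before adding our own; progress reports ('.'
     * output repeatedly) may arrive without one. */
    static void my_log_callback(enum llama_log_level level, const char * text, void * user_data) {
        (void) user_data; /* unused in this sketch */
        const char * tag = (level == LLAMA_LOG_LEVEL_ERROR) ? "ERROR"
                         : (level == LLAMA_LOG_LEVEL_WARN)  ? "WARN"
                         :                                    "INFO";
        size_t len = strlen(text);
        if (len > 0 && text[len - 1] == '\n') {
            len--;
        }
        fprintf(stderr, "[%s] [%.*s]\n", tag, (int) len, text);
    }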
@@ -195,6 +208,10 @@ extern "C" {
         int32_t n_eval;
     };
 
+    // Set callback for all future logging events.
+    // If this is not called, or NULL is supplied, everything is output on stderr.
+    LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data);
+
     LLAMA_API int llama_max_devices();
 
     LLAMA_API struct llama_context_params llama_context_default_params();
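
Registering such a handler is then a single call to the new llama_log_set. A minimal sketch, reusing the hypothetical my_log_callback from the sketch above; as the header comment states, never calling llama_log_set, or supplying NULL, keeps all output on stderr.

    #include <stddef.h>
    #include "llama.h"

    /* my_log_callback is the hypothetical handler sketched above. */

    int main(void) {
        /* Route all future llama.cpp log events through our handler.
         * The second argument is passed back as user_data on every call. */
        llama_log_set(my_log_callback, NULL);

        /* ... load a model and run inference as usual; library logging
         * now flows through my_log_callback instead of raw stderr ... */

        /* Supplying NULL restores the default stderr behavior. */
        llama_log_set(NULL, NULL);
        return 0;
    }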
@@ -334,6 +351,8 @@ extern "C" {
     LLAMA_API int llama_n_ctx_from_model (const struct llama_model * model);
     LLAMA_API int llama_n_embd_from_model (const struct llama_model * model);
 
+    LLAMA_API int llama_model_type(const struct llama_model * model, char * buf, size_t buf_size);
+
     // Get the vocabulary as output parameters.
     // Returns number of results.
     LLAMA_API int llama_get_vocab(
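
The new llama_model_type fills a caller-supplied buffer with a human-readable description of a loaded model; it is presumably what backs the Ruby-side type method added in the signature file below. A minimal sketch, assuming a model handle obtained elsewhere (e.g. via llama_load_model_from_file) and assuming the int return value follows snprintf conventions; both are assumptions, not confirmed by this diff.

    #include <stdio.h>
    #include "llama.h"

    /* Illustrative helper: print the model's type string. Assumes `model`
     * was loaded elsewhere. A negative return value is treated here as an
     * error (snprintf-style semantics assumed). */
    static void print_model_type(const struct llama_model * model) {
        char buf[128];
        const int n = llama_model_type(model, buf, sizeof(buf));
        if (n < 0) {
            fprintf(stderr, "failed to get model type\n");
            return;
        }
        printf("model type: %s\n", buf);
    }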
@@ -3,8 +3,8 @@
 # llama_cpp.rb provides Ruby bindings for the llama.cpp.
 module LLaMACpp
   # The version of llama_cpp.rb you install.
-  VERSION = '0.3.6'
+  VERSION = '0.3.8'
 
   # The version of llama.cpp bundled with llama_cpp.rb.
-  LLAMA_CPP_VERSION = 'master-468ea24'
+  LLAMA_CPP_VERSION = 'master-097e121'
 end
data/sig/llama_cpp.rbs CHANGED
@@ -84,6 +84,7 @@ module LLaMACpp
     def vocab: (capacity: Integer) -> [Array[String], Array[Float]]
     def token_to_str: (Integer) -> String
     def tokenize: (text: String, ?n_max_tokens: Integer, ?add_bos: bool) -> Array[Integer]
+    def type: () -> String
   end
 
   class Timings
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: llama_cpp
 version: !ruby/object:Gem::Version
-  version: 0.3.6
+  version: 0.3.8
 platform: ruby
 authors:
 - yoshoku
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2023-08-04 00:00:00.000000000 Z
+date: 2023-08-19 00:00:00.000000000 Z
 dependencies: []
 description: llama_cpp.rb provides Ruby bindings for the llama.cpp.
 email: