llama_cpp 0.19.6 → 0.20.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +10 -0
- data/ext/llama_cpp/llama_cpp.c +46 -267
- data/lib/llama_cpp/version.rb +2 -2
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 33d3b46593a1ee0950c7f86ffe802f5841e8e065bc2bdf7d9679dcff37dbe06b
+  data.tar.gz: 117eba5fa85437e8cd0bb8767090bf63b4e72e523f96e8cf539a9ddf2cd15195
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: '0825dfa498b5a6616bebacb1b7aeec03db58b2ea992cafeebf4fc3e7553fbd2d30c6d14fda80caee8d303b84dfe447429a720add1c028eac17a16778c311d218'
+  data.tar.gz: 3b29add60fe63985974daa34b2e1d6a198688d83b495e6fe5381ca97074752f526a2900ae0e85c298a89d80f723c662d685fdefd74814ef956bd54567ef83c5b
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,13 @@
+## [[0.20.0](https://github.com/yoshoku/llama_cpp.rb/compare/v0.19.6...v0.20.0)] - 2025-05-23
+
+- Change supported llama.cpp version to b5460
+- Remove `LlamaKvCacheViewCell` class.
+- Remove deprecated `LlamaKvCacheView` class.
+- Remove `llama_kv_self_n_tokens` module function.
+- Remove `llama_kv_self_used_cells` module function.
+- Add `swa_full` accessor to `LlamaContextParams`.
+- Add `llama_kv_self_seq_pos_min` module function.
+
 ## [[0.19.6](https://github.com/yoshoku/llama_cpp.rb/compare/v0.19.5...v0.19.6)] - 2025-05-17
 
 - Change supported llama.cpp version to b5410
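Taken together, the 0.20.0 release swaps the removed KV-cache-view introspection for the simpler sequence-position query and exposes the new `swa_full` flag. A minimal Ruby sketch of the new surface follows; the model and context setup (`llama_model_default_params`, `llama_model_load_from_file`, `llama_context_default_params`, `llama_init_from_model`, and the model path) is assumed from the upstream llama.cpp-style API and is not part of this diff — only the lines marked "new in 0.20.0" are confirmed here.

```ruby
require 'llama_cpp'

# Assumed setup mirroring the upstream llama.cpp API; adjust to your own model file.
model_params = LlamaCpp.llama_model_default_params
model = LlamaCpp.llama_model_load_from_file('path/to/model.gguf', model_params)

ctx_params = LlamaCpp.llama_context_default_params
ctx_params.swa_full = true # new in 0.20.0: boolean accessor on LlamaContextParams
context = LlamaCpp.llama_init_from_model(model, ctx_params)

# ... decode some tokens for sequence 0 ...

# New in 0.20.0: smallest KV-cache position stored for a sequence,
# complementing the pre-existing llama_kv_self_seq_pos_max.
min_pos = LlamaCpp.llama_kv_self_seq_pos_min(context, 0)
max_pos = LlamaCpp.llama_kv_self_seq_pos_max(context, 0)
```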
data/ext/llama_cpp/llama_cpp.c
CHANGED
@@ -11,7 +11,6 @@ VALUE rb_cLlamaModelQuantizeParams;
 VALUE rb_cLlamaLogitBias;
 VALUE rb_cLlamaAdapterLora;
 VALUE rb_cLlamaKvCache;
-VALUE rb_cLlamaKvCacheView;
 VALUE rb_cLlamaTokenDataArray;
 VALUE rb_cLlamaBatch;
 VALUE rb_cLlamaSampler;
@@ -827,6 +826,17 @@ static VALUE llama_context_params_set_op_offload(VALUE self, VALUE op_offload) {
   return op_offload;
 }
 
+static VALUE llama_context_params_get_swa_full(VALUE self) {
+  struct llama_context_params* data = get_llama_context_params(self);
+  return data->swa_full ? Qtrue : Qfalse;
+}
+
+static VALUE llama_context_params_set_swa_full(VALUE self, VALUE swa_full) {
+  struct llama_context_params* data = get_llama_context_params(self);
+  data->swa_full = RTEST(swa_full) ? true : false;
+  return swa_full;
+}
+
 /* llama_model_quantize_params */
 static void llama_model_quantize_params_free(void *ptr) {
   if (ptr) {
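From Ruby, the getter/setter pair above surfaces as a plain boolean accessor on `LlamaContextParams`. A small sketch, assuming `llama_context_default_params` is available as in the rest of the bindings:

```ruby
params = LlamaCpp.llama_context_default_params # assumed helper, mirrors llama.cpp
params.swa_full = true   # setter goes through RTEST(), so any truthy value stores true
params.swa_full          # => true
params.swa_full = nil    # falsey values store false
params.swa_full          # => false
```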
@@ -1889,208 +1899,6 @@ static VALUE rb_llama_get_kv_self(VALUE self, VALUE ctx) {
   return TypedData_Wrap_Struct(rb_cLlamaKvCache, &llama_kv_cache_wrapper_data_type, kv_cache_wrapper);
 }
 
-/* struct llama_kv_cache_view_cell */
-static void llama_kv_cache_view_cell_free(void *ptr) {
-  ruby_xfree(ptr);
-}
-
-static size_t llama_kv_cache_view_cell_size(const void *ptr) {
-  return sizeof(*((struct llama_kv_cache_view_cell*)ptr));
-}
-
-static rb_data_type_t llama_kv_cache_view_cell_type = {
-  "LlamaKvCacheViewCell",
-  { NULL,
-    llama_kv_cache_view_cell_free,
-    llama_kv_cache_view_cell_size },
-  NULL,
-  NULL,
-  RUBY_TYPED_FREE_IMMEDIATELY
-};
-
-static VALUE llama_kv_cache_view_cell_alloc(VALUE self) {
-  struct llama_kv_cache_view_cell* data = (struct llama_kv_cache_view_cell*)ruby_xmalloc(sizeof(struct llama_kv_cache_view_cell));
-  data->pos = 0;
-  return TypedData_Wrap_Struct(self, &llama_kv_cache_view_cell_type, data);
-}
-
-static struct llama_kv_cache_view_cell* get_llama_kv_cache_view_cell(VALUE self) {
-  struct llama_kv_cache_view_cell* data = NULL;
-  TypedData_Get_Struct(self, struct llama_kv_cache_view_cell, &llama_kv_cache_view_cell_type, data);
-  return data;
-}
-
-static VALUE llama_kv_cache_view_cell_get_pos(VALUE self) {
-  struct llama_kv_cache_view_cell* data = get_llama_kv_cache_view_cell(self);
-  return INT2NUM(data->pos);
-}
-
-/* struct llama_kv_cache_view */
-static void llama_kv_cache_view_free_(void *ptr) {
-  if (ptr != NULL) {
-    ruby_xfree(ptr);
-  }
-}
-
-static size_t llama_kv_cache_view_size(const void *ptr) {
-  return sizeof(*((struct llama_kv_cache_view*)ptr));
-}
-
-static rb_data_type_t llama_kv_cache_view_type = {
-  "LlamaKvCacheView",
-  { NULL,
-    llama_kv_cache_view_free_,
-    llama_kv_cache_view_size },
-  NULL,
-  NULL,
-  RUBY_TYPED_FREE_IMMEDIATELY
-};
-
-static VALUE llama_kv_cache_view_alloc(VALUE self) {
-  struct llama_kv_cache_view* data = (struct llama_kv_cache_view*)ruby_xmalloc(sizeof(struct llama_kv_cache_view));
-  data->n_cells = 0;
-  data->n_seq_max = 0;
-  data->token_count = 0;
-  data->used_cells = 0;
-  data->max_contiguous = 0;
-  data->max_contiguous_idx = 0;
-  data->cells = NULL;
-  data->cells_sequences = NULL;
-  return TypedData_Wrap_Struct(self, &llama_kv_cache_view_type, data);
-}
-
-static struct llama_kv_cache_view* get_llama_kv_cache_view(VALUE self) {
-  struct llama_kv_cache_view* data = NULL;
-  TypedData_Get_Struct(self, struct llama_kv_cache_view, &llama_kv_cache_view_type, data);
-  return data;
-}
-
-static VALUE llama_kv_cache_view_get_n_cells(VALUE self) {
-  struct llama_kv_cache_view* data = get_llama_kv_cache_view(self);
-  return INT2NUM(data->n_cells);
-}
-
-static VALUE llama_kv_cache_view_get_n_seq_max(VALUE self) {
-  struct llama_kv_cache_view* data = get_llama_kv_cache_view(self);
-  return INT2NUM(data->n_seq_max);
-}
-
-static VALUE llama_kv_cache_view_get_token_count(VALUE self) {
-  struct llama_kv_cache_view* data = get_llama_kv_cache_view(self);
-  return INT2NUM(data->token_count);
-}
-
-static VALUE llama_kv_cache_view_get_used_cells(VALUE self) {
-  struct llama_kv_cache_view* data = get_llama_kv_cache_view(self);
-  return INT2NUM(data->used_cells);
-}
-
-static VALUE llama_kv_cache_view_get_max_contiguous(VALUE self) {
-  struct llama_kv_cache_view* data = get_llama_kv_cache_view(self);
-  return INT2NUM(data->max_contiguous);
-}
-
-static VALUE llama_kv_cache_view_get_max_contiguous_idx(VALUE self) {
-  struct llama_kv_cache_view* data = get_llama_kv_cache_view(self);
-  return INT2NUM(data->max_contiguous_idx);
-}
-/* TODO: struct llama_kv_cache_view_cell * cells; */
-/* TODO: llama_seq_id * cells_sequences; */
-
-/**
- * @overload llama_kv_cache_view_init(context, n_seq_max)
- * @param [LlamaContext] context
- * @param [Integer] n_seq_max
- * @return [LlamaKvCacheView]
- */
-static VALUE rb_llama_kv_cache_view_init(VALUE self, VALUE ctx, VALUE n_seq_max) {
-  if (!rb_obj_is_kind_of(ctx, rb_cLlamaContext)) {
-    rb_raise(rb_eArgError, "ctx must be a LlamaContext");
-    return Qnil;
-  }
-  if (!RB_INTEGER_TYPE_P(n_seq_max)) {
-    rb_raise(rb_eArgError, "n_seq_max must be an Integer");
-    return Qnil;
-  }
-  llama_context_wrapper* context_wrapper = get_llama_context_wrapper(ctx);
-  struct llama_kv_cache_view* data = (struct llama_kv_cache_view*)ruby_xmalloc(sizeof(struct llama_kv_cache_view));
-  *data = llama_kv_cache_view_init(context_wrapper->context, NUM2UINT(n_seq_max));
-  RB_GC_GUARD(ctx);
-  return TypedData_Wrap_Struct(rb_cLlamaKvCacheView, &llama_kv_cache_view_type, data);
-}
-
-/**
- * @overload llama_kv_cache_view_free(view)
- * @param [LlamaKvCacheView] view
- * @return [NilClass]
- */
-static VALUE rb_llama_kv_cache_view_free(VALUE self, VALUE view) {
-  if (!rb_obj_is_kind_of(view, rb_cLlamaKvCacheView)) {
-    rb_raise(rb_eArgError, "view must be a LlamaKvCacheView");
-    return Qnil;
-  }
-  struct llama_kv_cache_view* view_ = get_llama_kv_cache_view(view);
-  llama_kv_cache_view_free(view_);
-  view_ = NULL;
-  RB_GC_GUARD(view);
-  return Qnil;
-}
-
-/**
- * @overload llama_kv_cache_view_update(context, view)
- * @param [LlamaContext] context
- * @param [LlamaKvCacheView] view
- * @return [NilClass]
- */
-static VALUE rb_llama_kv_cache_view_update(VALUE self, VALUE ctx, VALUE view) {
-  if (!rb_obj_is_kind_of(ctx, rb_cLlamaContext)) {
-    rb_raise(rb_eArgError, "ctx must be a LlamaContext");
-    return Qnil;
-  }
-  if (!rb_obj_is_kind_of(view, rb_cLlamaKvCacheView)) {
-    rb_raise(rb_eArgError, "view must be a LlamaKvCacheView");
-    return Qnil;
-  }
-  llama_context_wrapper* context_wrapper = get_llama_context_wrapper(ctx);
-  struct llama_kv_cache_view* view_ = get_llama_kv_cache_view(view);
-  llama_kv_cache_view_update(context_wrapper->context, view_);
-  RB_GC_GUARD(ctx);
-  RB_GC_GUARD(view);
-  return Qnil;
-}
-
-/**
- * @overload llama_kv_self_n_tokens(context)
- * @param [LlamaContext] context
- * @return [Integer]
- */
-static VALUE rb_llama_kv_self_n_tokens(VALUE self, VALUE ctx) {
-  if (!rb_obj_is_kind_of(ctx, rb_cLlamaContext)) {
-    rb_raise(rb_eArgError, "ctx must be a LlamaContext");
-    return Qnil;
-  }
-  llama_context_wrapper* context_wrapper = get_llama_context_wrapper(ctx);
-  const int32_t n_tokens_kv_self = llama_kv_self_n_tokens(context_wrapper->context);
-  RB_GC_GUARD(ctx);
-  return INT2NUM(n_tokens_kv_self);
-}
-
-/**
- * @overload llama_kv_self_used_cells(context)
- * @param [LlamaContext] context
- * @return [Integer]
- */
-static VALUE rb_llama_kv_self_used_cells(VALUE self, VALUE ctx) {
-  if (!rb_obj_is_kind_of(ctx, rb_cLlamaContext)) {
-    rb_raise(rb_eArgError, "ctx must be a LlamaContext");
-    return Qnil;
-  }
-  llama_context_wrapper* context_wrapper = get_llama_context_wrapper(ctx);
-  const int32_t n_used_kv_cells = llama_kv_self_used_cells(context_wrapper->context);
-  RB_GC_GUARD(ctx);
-  return INT2NUM(n_used_kv_cells);
-}
-
 /**
  * @overload llama_kv_self_clear(context)
  * @param [LlamaContext] context
@@ -2267,6 +2075,27 @@ static VALUE rb_llama_kv_self_seq_div(VALUE self, VALUE ctx, VALUE seq_id, VALUE
   return Qnil;
 }
 
+/**
+ * @overload llama_kv_self_seq_pos_min(context, seq_id)
+ * @param [LlamaContext] context
+ * @param [Integer] seq_id
+ * @return [Integer]
+ */
+static VALUE rb_llama_kv_self_seq_pos_min(VALUE self, VALUE ctx, VALUE seq_id) {
+  if (!rb_obj_is_kind_of(ctx, rb_cLlamaContext)) {
+    rb_raise(rb_eArgError, "ctx must be a LlamaContext");
+    return Qnil;
+  }
+  if (!RB_INTEGER_TYPE_P(seq_id)) {
+    rb_raise(rb_eArgError, "seq_id must be an Integer");
+    return Qnil;
+  }
+  llama_context_wrapper* context_wrapper = get_llama_context_wrapper(ctx);
+  const int32_t pos_min = llama_kv_self_seq_pos_min(context_wrapper->context, NUM2INT(seq_id));
+  RB_GC_GUARD(ctx);
+  return INT2NUM(pos_min);
+}
+
 /**
  * @overload llama_kv_self_seq_pos_max(context, seq_id)
  * @param [LlamaContext] context
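The new module function mirrors the existing `llama_kv_self_seq_pos_max`, so the two together bracket the positions a sequence currently occupies in the KV cache. A short sketch, assuming `context` is an initialized `LlamaContext` that has already decoded tokens for sequence 0:

```ruby
seq_id = 0
min_pos = LlamaCpp.llama_kv_self_seq_pos_min(context, seq_id) # added in this hunk
max_pos = LlamaCpp.llama_kv_self_seq_pos_max(context, seq_id) # pre-existing counterpart
puts "sequence #{seq_id} spans KV positions #{min_pos}..#{max_pos}"
```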
@@ -4637,6 +4466,17 @@ void Init_llama_cpp(void) {
    * @return [Boolean]
    */
   rb_define_method(rb_cLlamaContextParams, "op_offload=", RUBY_METHOD_FUNC(llama_context_params_set_op_offload), 1);
+  /**
+   * Document-method: swa_full
+   * @return [Boolean]
+   */
+  rb_define_method(rb_cLlamaContextParams, "swa_full", RUBY_METHOD_FUNC(llama_context_params_get_swa_full), 0);
+  /**
+   * Document-method: swa_full=
+   * @param [Boolean] swa_full
+   * @return [Boolean]
+   */
+  rb_define_method(rb_cLlamaContextParams, "swa_full=", RUBY_METHOD_FUNC(llama_context_params_set_swa_full), 1);
   /* TODO: ggml_abort_callback abort_callback */
   /* TODO: void* abort_callback_data */
 
@@ -4956,18 +4796,6 @@ void Init_llama_cpp(void) {
 
   /* TODO: llama_apply_adapter_cvec */
 
-  /**
-   * Document-class: LlamaCpp::LlamaKvCacheViewCell
-   * "struct llama_kv_cache_view_cell" wrapper class
-   */
-  VALUE rb_cLlamaKvCacheViewCell = rb_define_class_under(rb_mLlamaCpp, "LlamaKvCacheViewCell", rb_cObject);
-  rb_define_alloc_func(rb_cLlamaKvCacheViewCell, llama_kv_cache_view_cell_alloc);
-  /**
-   * Document-method: pos
-   * @return [Integer]
-   */
-  rb_define_method(rb_cLlamaKvCacheViewCell, "pos", RUBY_METHOD_FUNC(llama_kv_cache_view_cell_get_pos), 0);
-
   /**
    * Document-class: LlamaCpp::LlamaKvCache
    * "struct llama_kv_cache" wrapper class
@@ -4975,58 +4803,6 @@ void Init_llama_cpp(void) {
   rb_cLlamaKvCache = rb_define_class_under(rb_mLlamaCpp, "LlamaKvCache", rb_cObject);
   rb_define_alloc_func(rb_cLlamaKvCache, llama_kv_cache_wrapper_alloc);
 
-  /**
-   * Document-class: LlamaCpp::LlamaKvCacheView
-   * "struct llama_kv_cache_view" wrapper class
-   */
-  rb_cLlamaKvCacheView = rb_define_class_under(rb_mLlamaCpp, "LlamaKvCacheView", rb_cObject);
-  rb_define_alloc_func(rb_cLlamaKvCacheView, llama_kv_cache_view_alloc);
-  /**
-   * Document-method: n_cells
-   * @return [Integer]
-   */
-  rb_define_method(rb_cLlamaKvCacheView, "n_cells", RUBY_METHOD_FUNC(llama_kv_cache_view_get_n_cells), 0);
-  /**
-   * Document-method: n_seq_max
-   * @return [Integer]
-   */
-  rb_define_method(rb_cLlamaKvCacheView, "n_seq_max", RUBY_METHOD_FUNC(llama_kv_cache_view_get_n_seq_max), 0);
-  /**
-   * Document-method: token_count
-   * @return [Integer]
-   */
-  rb_define_method(rb_cLlamaKvCacheView, "token_count", RUBY_METHOD_FUNC(llama_kv_cache_view_get_token_count), 0);
-  /**
-   * Document-method: used_cells
-   * @return [Integer]
-   */
-  rb_define_method(rb_cLlamaKvCacheView, "used_cells", RUBY_METHOD_FUNC(llama_kv_cache_view_get_used_cells), 0);
-  /**
-   * Document-method: max_contiguous
-   * @return [Integer]
-   */
-  rb_define_method(rb_cLlamaKvCacheView, "max_contiguous", RUBY_METHOD_FUNC(llama_kv_cache_view_get_max_contiguous), 0);
-  /**
-   * Document-method: max_contiguous_idx
-   * @return [Integer]
-   */
-  rb_define_method(rb_cLlamaKvCacheView, "max_contiguous_idx", RUBY_METHOD_FUNC(llama_kv_cache_view_get_max_contiguous_idx), 0);
-
-  /* llama_kv_cache_view_init */
-  rb_define_module_function(rb_mLlamaCpp, "llama_kv_cache_view_init", rb_llama_kv_cache_view_init, 2);
-
-  /* llama_kv_cache_view_free */
-  rb_define_module_function(rb_mLlamaCpp, "llama_kv_cache_view_free", rb_llama_kv_cache_view_free, 1);
-
-  /* llama_kv_cache_view_update */
-  rb_define_module_function(rb_mLlamaCpp, "llama_kv_cache_view_update", rb_llama_kv_cache_view_update, 2);
-
-  /* llama_kv_self_n_tokens */
-  rb_define_module_function(rb_mLlamaCpp, "llama_kv_self_n_tokens", rb_llama_kv_self_n_tokens, 1);
-
-  /* llama_kv_self_used_cells */
-  rb_define_module_function(rb_mLlamaCpp, "llama_kv_self_used_cells", rb_llama_kv_self_used_cells, 1);
-
   /* llama_kv_self_clear */
   rb_define_module_function(rb_mLlamaCpp, "llama_kv_self_clear", rb_llama_kv_self_clear, 1);
 
@@ -5045,6 +4821,9 @@ void Init_llama_cpp(void) {
   /* llama_kv_self_seq_div */
   rb_define_module_function(rb_mLlamaCpp, "llama_kv_self_seq_div", rb_llama_kv_self_seq_div, 5);
 
+  /* llama_kv_self_seq_pos_min */
+  rb_define_module_function(rb_mLlamaCpp, "llama_kv_self_seq_pos_min", rb_llama_kv_self_seq_pos_min, 2);
+
   /* llama_kv_self_seq_pos_max */
   rb_define_module_function(rb_mLlamaCpp, "llama_kv_self_seq_pos_max", rb_llama_kv_self_seq_pos_max, 2);
 
data/lib/llama_cpp/version.rb
CHANGED
@@ -3,8 +3,8 @@
 # llama_cpp.rb provides Ruby bindings for the llama.cpp.
 module LlamaCpp
   # The version of llama_cpp.rb you install.
-  VERSION = '0.19.6'
+  VERSION = '0.20.0'
 
   # The supported version of llama.cpp.
-  LLAMA_CPP_VERSION = 'b5410'
+  LLAMA_CPP_VERSION = 'b5460'
 end
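These two constants are the quickest way to confirm which binding release and which supported llama.cpp build are installed:

```ruby
require 'llama_cpp'

puts LlamaCpp::VERSION           # => "0.20.0"
puts LlamaCpp::LLAMA_CPP_VERSION # => "b5460"
```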
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llama_cpp
 version: !ruby/object:Gem::Version
-  version: 0.19.6
+  version: 0.20.0
 platform: ruby
 authors:
 - yoshoku
@@ -49,7 +49,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.6.
+rubygems_version: 3.6.9
 specification_version: 4
 summary: Ruby bindings for the llama.cpp.
 test_files: []