llama_cpp 0.14.5 → 0.14.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +4 -0
- data/lib/llama_cpp/version.rb +2 -2
- data/vendor/tmp/llama.cpp/Makefile +18 -6
- data/vendor/tmp/llama.cpp/ggml-cuda.cu +135 -46
- data/vendor/tmp/llama.cpp/ggml-impl.h +1 -1
- data/vendor/tmp/llama.cpp/ggml-metal.m +130 -83
- data/vendor/tmp/llama.cpp/ggml-metal.metal +505 -1467
- data/vendor/tmp/llama.cpp/ggml-quants.c +1 -1
- data/vendor/tmp/llama.cpp/ggml-sycl.cpp +65 -52
- data/vendor/tmp/llama.cpp/ggml.c +153 -87
- data/vendor/tmp/llama.cpp/ggml.h +5 -4
- data/vendor/tmp/llama.cpp/llama.cpp +885 -144
- data/vendor/tmp/llama.cpp/sgemm.cpp +1148 -0
- data/vendor/tmp/llama.cpp/sgemm.h +12 -0
- metadata +4 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 5c4bd6bcb93b98a00f94dcdf93d04f853174f73e281d96fce8f837a6ba7f250e
+  data.tar.gz: 6d184e9ce927c06ba794bea63a09007a175a72e477366ffb1c5763ceb2c7c71e
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 953fe2777a759e5467694b8afb9d3f929a42603e81b2c3e38ba0fda4bb6dca78b2d147345023f99c2c9fb899cc746bf6729ad2726c2cb473d7094e93c13caf73
+  data.tar.gz: 71eb3cd5a5c619e9cc8a3418be745a8b76dc5e8cabe5b26a766230a8533df9a11c3981601b0be4ec0adb34a49f86ad741503ffc9f3b0d7ba021a7e9ddc3246a7
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,7 @@
+## [[0.14.6](https://github.com/yoshoku/llama_cpp.rb/compare/v0.14.5...v0.14.6)] - 2024-04-20
+
+- Bump llama.cpp from b2658 to b2698.
+
 ## [[0.14.5](https://github.com/yoshoku/llama_cpp.rb/compare/v0.14.4...v0.14.5)] - 2024-04-13
 
 - Bump llama.cpp from b2608 to b2658.
data/lib/llama_cpp/version.rb
CHANGED
@@ -3,8 +3,8 @@
 # llama_cpp.rb provides Ruby bindings for the llama.cpp.
 module LLaMACpp
   # The version of llama_cpp.rb you install.
-  VERSION = '0.14.5'
+  VERSION = '0.14.6'
 
   # The version of llama.cpp bundled with llama_cpp.rb.
-  LLAMA_CPP_VERSION = 'b2658'
+  LLAMA_CPP_VERSION = 'b2698'
 end
data/vendor/tmp/llama.cpp/Makefile
CHANGED
@@ -386,6 +386,15 @@ ifdef LLAMA_OPENBLAS
     MK_LDFLAGS  += $(shell pkg-config --libs openblas)
 endif # LLAMA_OPENBLAS
 
+# TODO: temporary disable until MoE is fixed
+# https://github.com/ggerganov/llama.cpp/pull/6716
+LLAMA_NO_LLAMAFILE := 1
+
+ifndef LLAMA_NO_LLAMAFILE
+    MK_CPPFLAGS += -DGGML_USE_LLAMAFILE
+    OBJS += sgemm.o
+endif
+
 ifdef LLAMA_BLIS
     MK_CPPFLAGS += -DGGML_USE_OPENBLAS -I/usr/local/include/blis -I/usr/include/blis
     MK_LDFLAGS  += -lblis -L/usr/local/lib
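The hunk above force-sets LLAMA_NO_LLAMAFILE := 1, so the sgemm.cpp/sgemm.h sources bundled in this release are compiled in (via -DGGML_USE_LLAMAFILE and the sgemm.o object) only if that assignment is removed; the TODO marks this as temporary until the MoE issue referenced in the linked llama.cpp pull request is resolved. As a rough illustration of the pattern such a build flag drives, here is a minimal sketch of a compile-time gated fast path; try_fast_sgemm is a hypothetical stand-in, not the actual llamafile_sgemm interface:

    // Sketch only: how a -DGGML_USE_LLAMAFILE style flag typically gates an
    // optional fast path at build time. All names here are illustrative.
    #ifdef GGML_USE_LLAMAFILE
    // Hypothetical optimized kernel; returns false when it cannot handle the shape.
    static bool try_fast_sgemm(const float *, const float *, float *, int, int, int) {
        return false; // placeholder body for the sketch
    }
    #endif

    // C = A (m x k) * B (k x n), row-major
    static void mul_mat_f32(const float * a, const float * b, float * c, int m, int n, int k) {
    #ifdef GGML_USE_LLAMAFILE
        if (try_fast_sgemm(a, b, c, m, n, k)) {
            return; // the tiled/vectorized path handled this shape
        }
    #endif
        // reference fallback
        for (int i = 0; i < m; ++i) {
            for (int j = 0; j < n; ++j) {
                float sum = 0.0f;
                for (int p = 0; p < k; ++p) {
                    sum += a[i*k + p] * b[p*n + j];
                }
                c[i*n + j] = sum;
            }
        }
    }

With the flag left undefined, which is what the forced LLAMA_NO_LLAMAFILE achieves, only the reference path is compiled; removing the assignment re-enables the optimized object.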
@@ -482,11 +491,9 @@ ggml-cuda/%.o: ggml-cuda/%.cu ggml-cuda/%.cuh ggml.h ggml-common.h ggml-cuda/com
 
 ggml-cuda.o: ggml-cuda.cu ggml-cuda.h ggml.h ggml-backend.h ggml-backend-impl.h ggml-common.h $(wildcard ggml-cuda/*.cuh)
     $(NVCC_COMPILE)
-
 endif # LLAMA_CUDA
 
 ifdef LLAMA_CLBLAST
-
     MK_CPPFLAGS += -DGGML_USE_CLBLAST $(shell pkg-config --cflags-only-I clblast OpenCL)
     MK_CFLAGS   += $(shell pkg-config --cflags-only-other clblast OpenCL)
     MK_CXXFLAGS += $(shell pkg-config --cflags-only-other clblast OpenCL)
@@ -605,6 +612,11 @@ ggml-mpi.o: ggml-mpi.c ggml-mpi.h
     $(CC) $(CFLAGS) -c $< -o $@
 endif # LLAMA_MPI
 
+ifndef LLAMA_NO_LLAMAFILE
+sgemm.o: sgemm.cpp sgemm.h ggml.h
+    $(CXX) $(CXXFLAGS) -c $< -o $@
+endif
+
 GF_CC := $(CC)
 include scripts/get-flags.mk
 
@@ -690,7 +702,7 @@ llama.o: llama.cpp unicode.h ggml.h ggml-alloc.h ggml-backend.h ggml-cuda.h ggml
     $(CXX) $(CXXFLAGS) -c $< -o $@
 
 COMMON_H_DEPS = common/common.h common/sampling.h common/log.h
-COMMON_DEPS   = common.o sampling.o grammar-parser.o build-info.o
+COMMON_DEPS   = common.o sampling.o grammar-parser.o build-info.o json-schema-to-grammar.o
 
 common.o: common/common.cpp $(COMMON_H_DEPS)
     $(CXX) $(CXXFLAGS) -c $< -o $@
@@ -724,7 +736,7 @@ lib: llama.o ggml.o $(OBJS)
     ar rcs libllama.a $^
 
 clean:
-    rm -vrf *.o tests/*.o *.so *.a *.dll
+    rm -vrf *.o tests/*.o *.so *.a *.dll benchmark-matmult lookup-create lookup-merge lookup-stats common/build-info.cpp *.dot $(COV_TARGETS) $(BUILD_TARGETS) $(TEST_TARGETS)
     rm -vrf ggml-cuda/*.o
 
 #
@@ -761,7 +773,7 @@ batched: examples/batched/batched.cpp ggml.o llama.o $(C
     $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
     $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
 
-batched-bench: examples/batched-bench/batched-bench.cpp build-info.o ggml.o llama.o
+batched-bench: examples/batched-bench/batched-bench.cpp build-info.o ggml.o llama.o $(COMMON_DEPS) $(OBJS)
     $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
     $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
 
@@ -793,7 +805,7 @@ save-load-state: examples/save-load-state/save-load-state.cpp ggml.o llama.o $(C
     $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
     $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
 
-server: examples/server/server.cpp examples/server/utils.hpp examples/server/httplib.h common/json.hpp examples/server/index.html.hpp examples/server/index.js.hpp examples/server/completion.js.hpp
+server: examples/server/server.cpp examples/server/utils.hpp examples/server/httplib.h common/json.hpp examples/server/index.html.hpp examples/server/index.js.hpp examples/server/completion.js.hpp common/stb_image.h ggml.o llama.o $(COMMON_DEPS) grammar-parser.o $(OBJS)
     $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
     $(CXX) $(CXXFLAGS) $(filter-out %.h %.hpp $<,$^) -Iexamples/server $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) $(LWINSOCK2)
 
data/vendor/tmp/llama.cpp/ggml-cuda.cu
CHANGED
@@ -1231,7 +1231,7 @@ static void ggml_cuda_op_mul_mat_cublas(
 
     if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT) {
         // convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32
-        ggml_cuda_pool_alloc<half> src0_as_f16(ctx.pool());
+        ggml_cuda_pool_alloc<half> src0_as_f16(ctx.pool(id));
         if (src0->type != GGML_TYPE_F16) {
             const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src0->type);
             GGML_ASSERT(to_fp16_cuda != nullptr);
@@ -1241,7 +1241,7 @@ static void ggml_cuda_op_mul_mat_cublas(
         }
         const half * src0_ptr = src0->type == GGML_TYPE_F16 ? (const half *) src0_dd_i : src0_as_f16.get();
 
-        ggml_cuda_pool_alloc<half> src1_as_f16(ctx.pool());
+        ggml_cuda_pool_alloc<half> src1_as_f16(ctx.pool(id));
         if (src1->type != GGML_TYPE_F16) {
             const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type);
             GGML_ASSERT(to_fp16_cuda != nullptr);
@@ -1250,7 +1250,7 @@ static void ggml_cuda_op_mul_mat_cublas(
             to_fp16_cuda(src1_ddf_i, src1_as_f16.get(), ne, stream);
         }
         const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddf_i : src1_as_f16.get();
-        ggml_cuda_pool_alloc<half> dst_f16(ctx.pool(), row_diff*src1_ncols);
+        ggml_cuda_pool_alloc<half> dst_f16(ctx.pool(id), row_diff*src1_ncols);
 
         const half alpha_f16 = 1.0f;
         const half beta_f16 = 0.0f;
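The three hunks above change ctx.pool() to ctx.pool(id), so the temporary fp16 buffers used by the cuBLAS path (src0_as_f16, src1_as_f16, dst_f16) are drawn from the memory pool of the device that is executing this slice of the multiplication rather than from a single default pool, which matters when a matmul is split across several GPUs. A minimal sketch of the per-device scratch-pool idea, assuming a simple wrapper type that is not ggml-cuda's actual pool class:

    // Sketch: one scratch buffer per GPU, so temporaries always live on the
    // device that runs the kernel. Not the real ggml_cuda_pool implementation.
    #include <cuda_runtime.h>
    #include <vector>

    struct scratch_pools {
        std::vector<void *> buf;
        std::vector<size_t> cap;

        explicit scratch_pools(int n_devices) : buf(n_devices, nullptr), cap(n_devices, 0) {}

        // Return `bytes` of scratch memory resident on device `id`.
        void * get(int id, size_t bytes) {
            if (cap[id] < bytes) {
                cudaSetDevice(id);      // cudaMalloc allocates on the current device
                cudaFree(buf[id]);
                cudaMalloc(&buf[id], bytes);
                cap[id] = bytes;
            }
            return buf[id];
        }
    };

The point of passing the id is the cudaSetDevice step: without it the temporary buffer could land on the wrong GPU for the stream that consumes it.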
@@ -1946,7 +1946,7 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor
     } else if (!split && !fp16_performance_good && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
         // KQV single-batch
         ggml_cuda_mul_mat_vec_nc(ctx, src0, src1, dst);
-    } else if (!split &&
+    } else if (!split && src0->type == GGML_TYPE_F16 && (src1->type == GGML_TYPE_F16 || fp16_performance_good) && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) {
         // KQ + KQV multi-batch
         ggml_cuda_mul_mat_batched_cublas(ctx, src0, src1, dst);
     } else if (use_dequantize_mul_mat_vec) {
@@ -1960,20 +1960,73 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor
     }
 }
 
+struct mmid_row_mapping {
+    int32_t i1;
+    int32_t i2;
+};
+
+static __global__ void k_copy_src1_to_contiguous(const char * __restrict__ src1_original, char * __restrict__ src1_contiguous,
+        int * __restrict__ cur_src1_row, mmid_row_mapping * __restrict__ row_mapping,
+        const char * __restrict__ ids, int64_t i02, size_t ids_nb1, size_t ids_nb0,
+        int64_t ne11, int64_t ne10,
+        size_t nb11, size_t nb12) {
+    int32_t iid1 = blockIdx.x;
+    int32_t id = blockIdx.y;
+
+    const int32_t row_id_i = *(const int32_t *) (ids + iid1*ids_nb1 + id*ids_nb0);
+
+    if (row_id_i != i02) {
+        return;
+    }
+
+    const int64_t i11 = id % ne11;
+    const int64_t i12 = iid1;
+
+    __shared__ int src1_row;
+    if (threadIdx.x == 0) {
+        src1_row = atomicAdd(cur_src1_row, 1);
+        row_mapping[src1_row] = {id, iid1};
+    }
+    __syncthreads();
+
+    const float * src1_row_original = (const float *)(src1_original + i11*nb11 + i12*nb12);
+    float * src1_row_contiguous = (float *)(src1_contiguous + src1_row*nb11);
+
+    for (int i = threadIdx.x; i < ne10; i += blockDim.x) {
+        src1_row_contiguous[i] = src1_row_original[i];
+    }
+}
+
+static __global__ void k_copy_dst_from_contiguous(char * __restrict__ dst_original, const char * __restrict__ dst_contiguous,
+        const mmid_row_mapping * __restrict__ row_mapping,
+        int64_t ne0,
+        size_t nb1, size_t nb2) {
+    int32_t i = blockIdx.x;
+
+    const int32_t i1 = row_mapping[i].i1;
+    const int32_t i2 = row_mapping[i].i2;
+
+    const float * dst_row_contiguous = (const float *)(dst_contiguous + i*nb1);
+    float * dst_row_original = (float *)(dst_original + i1*nb1 + i2*nb2);
+
+    for (int j = threadIdx.x; j < ne0; j += blockDim.x) {
+        dst_row_original[j] = dst_row_contiguous[j];
+    }
+}
+
 static void ggml_cuda_mul_mat_id(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * src0 = dst->src[0];
     const ggml_tensor * src1 = dst->src[1];
     const ggml_tensor * ids = dst->src[2];
 
+    GGML_TENSOR_BINARY_OP_LOCALS
+
     GGML_ASSERT(!ggml_backend_buffer_is_cuda_split(src0->buffer) && "mul_mat_id does not support split buffers");
 
     cudaStream_t stream = ctx.stream();
 
-    const
-    const
-
-    const int32_t id = ((int32_t *) dst->op_params)[0];
-    const int32_t n_as = src0->ne[2];
+    const int64_t n_as = ne02;
+    const int64_t n_ids = ids->ne[0];
 
     std::vector<char> ids_host(ggml_nbytes(ids));
     const char * ids_dev = (const char *) ids->data;
@@ -1982,7 +2035,7 @@ static void ggml_cuda_mul_mat_id(ggml_backend_cuda_context & ctx, ggml_tensor *
 
     ggml_tensor src0_row = *src0;
     ggml_tensor src1_row = *src1;
-    ggml_tensor dst_row
+    ggml_tensor dst_row = *dst;
 
     char * src0_original = (char *) src0->data;
     char * src1_original = (char *) src1->data;
@@ -1990,19 +2043,39 @@ static void ggml_cuda_mul_mat_id(ggml_backend_cuda_context & ctx, ggml_tensor *
 
     src0_row.ne[2] = 1;
     src0_row.ne[3] = 1;
-    src0_row.nb[3] =
+    src0_row.nb[3] = nb02;
 
-
-
-
+    src1_row.ne[1] = 1;
+    src1_row.ne[2] = 1;
+    src1_row.ne[3] = 1;
+    src1_row.nb[2] = nb11;
+    src1_row.nb[3] = nb11;
 
-
+    dst_row.ne[1] = 1;
+    dst_row.ne[2] = 1;
+    dst_row.ne[3] = 1;
+    dst_row.nb[2] = nb1;
+    dst_row.nb[3] = nb1;
 
-
-
-
+    if (ne12 == 1) {
+        for (int64_t iid1 = 0; iid1 < ids->ne[1]; iid1++) {
+            for (int64_t id = 0; id < n_ids; id++) {
+                const int32_t i02 = *(const int32_t *) (ids_host.data() + iid1*ids->nb[1] + id*ids->nb[0]);
 
-
+                GGML_ASSERT(i02 >= 0 && i02 < n_as);
+
+                const int64_t i11 = id % ne11;
+                const int64_t i12 = iid1;
+
+                const int64_t i1 = id;
+                const int64_t i2 = i12;
+
+                src0_row.data = src0_original + i02*nb02;
+                src1_row.data = src1_original + i11*nb11 + i12*nb12;
+                dst_row.data  = dst_original + i1*nb1 + i2*nb2;
+
+                ggml_cuda_mul_mat(ctx, &src0_row, &src1_row, &dst_row);
+            }
         }
     } else {
         ggml_cuda_pool_alloc<char> src1_contiguous(ctx.pool(), sizeof(float)*ggml_nelements(src1));
@@ -2011,54 +2084,69 @@ static void ggml_cuda_mul_mat_id(ggml_backend_cuda_context & ctx, ggml_tensor *
         src1_row.data = src1_contiguous.get();
         dst_row.data = dst_contiguous.get();
 
-        for (
+        for (int64_t i02 = 0; i02 < n_as; i02++) {
             int64_t num_src1_rows = 0;
-            for (int64_t i01 = 0; i01 < ids->ne[1]; i01++) {
-                const int32_t row_id_i = *(const int32_t *) (ids_host.data() + i01*ids->nb[1] + id*ids->nb[0]);
 
-
-
-
+            for (int64_t iid1 = 0; iid1 < ids->ne[1]; iid1++) {
+                for (int64_t id = 0; id < n_ids; id++) {
+                    const int32_t row_id_i = *(const int32_t *) (ids_host.data() + iid1*ids->nb[1] + id*ids->nb[0]);
 
-
+                    GGML_ASSERT(row_id_i >= 0 && row_id_i < n_as);
 
-
-
-
+                    if (row_id_i != i02) {
+                        continue;
+                    }
+
+                    num_src1_rows++;
+                }
             }
 
             if (num_src1_rows == 0) {
                 continue;
             }
 
-
+            ggml_cuda_pool_alloc<int> dev_cur_src1_row(ctx.pool(), 1);
+            ggml_cuda_pool_alloc<mmid_row_mapping> dev_row_mapping(ctx.pool(), num_src1_rows);
+            CUDA_CHECK(cudaMemsetAsync(dev_cur_src1_row.get(), 0, sizeof(int), stream));
 
-
-
+            {
+                dim3 block_dims(std::min((unsigned int)ne10, 768u));
+                dim3 grid_dims(ids->ne[1], n_ids);
+                k_copy_src1_to_contiguous<<<grid_dims, block_dims, 0, stream>>>(
+                        src1_original, src1_contiguous.get(),
+                        dev_cur_src1_row.get(), dev_row_mapping.get(),
+                        ids_dev, i02, ids->nb[1], ids->nb[0],
+                        ne11, ne10,
+                        nb11, nb12);
+                CUDA_CHECK(cudaGetLastError());
+            }
+
+            src0_row.data = src0_original + i02*nb02;
 
+            GGML_ASSERT(nb11 == sizeof(float)*ne10);
+            GGML_ASSERT(nb1 == sizeof(float)*ne0);
+
+            src1_row.ne[1] = num_src1_rows;
             src1_row.nb[1] = nb11;
             src1_row.nb[2] = num_src1_rows*nb11;
             src1_row.nb[3] = num_src1_rows*nb11;
 
+            dst_row.ne[1] = num_src1_rows;
             dst_row.nb[1] = nb1;
             dst_row.nb[2] = num_src1_rows*nb1;
             dst_row.nb[3] = num_src1_rows*nb1;
 
             ggml_cuda_mul_mat(ctx, &src0_row, &src1_row, &dst_row);
 
-
-
-
-
-
-
-
-
-
-
-            CUDA_CHECK(cudaMemcpyAsync(dst_original + i01*nb1, dst_contiguous.get() + num_src1_rows*nb1,
-                    nb1, cudaMemcpyDeviceToDevice, stream));
-            num_src1_rows++;
+            {
+                dim3 block_dims(std::min((unsigned int)ne0, 768u));
+                dim3 grid_dims(num_src1_rows);
+                k_copy_dst_from_contiguous<<<grid_dims, block_dims, 0, stream>>>(
+                        dst_original, dst_contiguous.get(),
+                        dev_row_mapping.get(),
+                        ne0,
+                        nb1, nb2);
+                CUDA_CHECK(cudaGetLastError());
+            }
         }
     }
 }
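Taken together, the hunks above rework the MoE multiplication (ggml_cuda_mul_mat_id). When the batch holds a single token (ne12 == 1), each routed expert row is multiplied directly. Otherwise, for every expert i02, the k_copy_src1_to_contiguous kernel gathers just the src1 rows that the ids tensor routes to that expert into one contiguous buffer, claiming packed slots with an atomic counter and recording an mmid_row_mapping entry for each gathered row; a single ggml_cuda_mul_mat then runs over the packed rows, and k_copy_dst_from_contiguous scatters the results back to their original positions, replacing the earlier per-row cudaMemcpyAsync loop. A stripped-down sketch of the gather step, with hypothetical names and simplified row indexing:

    // Sketch of the gather pattern (not the ggml-cuda kernel): one block per
    // candidate row; thread 0 claims a packed slot with atomicAdd and records
    // the mapping, then the whole block copies that row.
    #include <cuda_runtime.h>
    #include <cstdint>

    struct row_map { int32_t src_row; };  // where each packed row came from

    __global__ void gather_rows_for_expert(const float * src, float * packed,
                                           const int32_t * expert_of_row, int expert,
                                           int64_t ncols, int * counter, row_map * map) {
        const int row = blockIdx.x;
        if (expert_of_row[row] != expert) {
            return;                        // row is routed to a different expert
        }

        __shared__ int slot;
        if (threadIdx.x == 0) {
            slot = atomicAdd(counter, 1);  // next free slot in the packed buffer
            map[slot].src_row = row;
        }
        __syncthreads();

        for (int64_t c = threadIdx.x; c < ncols; c += blockDim.x) {
            packed[slot * ncols + c] = src[row * ncols + c];
        }
    }

The scatter step is the mirror image: one block per packed row, using the recorded mapping to find the destination row in the original layout.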
@@ -2487,7 +2575,8 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
 GGML_CALL static bool ggml_backend_cuda_offload_op(ggml_backend_t backend, const ggml_tensor * op) {
     const int min_batch_size = 32;
 
-    return op->ne[1] >= min_batch_size && op->op != GGML_OP_GET_ROWS
+    return (op->ne[1] >= min_batch_size && op->op != GGML_OP_GET_ROWS) ||
+           (op->ne[2] >= min_batch_size && op->op == GGML_OP_MUL_MAT_ID);
 
     GGML_UNUSED(backend);
 }
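This hunk widens the GPU offload heuristic: previously only ops whose ne[1] reached the 32-row threshold were offloaded, which left MUL_MAT_ID behind because its batch of tokens is carried in ne[2] while ne[1] holds the small per-token expert count; the added clause offloads MUL_MAT_ID whenever ne[2] crosses the same threshold. Restated as a standalone predicate, as a sketch with placeholder enum values rather than the real ggml_op constants:

    // Sketch: offload only when the batch is large enough to amortize the
    // host-to-device transfer. Enum values are placeholders.
    #include <cstdint>

    enum sketch_op { OP_GET_ROWS, OP_MUL_MAT, OP_MUL_MAT_ID };

    static bool should_offload(sketch_op op, int64_t ne1, int64_t ne2) {
        const int64_t min_batch_size = 32;
        // regular ops: batch size lives in ne1; MUL_MAT_ID spreads it over ne2
        return (ne1 >= min_batch_size && op != OP_GET_ROWS) ||
               (ne2 >= min_batch_size && op == OP_MUL_MAT_ID);
    }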
data/vendor/tmp/llama.cpp/ggml-impl.h
CHANGED
@@ -88,7 +88,7 @@ typedef uint16_t ggml_fp16_internal_t;
 #if defined(_MSC_VER) || defined(__MINGW32__)
 #include <intrin.h>
 #else
-#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
+#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__) || defined(__SSE__)
 #if !defined(__riscv)
 #include <immintrin.h>
 #endif