llama_cpp 0.15.4 → 0.16.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (161)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +16 -0
  3. data/ext/llama_cpp/extconf.rb +3 -2
  4. data/ext/llama_cpp/llama_cpp.cpp +17 -3
  5. data/lib/llama_cpp/version.rb +2 -2
  6. data/sig/llama_cpp.rbs +15 -1
  7. data/vendor/tmp/llama.cpp/Makefile +166 -82
  8. data/vendor/tmp/llama.cpp/ggml-alloc.c +82 -26
  9. data/vendor/tmp/llama.cpp/ggml-backend-impl.h +20 -8
  10. data/vendor/tmp/llama.cpp/ggml-backend.c +183 -69
  11. data/vendor/tmp/llama.cpp/ggml-backend.h +4 -4
  12. data/vendor/tmp/llama.cpp/ggml-blas.cpp +363 -0
  13. data/vendor/tmp/llama.cpp/ggml-blas.h +23 -0
  14. data/vendor/tmp/llama.cpp/ggml-common.h +6 -0
  15. data/vendor/tmp/llama.cpp/ggml-cuda/acc.cu +47 -0
  16. data/vendor/tmp/llama.cpp/ggml-cuda/arange.cu +34 -0
  17. data/vendor/tmp/llama.cpp/ggml-cuda/argsort.cu +104 -0
  18. data/vendor/tmp/llama.cpp/ggml-cuda/binbcast.cu +280 -0
  19. data/vendor/tmp/llama.cpp/ggml-cuda/clamp.cu +34 -0
  20. data/vendor/tmp/llama.cpp/ggml-cuda/concat.cu +196 -0
  21. data/vendor/tmp/llama.cpp/ggml-cuda/convert.cu +686 -0
  22. data/vendor/tmp/llama.cpp/ggml-cuda/cpy.cu +490 -0
  23. data/vendor/tmp/llama.cpp/ggml-cuda/diagmask.cu +40 -0
  24. data/vendor/tmp/llama.cpp/ggml-cuda/dmmv.cu +674 -0
  25. data/vendor/tmp/llama.cpp/ggml-cuda/fattn-tile-f16.cu +319 -0
  26. data/vendor/tmp/llama.cpp/ggml-cuda/fattn-tile-f32.cu +312 -0
  27. data/vendor/tmp/llama.cpp/ggml-cuda/fattn.cu +345 -0
  28. data/vendor/tmp/llama.cpp/ggml-cuda/getrows.cu +178 -0
  29. data/vendor/tmp/llama.cpp/ggml-cuda/im2col.cu +104 -0
  30. data/vendor/tmp/llama.cpp/ggml-cuda/mmq.cu +88 -0
  31. data/vendor/tmp/llama.cpp/ggml-cuda/mmvq.cu +419 -0
  32. data/vendor/tmp/llama.cpp/ggml-cuda/norm.cu +221 -0
  33. data/vendor/tmp/llama.cpp/ggml-cuda/pad.cu +49 -0
  34. data/vendor/tmp/llama.cpp/ggml-cuda/pool2d.cu +94 -0
  35. data/vendor/tmp/llama.cpp/ggml-cuda/quantize.cu +112 -0
  36. data/vendor/tmp/llama.cpp/ggml-cuda/rope.cu +271 -0
  37. data/vendor/tmp/llama.cpp/ggml-cuda/scale.cu +31 -0
  38. data/vendor/tmp/llama.cpp/ggml-cuda/softmax.cu +206 -0
  39. data/vendor/tmp/llama.cpp/ggml-cuda/sumrows.cu +40 -0
  40. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu +5 -0
  41. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu +5 -0
  42. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu +5 -0
  43. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu +5 -0
  44. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu +5 -0
  45. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu +5 -0
  46. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu +5 -0
  47. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu +5 -0
  48. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu +5 -0
  49. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu +5 -0
  50. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu +5 -0
  51. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu +5 -0
  52. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu +5 -0
  53. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu +5 -0
  54. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu +5 -0
  55. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu +5 -0
  56. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu +5 -0
  57. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu +5 -0
  58. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu +5 -0
  59. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu +5 -0
  60. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu +5 -0
  61. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu +5 -0
  62. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu +5 -0
  63. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu +5 -0
  64. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu +5 -0
  65. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu +5 -0
  66. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu +5 -0
  67. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu +5 -0
  68. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu +5 -0
  69. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu +5 -0
  70. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu +5 -0
  71. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu +5 -0
  72. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu +5 -0
  73. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu +5 -0
  74. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu +5 -0
  75. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu +5 -0
  76. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu +5 -0
  77. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu +5 -0
  78. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu +5 -0
  79. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu +5 -0
  80. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu +5 -0
  81. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu +5 -0
  82. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu +5 -0
  83. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu +5 -0
  84. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu +5 -0
  85. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu +5 -0
  86. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu +5 -0
  87. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu +5 -0
  88. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu +5 -0
  89. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu +5 -0
  90. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu +5 -0
  91. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu +5 -0
  92. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu +5 -0
  93. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu +5 -0
  94. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu +5 -0
  95. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu +5 -0
  96. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu +5 -0
  97. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu +5 -0
  98. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu +5 -0
  99. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu +5 -0
  100. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu +5 -0
  101. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu +5 -0
  102. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu +5 -0
  103. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu +5 -0
  104. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu +5 -0
  105. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu +5 -0
  106. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu +5 -0
  107. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu +5 -0
  108. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu +5 -0
  109. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu +5 -0
  110. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu +5 -0
  111. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu +5 -0
  112. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu +5 -0
  113. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu +5 -0
  114. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu +5 -0
  115. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu +5 -0
  116. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu +5 -0
  117. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu +5 -0
  118. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu +5 -0
  119. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu +5 -0
  120. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu +5 -0
  121. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu +5 -0
  122. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu +5 -0
  123. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu +5 -0
  124. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu +5 -0
  125. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu +5 -0
  126. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu +10 -0
  127. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu +9 -0
  128. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu +10 -0
  129. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu +10 -0
  130. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu +8 -0
  131. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q2_k.cu +5 -0
  132. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q3_k.cu +5 -0
  133. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q4_0.cu +5 -0
  134. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q4_1.cu +5 -0
  135. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q4_k.cu +5 -0
  136. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q5_0.cu +5 -0
  137. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q5_1.cu +5 -0
  138. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q5_k.cu +5 -0
  139. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q6_k.cu +5 -0
  140. data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q8_0.cu +5 -0
  141. data/vendor/tmp/llama.cpp/ggml-cuda/tsembd.cu +47 -0
  142. data/vendor/tmp/llama.cpp/ggml-cuda/unary.cu +286 -0
  143. data/vendor/tmp/llama.cpp/ggml-cuda/upscale.cu +51 -0
  144. data/vendor/tmp/llama.cpp/ggml-cuda.cu +103 -135
  145. data/vendor/tmp/llama.cpp/ggml-kompute.cpp +29 -13
  146. data/vendor/tmp/llama.cpp/ggml-metal.h +1 -1
  147. data/vendor/tmp/llama.cpp/ggml-metal.m +45 -33
  148. data/vendor/tmp/llama.cpp/ggml-metal.metal +83 -59
  149. data/vendor/tmp/llama.cpp/ggml-rpc.cpp +15 -14
  150. data/vendor/tmp/llama.cpp/ggml-sycl.cpp +26 -90
  151. data/vendor/tmp/llama.cpp/ggml-vulkan-shaders.hpp +74522 -14913
  152. data/vendor/tmp/llama.cpp/ggml-vulkan.cpp +631 -471
  153. data/vendor/tmp/llama.cpp/ggml.c +278 -603
  154. data/vendor/tmp/llama.cpp/ggml.h +9 -28
  155. data/vendor/tmp/llama.cpp/llama.cpp +345 -473
  156. data/vendor/tmp/llama.cpp/llama.h +21 -43
  157. metadata +134 -7
  158. data/vendor/tmp/llama.cpp/ggml-mpi.c +0 -216
  159. data/vendor/tmp/llama.cpp/ggml-mpi.h +0 -39
  160. data/vendor/tmp/llama.cpp/ggml-opencl.cpp +0 -2305
  161. data/vendor/tmp/llama.cpp/ggml-opencl.h +0 -36
data/vendor/tmp/llama.cpp/llama.h CHANGED
@@ -86,6 +86,7 @@ extern "C" {
      LLAMA_VOCAB_PRE_TYPE_OLMO = 12,
      LLAMA_VOCAB_PRE_TYPE_DBRX = 13,
      LLAMA_VOCAB_PRE_TYPE_SMAUG = 14,
+     LLAMA_VOCAB_PRE_TYPE_PORO = 15,
  };

  // note: these values should be synchronized with ggml_rope
@@ -97,7 +98,7 @@ extern "C" {
      LLAMA_ROPE_TYPE_GLM = 4,
  };

- enum llama_token_type {
+ enum llama_token_type { //TODO: remove, required until per token attributes are available from GGUF file
      LLAMA_TOKEN_TYPE_UNDEFINED = 0,
      LLAMA_TOKEN_TYPE_NORMAL = 1,
      LLAMA_TOKEN_TYPE_UNKNOWN = 2,
@@ -107,6 +108,20 @@ extern "C" {
      LLAMA_TOKEN_TYPE_BYTE = 6,
  };

+ enum llama_token_attr {
+     LLAMA_TOKEN_ATTR_UNDEFINED = 0,
+     LLAMA_TOKEN_ATTR_UNKNOWN = 1 << 0,
+     LLAMA_TOKEN_ATTR_UNUSED = 1 << 1,
+     LLAMA_TOKEN_ATTR_NORMAL = 1 << 2,
+     LLAMA_TOKEN_ATTR_CONTROL = 1 << 3, // SPECIAL?
+     LLAMA_TOKEN_ATTR_USER_DEFINED = 1 << 4,
+     LLAMA_TOKEN_ATTR_BYTE = 1 << 5,
+     LLAMA_TOKEN_ATTR_NORMALIZED = 1 << 6,
+     LLAMA_TOKEN_ATTR_LSTRIP = 1 << 7,
+     LLAMA_TOKEN_ATTR_RSTRIP = 1 << 8,
+     LLAMA_TOKEN_ATTR_SINGLE_WORD = 1 << 9,
+ };
+
  // model file types
  enum llama_ftype {
      LLAMA_FTYPE_ALL_F32 = 0,
@@ -351,6 +366,9 @@ extern "C" {
      // modifies a preceding LLAMA_GRETYPE_CHAR or
      // LLAMA_GRETYPE_CHAR_RNG_UPPER to add an alternate char to match ([ab], [a-zA])
      LLAMA_GRETYPE_CHAR_ALT = 6,
+
+     // any character (.)
+     LLAMA_GRETYPE_CHAR_ANY = 7,
  };

  typedef struct llama_grammar_element {
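Note: the new LLAMA_GRETYPE_CHAR_ANY element backs the `.` wildcard in GBNF grammars. A minimal sketch of a hand-built rule using it, assuming the two-field { type, value } layout of llama_grammar_element from llama.h; treating the value field as unused for the wildcard is an assumption:

    // hypothetical rule equivalent to the GBNF rule `root ::= .`
    // (match any single character, then end of rule)
    static const llama_grammar_element rule_any[] = {
        { LLAMA_GRETYPE_CHAR_ANY, 0 }, // value assumed ignored for the wildcard
        { LLAMA_GRETYPE_END,      0 }, // end of rule
    };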
@@ -821,7 +839,7 @@ extern "C" {

      LLAMA_API float llama_token_get_score(const struct llama_model * model, llama_token token);

-     LLAMA_API enum llama_token_type llama_token_get_type(const struct llama_model * model, llama_token token);
+     LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_model * model, llama_token token);

      // Check if the token is supposed to end generation (end-of-generation, eg. EOS, EOT, etc.)
      LLAMA_API bool llama_token_is_eog(const struct llama_model * model, llama_token token);
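Note: llama_token_get_type is replaced by llama_token_get_attr, and unlike the old single-valued llama_token_type, the llama_token_attr values introduced above are bit flags that can combine. A minimal sketch of testing them through the renamed accessor (the inspect_token helper is hypothetical; model and token are assumed to come from the usual model-loading and tokenization calls):

    #include <stdio.h>
    #include "llama.h"

    // hypothetical helper: print a couple of attribute checks for a token
    static void inspect_token(const struct llama_model * model, llama_token token) {
        enum llama_token_attr attr = llama_token_get_attr(model, token);

        if (attr & LLAMA_TOKEN_ATTR_CONTROL) {
            printf("token %d is a control token\n", token);
        }
        // flags can combine, which the old one-value token type could not express
        if ((attr & LLAMA_TOKEN_ATTR_LSTRIP) && (attr & LLAMA_TOKEN_ATTR_RSTRIP)) {
            printf("token %d strips surrounding whitespace\n", token);
        }
    }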
@@ -1042,49 +1060,9 @@ extern "C" {
          llama_token token);

      //
-     // Beam search
+     // Model split
      //

-     struct llama_beam_view {
-         const llama_token * tokens;
-         size_t n_tokens;
-         float p;  // Cumulative beam probability (renormalized relative to all beams)
-         bool eob; // Callback should set this to true when a beam is at end-of-beam.
-     };
-
-     // Passed to beam_search_callback function.
-     // Whenever 0 < common_prefix_length, this number of tokens should be copied from any of the beams
-     // (e.g. beams[0]) as they will be removed (shifted) from all beams in all subsequent callbacks.
-     // These pointers are valid only during the synchronous callback, so should not be saved.
-     struct llama_beams_state {
-         struct llama_beam_view * beam_views;
-         size_t n_beams;              // Number of elements in beam_views[].
-         size_t common_prefix_length; // Current max length of prefix tokens shared by all beams.
-         bool last_call;              // True iff this is the last callback invocation.
-     };
-
-     // Type of pointer to the beam_search_callback function.
-     // void* callback_data is any custom data passed to llama_beam_search, that is subsequently
-     // passed back to beam_search_callback. This avoids having to use global variables in the callback.
-     typedef void (*llama_beam_search_callback_fn_t)(void * callback_data, struct llama_beams_state);
-
-     /// @details Deterministically returns entire sentence constructed by a beam search.
-     /// @param ctx Pointer to the llama_context.
-     /// @param callback Invoked for each iteration of the beam_search loop, passing in beams_state.
-     /// @param callback_data A pointer that is simply passed back to callback.
-     /// @param n_beams Number of beams to use.
-     /// @param n_past Number of tokens already evaluated.
-     /// @param n_predict Maximum number of tokens to predict. EOS may occur earlier.
-     LLAMA_API void llama_beam_search(
-         struct llama_context * ctx,
-         llama_beam_search_callback_fn_t callback,
-         void * callback_data,
-         size_t n_beams,
-         int32_t n_past,
-         int32_t n_predict);
-
      /// @details Build a split GGUF final path for this chunk.
      ///          llama_split_path(split_path, sizeof(split_path), "/models/ggml-model-q4_0", 2, 4) => split_path = "/models/ggml-model-q4_0-00002-of-00004.gguf"
      //           Returns the split_path length.
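Note: llama_split_path survives the beam-search removal and is documented by example in the header comment above; a sketch of a matching call (the buffer size is arbitrary):

    char split_path[128];
    // per the header comment, yields "/models/ggml-model-q4_0-00002-of-00004.gguf"
    const int n = llama_split_path(split_path, sizeof(split_path), "/models/ggml-model-q4_0", 2, 4);
    printf("%s (%d chars)\n", split_path, n);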
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: llama_cpp
  version: !ruby/object:Gem::Version
-   version: 0.15.4
+   version: 0.16.1
  platform: ruby
  authors:
  - yoshoku
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2024-06-01 00:00:00.000000000 Z
+ date: 2024-06-15 00:00:00.000000000 Z
  dependencies: []
  description: llama_cpp.rb provides Ruby bindings for the llama.cpp.
  email:
@@ -42,19 +42,146 @@ files:
  - vendor/tmp/llama.cpp/ggml-backend-impl.h
  - vendor/tmp/llama.cpp/ggml-backend.c
  - vendor/tmp/llama.cpp/ggml-backend.h
+ - vendor/tmp/llama.cpp/ggml-blas.cpp
+ - vendor/tmp/llama.cpp/ggml-blas.h
  - vendor/tmp/llama.cpp/ggml-common.h
  - vendor/tmp/llama.cpp/ggml-cuda.cu
  - vendor/tmp/llama.cpp/ggml-cuda.h
+ - vendor/tmp/llama.cpp/ggml-cuda/acc.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/arange.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/argsort.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/binbcast.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/clamp.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/concat.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/convert.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/cpy.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/diagmask.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/dmmv.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/fattn-tile-f16.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/fattn-tile-f32.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/fattn.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/getrows.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/im2col.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/mmq.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/mmvq.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/norm.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/pad.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/pool2d.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/quantize.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/rope.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/scale.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/softmax.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/sumrows.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q2_k.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q3_k.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q4_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q4_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q4_k.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q5_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q5_1.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q5_k.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q6_k.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q8_0.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/tsembd.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/unary.cu
+ - vendor/tmp/llama.cpp/ggml-cuda/upscale.cu
  - vendor/tmp/llama.cpp/ggml-impl.h
  - vendor/tmp/llama.cpp/ggml-kompute.cpp
  - vendor/tmp/llama.cpp/ggml-kompute.h
  - vendor/tmp/llama.cpp/ggml-metal.h
  - vendor/tmp/llama.cpp/ggml-metal.m
  - vendor/tmp/llama.cpp/ggml-metal.metal
- - vendor/tmp/llama.cpp/ggml-mpi.c
- - vendor/tmp/llama.cpp/ggml-mpi.h
- - vendor/tmp/llama.cpp/ggml-opencl.cpp
- - vendor/tmp/llama.cpp/ggml-opencl.h
  - vendor/tmp/llama.cpp/ggml-quants.c
  - vendor/tmp/llama.cpp/ggml-quants.h
  - vendor/tmp/llama.cpp/ggml-rpc.cpp
@@ -99,7 +226,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
    version: '0'
  requirements: []
- rubygems_version: 3.5.10
+ rubygems_version: 3.5.9
  signing_key:
  specification_version: 4
  summary: Ruby bindings for the llama.cpp.
data/vendor/tmp/llama.cpp/ggml-mpi.c DELETED
@@ -1,216 +0,0 @@
- #include "ggml-mpi.h"
-
- #include "ggml.h"
-
- #include <mpi.h>
-
- #include <stdio.h>
- #include <stdlib.h>
-
- #define MIN(a, b) ((a) < (b) ? (a) : (b))
-
- #define UNUSED GGML_UNUSED
-
- struct ggml_mpi_context {
-     int rank;
-     int size;
- };
-
- void ggml_mpi_backend_init(void) {
-     MPI_Init(NULL, NULL);
- }
-
- void ggml_mpi_backend_free(void) {
-     MPI_Finalize();
- }
-
- struct ggml_mpi_context * ggml_mpi_init(void) {
-     struct ggml_mpi_context * ctx = calloc(1, sizeof(struct ggml_mpi_context));
-
-     MPI_Comm_rank(MPI_COMM_WORLD, &ctx->rank);
-     MPI_Comm_size(MPI_COMM_WORLD, &ctx->size);
-
-     return ctx;
- }
-
- void ggml_mpi_free(struct ggml_mpi_context * ctx) {
-     free(ctx);
- }
-
- int ggml_mpi_rank(struct ggml_mpi_context * ctx) {
-     return ctx->rank;
- }
-
- void ggml_mpi_eval_init(
-         struct ggml_mpi_context * ctx_mpi,
-         int * n_tokens,
-         int * n_past,
-         int * n_threads) {
-     UNUSED(ctx_mpi);
-
-     // synchronize the worker node parameters with the root node
-     MPI_Barrier(MPI_COMM_WORLD);
-
-     MPI_Bcast(n_tokens,  1, MPI_INT, 0, MPI_COMM_WORLD);
-     MPI_Bcast(n_past,    1, MPI_INT, 0, MPI_COMM_WORLD);
-     MPI_Bcast(n_threads, 1, MPI_INT, 0, MPI_COMM_WORLD);
- }
-
- static int ggml_graph_get_node_idx(struct ggml_cgraph * gf, const char * name) {
-     struct ggml_tensor * t = ggml_graph_get_tensor(gf, name);
-     if (t == NULL) {
-         fprintf(stderr, "%s: tensor %s not found\n", __func__, name);
-         return -1;
-     }
-
-     for (int i = 0; i < gf->n_nodes; i++) {
-         if (gf->nodes[i] == t) {
-             return i;
-         }
-     }
-
-     fprintf(stderr, "%s: tensor %s not found in graph (should not happen)\n", __func__, name);
-     return -1;
- }
-
- static void ggml_mpi_tensor_send(struct ggml_tensor * t, int mpi_rank_dst) {
-     MPI_Datatype mpi_type;
-
-     switch (t->type) {
-         case GGML_TYPE_I32: mpi_type = MPI_INT32_T; break;
-         case GGML_TYPE_F32: mpi_type = MPI_FLOAT;   break;
-         default: GGML_ASSERT(false && "not implemented");
-     }
-
-     const int retval = MPI_Send(t->data, ggml_nelements(t), mpi_type, mpi_rank_dst, 0, MPI_COMM_WORLD);
-     GGML_ASSERT(retval == MPI_SUCCESS);
- }
-
- static void ggml_mpi_tensor_recv(struct ggml_tensor * t, int mpi_rank_src) {
-     MPI_Datatype mpi_type;
-
-     switch (t->type) {
-         case GGML_TYPE_I32: mpi_type = MPI_INT32_T; break;
-         case GGML_TYPE_F32: mpi_type = MPI_FLOAT;   break;
-         default: GGML_ASSERT(false && "not implemented");
-     }
-
-     MPI_Status status; UNUSED(status);
-
-     const int retval = MPI_Recv(t->data, ggml_nelements(t), mpi_type, mpi_rank_src, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
-     GGML_ASSERT(retval == MPI_SUCCESS);
- }
-
- // TODO: there are many improvements that can be done to this implementation
- void ggml_mpi_graph_compute_pre(
-         struct ggml_mpi_context * ctx_mpi,
-         struct ggml_cgraph * gf,
-         int n_layers) {
-     const int mpi_rank = ctx_mpi->rank;
-     const int mpi_size = ctx_mpi->size;
-
-     struct ggml_tensor * inp_tokens = ggml_graph_get_tensor(gf, "inp_tokens");
-     if (inp_tokens == NULL) {
-         fprintf(stderr, "%s: tensor 'inp_tokens' not found\n", __func__);
-         return;
-     }
-
-     struct ggml_tensor * inp0 = ggml_graph_get_tensor(gf, "layer_inp_0");
-     if (inp0 == NULL) {
-         fprintf(stderr, "%s: tensor 'inp0' not found\n", __func__);
-         return;
-     }
-
-     GGML_ASSERT(inp0 == gf->nodes[0]);
-
-     // distribute the compute graph into slices across the MPI nodes
-     //
-     // the main node (0) processes the last layers + the remainder of the compute graph
-     // and is responsible to pass the input tokens to the first node (1)
-     //
-     // node 1:   [(  0) * n_per_node, (  1) * n_per_node)
-     // node 2:   [(  1) * n_per_node, (  2) * n_per_node)
-     // ...
-     // node n-1: [(n-2) * n_per_node, (n-1) * n_per_node)
-     // node 0:   [(n-1) * n_per_node,            n_nodes)
-     //
-     if (mpi_rank > 0) {
-         if (mpi_rank == 1) {
-             // the first node (1) receives the input tokens from the main node (0)
-             ggml_mpi_tensor_recv(inp_tokens, 0);
-         } else {
-             // recv input data for each node into the "inp0" tensor (i.e. the first node in the compute graph)
-             ggml_mpi_tensor_recv(inp0, mpi_rank - 1);
-         }
-     } else if (mpi_size > 1) {
-         // node 0 sends the input tokens to node 1
-         ggml_mpi_tensor_send(inp_tokens, 1);
-
-         // recv the output data from the last node
-         ggml_mpi_tensor_recv(inp0, mpi_size - 1);
-     }
-
-     {
-         const int n_per_node = (n_layers + (mpi_size - 1)) / mpi_size;
-
-         const int mpi_idx = mpi_rank > 0 ? mpi_rank - 1 : mpi_size - 1;
-
-         const int il0 =               (mpi_idx + 0) * n_per_node;
-         const int il1 = MIN(n_layers, (mpi_idx + 1) * n_per_node);
-
-         char name_l0[GGML_MAX_NAME];
-         char name_l1[GGML_MAX_NAME];
-
-         snprintf(name_l0, sizeof(name_l0), "layer_inp_%d", il0);
-         snprintf(name_l1, sizeof(name_l1), "layer_inp_%d", il1);
-
-         const int idx_l0 =                ggml_graph_get_node_idx(gf, name_l0);
-         const int idx_l1 = mpi_rank > 0 ? ggml_graph_get_node_idx(gf, name_l1) + 1 : gf->n_nodes;
-
-         if (idx_l0 < 0 || idx_l1 < 0) {
-             fprintf(stderr, "%s: layer input nodes not found\n", __func__);
-             return;
-         }
-
-         // attach the input data to all nodes that need it
-         // TODO: not great - should be able to do this without modifying the compute graph (see next TODO below)
-         for (int i = idx_l0; i < idx_l1; i++) {
-             if (gf->nodes[i]->src[0] == gf->nodes[idx_l0]) {
-                 gf->nodes[i]->src[0] = inp0;
-             }
-             if (gf->nodes[i]->src[1] == gf->nodes[idx_l0]) {
-                 gf->nodes[i]->src[1] = inp0;
-             }
-         }
-
-         // TODO: instead of rearranging the nodes, we should be able to execute a subset of the compute graph
-         for (int i = 1; i < idx_l1 - idx_l0; i++) {
-             gf->nodes[i] = gf->nodes[idx_l0 + i];
-             gf->grads[i] = gf->grads[idx_l0 + i];
-         }
-
-         // the first node performs the "get_rows" operation, the rest of the nodes get the data from the previous node
-         if (mpi_idx != 0) {
-             gf->nodes[0]->op = GGML_OP_NONE;
-         }
-
-         gf->n_nodes = idx_l1 - idx_l0;
-
-         //fprintf(stderr, "%s: node %d: processing %d nodes [%d, %d)\n", __func__, mpi_rank, gf->n_nodes, il0, il1);
-     }
- }
-
- void ggml_mpi_graph_compute_post(
-         struct ggml_mpi_context * ctx_mpi,
-         struct ggml_cgraph * gf,
-         int n_layers) {
-     UNUSED(n_layers);
-
-     const int mpi_rank = ctx_mpi->rank;
-     const int mpi_size = ctx_mpi->size;
-
-     // send the output data to the next node
-     if (mpi_rank > 0) {
-         ggml_mpi_tensor_send(gf->nodes[gf->n_nodes - 1], (mpi_rank + 1) % mpi_size);
-     }
- }
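(Aside on the removed MPI backend: its layer partition was n_per_node = (n_layers + mpi_size - 1) / mpi_size. For illustration, with hypothetical values n_layers = 32 and mpi_size = 4, n_per_node = (32 + 3) / 4 = 8, so node 1 took layers [0, 8), node 2 [8, 16), node 3 [16, 24), and node 0 the final slice [24, 32) plus the remainder of the graph.)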
data/vendor/tmp/llama.cpp/ggml-mpi.h DELETED
@@ -1,39 +0,0 @@
- #pragma once
-
- struct ggml_context;
- struct ggml_tensor;
- struct ggml_cgraph;
-
- #ifdef __cplusplus
- extern "C" {
- #endif
-
- struct ggml_mpi_context;
-
- void ggml_mpi_backend_init(void);
- void ggml_mpi_backend_free(void);
-
- struct ggml_mpi_context * ggml_mpi_init(void);
- void ggml_mpi_free(struct ggml_mpi_context * ctx);
-
- int ggml_mpi_rank(struct ggml_mpi_context * ctx);
-
- void ggml_mpi_eval_init(
-         struct ggml_mpi_context * ctx_mpi,
-         int * n_tokens,
-         int * n_past,
-         int * n_threads);
-
- void ggml_mpi_graph_compute_pre(
-         struct ggml_mpi_context * ctx_mpi,
-         struct ggml_cgraph * gf,
-         int n_layers);
-
- void ggml_mpi_graph_compute_post(
-         struct ggml_mpi_context * ctx_mpi,
-         struct ggml_cgraph * gf,
-         int n_layers);
-
- #ifdef __cplusplus
- }
- #endif