llama-cpp-capacitor 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,148 @@
# Android JNI build for llama.cpp: compiles the vendored llama.cpp sources
# plus the JNI bridge into one shared library per Android ABI.
cmake_minimum_required(VERSION 3.10)

project(llama-cpp)

set(CMAKE_CXX_STANDARD 17)
# Fail the configure step instead of silently falling back to an older
# standard when the toolchain does not support C++17.
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# Root of the vendored llama.cpp sources, relative to this directory.
set(LLAMACPP_LIB_DIR ${CMAKE_SOURCE_DIR}/../../../cpp)

# Directory-scoped on purpose: every per-ABI library built below needs the
# same three include roots.
include_directories(
    ${LLAMACPP_LIB_DIR}
    ${LLAMACPP_LIB_DIR}/ggml-cpu
    ${LLAMACPP_LIB_DIR}/tools/mtmd
)
# All sources shared by every ABI target. Kept as one explicit list (no
# file(GLOB)) so source additions show up in diffs. Headers are listed so
# IDEs index them; CMake does not compile them.
# Fix: llama-vocab.cpp was listed twice; the duplicate is removed.
set(
    SOURCE_FILES
    # ggml core
    ${LLAMACPP_LIB_DIR}/ggml.c
    ${LLAMACPP_LIB_DIR}/ggml-alloc.c
    ${LLAMACPP_LIB_DIR}/ggml-backend.cpp
    ${LLAMACPP_LIB_DIR}/ggml-backend-reg.cpp
    ${LLAMACPP_LIB_DIR}/ggml-cpu/amx/amx.cpp
    ${LLAMACPP_LIB_DIR}/ggml-cpu/amx/mmq.cpp
    ${LLAMACPP_LIB_DIR}/ggml-cpu/ggml-cpu.c
    ${LLAMACPP_LIB_DIR}/ggml-cpu/ggml-cpu.cpp
    ${LLAMACPP_LIB_DIR}/ggml-cpu/quants.c
    ${LLAMACPP_LIB_DIR}/ggml-cpu/traits.cpp
    ${LLAMACPP_LIB_DIR}/ggml-cpu/repack.cpp
    ${LLAMACPP_LIB_DIR}/ggml-cpu/unary-ops.cpp
    ${LLAMACPP_LIB_DIR}/ggml-cpu/binary-ops.cpp
    ${LLAMACPP_LIB_DIR}/ggml-cpu/vec.cpp
    ${LLAMACPP_LIB_DIR}/ggml-cpu/ops.cpp
    ${LLAMACPP_LIB_DIR}/ggml-opt.cpp
    ${LLAMACPP_LIB_DIR}/ggml-threading.cpp
    ${LLAMACPP_LIB_DIR}/ggml-quants.c
    ${LLAMACPP_LIB_DIR}/gguf.cpp
    ${LLAMACPP_LIB_DIR}/log.cpp
    ${LLAMACPP_LIB_DIR}/llama-impl.cpp
    ${LLAMACPP_LIB_DIR}/chat-parser.cpp
    ${LLAMACPP_LIB_DIR}/json-partial.cpp
    ${LLAMACPP_LIB_DIR}/regex-partial.cpp
    # Multimodal support
    ${LLAMACPP_LIB_DIR}/tools/mtmd/mtmd.cpp
    ${LLAMACPP_LIB_DIR}/tools/mtmd/mtmd-audio.cpp
    ${LLAMACPP_LIB_DIR}/tools/mtmd/clip.cpp
    ${LLAMACPP_LIB_DIR}/tools/mtmd/mtmd-helper.cpp
    # llama core
    ${LLAMACPP_LIB_DIR}/llama-grammar.cpp
    ${LLAMACPP_LIB_DIR}/llama-sampling.cpp
    ${LLAMACPP_LIB_DIR}/llama-vocab.cpp
    ${LLAMACPP_LIB_DIR}/llama-adapter.cpp
    ${LLAMACPP_LIB_DIR}/llama-chat.cpp
    ${LLAMACPP_LIB_DIR}/llama-context.cpp
    ${LLAMACPP_LIB_DIR}/llama-arch.cpp
    ${LLAMACPP_LIB_DIR}/llama-batch.cpp
    ${LLAMACPP_LIB_DIR}/llama-cparams.cpp
    ${LLAMACPP_LIB_DIR}/llama-hparams.cpp
    ${LLAMACPP_LIB_DIR}/llama.cpp
    ${LLAMACPP_LIB_DIR}/llama-model.cpp
    ${LLAMACPP_LIB_DIR}/llama-model-loader.cpp
    ${LLAMACPP_LIB_DIR}/llama-kv-cache.cpp
    ${LLAMACPP_LIB_DIR}/llama-kv-cache-iswa.cpp
    ${LLAMACPP_LIB_DIR}/llama-memory-hybrid.cpp
    ${LLAMACPP_LIB_DIR}/llama-memory-recurrent.cpp
    ${LLAMACPP_LIB_DIR}/llama-mmap.cpp
    ${LLAMACPP_LIB_DIR}/llama-memory.cpp
    ${LLAMACPP_LIB_DIR}/llama-io.cpp
    ${LLAMACPP_LIB_DIR}/llama-graph.cpp
    # common / utilities
    ${LLAMACPP_LIB_DIR}/sampling.cpp
    ${LLAMACPP_LIB_DIR}/unicode-data.cpp
    ${LLAMACPP_LIB_DIR}/unicode.cpp
    ${LLAMACPP_LIB_DIR}/common.cpp
    ${LLAMACPP_LIB_DIR}/chat.cpp
    ${LLAMACPP_LIB_DIR}/json-schema-to-grammar.cpp
    ${LLAMACPP_LIB_DIR}/nlohmann/json.hpp
    ${LLAMACPP_LIB_DIR}/nlohmann/json_fwd.hpp
    ${LLAMACPP_LIB_DIR}/minja/minja.hpp
    ${LLAMACPP_LIB_DIR}/minja/chat-template.hpp
    ${LLAMACPP_LIB_DIR}/anyascii.c
    # react-native / capacitor glue
    ${LLAMACPP_LIB_DIR}/rn-llama.cpp
    ${LLAMACPP_LIB_DIR}/rn-completion.cpp
    ${LLAMACPP_LIB_DIR}/rn-tts.cpp
    # JNI bridge
    ${CMAKE_SOURCE_DIR}/jni-utils.h
    ${CMAKE_SOURCE_DIR}/jni.cpp
)
# Android's liblog provides __android_log_print used by the native code.
find_library(LOG_LIB log)
# find_library does not fail on its own; abort at configure time rather
# than producing an unlinkable "LOG_LIB-NOTFOUND" on the link line.
if(NOT LOG_LIB)
    message(FATAL_ERROR "Android 'log' library not found; build with the NDK toolchain.")
endif()
# Build one shared library for a given Android ABI.
#   target_name - name of the shared library target to create
#   arch        - ggml-cpu arch subdirectory ("arm64", "arm", ...) holding
#                 arch-specific kernels, or "generic" to build without them
#   cpu_flags   - compiler flags for this ABI; space- or ;-separated
function(build_library target_name arch cpu_flags)
    # function() opens a fresh scope, but initialize explicitly so the
    # "generic" branch never expands an undefined variable.
    set(SOURCE_FILES_ARCH "")
    # Quote the comparison: the original `${arch} STREQUAL` form would
    # dereference AGAIN if the value happened to name another variable.
    if(NOT "${arch}" STREQUAL "generic")
        set(SOURCE_FILES_ARCH
            ${LLAMACPP_LIB_DIR}/ggml-cpu/arch/${arch}/quants.c
            ${LLAMACPP_LIB_DIR}/ggml-cpu/arch/${arch}/repack.cpp
        )
    endif()

    add_library(
        ${target_name}
        SHARED
        ${SOURCE_FILES}
        ${SOURCE_FILES_ARCH}
    )

    # Accept either "-a -b" or "-a;-b": convert spaces to list separators so
    # each flag reaches the compiler as its own argument. Previously a
    # multi-flag string like "-march=armv7-a -mfpu=neon" was passed as ONE
    # quoted argument, which the compiler rejects.
    string(REPLACE " " ";" cpu_flag_list "${cpu_flags}")
    target_compile_options(${target_name} PRIVATE ${cpu_flag_list})

    # PRIVATE: liblog is an implementation detail of this shared library.
    target_link_libraries(${target_name} PRIVATE ${LOG_LIB})

    # Drop the .so where the Gradle packaging step picks up JNI libs.
    set_target_properties(${target_name} PROPERTIES
        LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/../jniLibs/${ANDROID_ABI}
    )
endfunction()
# Build for different architectures. Gradle configures CMake once per
# ANDROID_ABI, so exactly one branch runs — exactly one target exists.
# Flags are passed as ;-separated lists so each one reaches the compiler
# as a separate argument (a quoted "-a -b" string would become a single
# malformed option).
if(ANDROID_ABI STREQUAL "arm64-v8a")
    build_library(llama-cpp-arm64-v8a "arm64" "-march=armv8-a")
elseif(ANDROID_ABI STREQUAL "armeabi-v7a")
    build_library(llama-cpp-armeabi-v7a "arm" "-march=armv7-a;-mfpu=neon")
elseif(ANDROID_ABI STREQUAL "x86")
    build_library(llama-cpp-x86 "generic" "-march=i686;-mtune=intel;-mssse3;-mfpmath=sse;-m32")
elseif(ANDROID_ABI STREQUAL "x86_64")
    build_library(llama-cpp-x86_64 "generic" "-march=x86-64;-msse4.2;-mpopcnt;-m64;-mtune=intel")
endif()

# Set compile definitions on whichever target was actually created above.
# The original called target_compile_definitions unconditionally for all
# four names, which is a fatal configure error for the three targets that
# do not exist in this ABI's configure pass. The definitions are identical
# for every ABI, so apply one guarded loop instead of four copies.
# (target_compile_definitions strips a leading -D, so the bare names below
# are equivalent to the original -D-prefixed ones.)
foreach(abi_target
        llama-cpp-arm64-v8a
        llama-cpp-armeabi-v7a
        llama-cpp-x86
        llama-cpp-x86_64)
    if(TARGET ${abi_target})
        target_compile_definitions(${abi_target} PRIVATE
            NDEBUG
            # NOTE(review): this defines a macro literally named "O3" and has
            # no effect on optimization; if -O3 was intended it belongs in
            # target_compile_options. Kept as-is to preserve behavior.
            O3
            LM_GGML_USE_CPU
            LM_GGML_CPU_GENERIC
        )
    endif()
endforeach()