llama-cpp-capacitor 0.1.1 → 0.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,149 @@
+ cmake_minimum_required(VERSION 3.16)
+ project(llama-cpp VERSION 1.0.0 LANGUAGES CXX C)
+
+ set(CMAKE_CXX_STANDARD 17)
+ set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+ # iOS specific settings for ARM64 devices
+ set(CMAKE_OSX_DEPLOYMENT_TARGET 13.0)
+ set(CMAKE_XCODE_ATTRIBUTE_ENABLE_BITCODE NO)
+
+ # Common llama.cpp definitions
+ add_definitions(
+ -DNDEBUG
+ -DO3
+ -DLM_GGML_USE_CPU
+ -DLM_GGML_USE_ACCELERATE
+ -DLM_GGML_USE_METAL
+ -DLM_GGML_METAL_USE_BF16
+ )
+
+ set(SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../cpp)
+
+ # ARM64‑tuned ggml sources
+ set(SOURCE_FILES_ARCH
+ ${SOURCE_DIR}/ggml-cpu/arch/arm/quants.c
+ ${SOURCE_DIR}/ggml-cpu/arch/arm/repack.cpp
+ )
+
+ # Define public headers
+ set(PUBLIC_HEADERS
+ ${SOURCE_DIR}/cap-llama.h
+ ${SOURCE_DIR}/cap-completion.h
+ ${SOURCE_DIR}/cap-tts.h
+ ${SOURCE_DIR}/llama.h
+ ${SOURCE_DIR}/ggml.h
+ )
+
+ # Create library target
+ add_library(llama-cpp SHARED
+ ${SOURCE_DIR}/ggml.c
+ ${SOURCE_DIR}/ggml-alloc.c
+ ${SOURCE_DIR}/ggml-backend.cpp
+ ${SOURCE_DIR}/ggml-backend-reg.cpp
+ ${SOURCE_DIR}/ggml-cpu/amx/amx.cpp
+ ${SOURCE_DIR}/ggml-cpu/amx/mmq.cpp
+ ${SOURCE_DIR}/ggml-cpu/ggml-cpu.c
+ ${SOURCE_DIR}/ggml-cpu/ggml-cpu.cpp
+ ${SOURCE_DIR}/ggml-cpu/quants.c
+ ${SOURCE_DIR}/ggml-cpu/traits.cpp
+ ${SOURCE_DIR}/ggml-cpu/repack.cpp
+ ${SOURCE_DIR}/ggml-cpu/unary-ops.cpp
+ ${SOURCE_DIR}/ggml-cpu/binary-ops.cpp
+ ${SOURCE_DIR}/ggml-cpu/vec.cpp
+ ${SOURCE_DIR}/ggml-cpu/ops.cpp
+ ${SOURCE_DIR}/ggml-metal.m
+ ${SOURCE_DIR}/ggml-opt.cpp
+ ${SOURCE_DIR}/ggml-threading.cpp
+ ${SOURCE_DIR}/ggml-quants.c
+ ${SOURCE_DIR}/gguf.cpp
+ ${SOURCE_DIR}/log.cpp
+ ${SOURCE_DIR}/llama-impl.cpp
+ ${SOURCE_DIR}/llama-grammar.cpp
+ ${SOURCE_DIR}/llama-sampling.cpp
+ ${SOURCE_DIR}/llama-vocab.cpp
+ ${SOURCE_DIR}/llama-adapter.cpp
+ ${SOURCE_DIR}/llama-chat.cpp
+ ${SOURCE_DIR}/llama-context.cpp
+ ${SOURCE_DIR}/llama-arch.cpp
+ ${SOURCE_DIR}/llama-batch.cpp
+ ${SOURCE_DIR}/llama-cparams.cpp
+ ${SOURCE_DIR}/llama-hparams.cpp
+ ${SOURCE_DIR}/llama.cpp
+ ${SOURCE_DIR}/llama-model.cpp
+ ${SOURCE_DIR}/llama-model-loader.cpp
+ ${SOURCE_DIR}/llama-model-saver.cpp
+ ${SOURCE_DIR}/llama-mmap.cpp
+ ${SOURCE_DIR}/llama-kv-cache.cpp
+ ${SOURCE_DIR}/llama-kv-cache-iswa.cpp
+ ${SOURCE_DIR}/llama-memory-hybrid.cpp
+ ${SOURCE_DIR}/llama-memory-recurrent.cpp
+ ${SOURCE_DIR}/llama-memory.cpp
+ ${SOURCE_DIR}/llama-io.cpp
+ ${SOURCE_DIR}/llama-graph.cpp
+ ${SOURCE_DIR}/sampling.cpp
+ ${SOURCE_DIR}/unicode-data.cpp
+ ${SOURCE_DIR}/unicode.cpp
+ ${SOURCE_DIR}/common.cpp
+ ${SOURCE_DIR}/chat.cpp
+ # Additional sources required for JSON parsing, chat parser, and mtmd tools
+ ${SOURCE_DIR}/chat-parser.cpp
+ ${SOURCE_DIR}/regex-partial.cpp
+ ${SOURCE_DIR}/json-partial.cpp
+ ${SOURCE_DIR}/json-schema-to-grammar.cpp
+ ${SOURCE_DIR}/anyascii.c
+ ${SOURCE_DIR}/tools/mtmd/mtmd.cpp
+ ${SOURCE_DIR}/tools/mtmd/mtmd-helper.cpp
+ ${SOURCE_DIR}/tools/mtmd/mtmd-audio.cpp
+ ${SOURCE_DIR}/tools/mtmd/clip.cpp
+ ${SOURCE_DIR}/cap-llama.cpp
+ ${SOURCE_DIR}/cap-completion.cpp
+ ${SOURCE_DIR}/cap-tts.cpp
+ ${SOURCE_FILES_ARCH}
+ )
+
+ # Set target properties
+ set_target_properties(llama-cpp PROPERTIES
+ FRAMEWORK TRUE
+ FRAMEWORK_VERSION A
+ MACOSX_FRAMEWORK_IDENTIFIER com.arusatech.llama-cpp
+ MACOSX_FRAMEWORK_BUNDLE_VERSION 1.0.0
+ MACOSX_FRAMEWORK_SHORT_VERSION_STRING 1.0.0
+ XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY "iPhone Developer"
+ XCODE_ATTRIBUTE_DEVELOPMENT_TEAM ""
+ XCODE_ATTRIBUTE_ONLY_ACTIVE_ARCH NO
+ XCODE_ATTRIBUTE_ENABLE_BITCODE NO
+ )
+
+ # Include directories
+ target_include_directories(llama-cpp PRIVATE
+ ${SOURCE_DIR}
+ ${SOURCE_DIR}/ggml-cpu
+ ${SOURCE_DIR}/tools/mtmd
+ )
+
+ # ARM64: rely on Clang's default tuning; no x86‑specific -march flags here
+
+ # Link frameworks via linker flags
+ target_link_options(llama-cpp PRIVATE
+ "-Wl,-framework,Accelerate"
+ "-Wl,-framework,Metal"
+ "-Wl,-framework,MetalKit"
+ "-Wl,-framework,Foundation"
+ "-Wl,-framework,CoreGraphics"
+ )
+
+ # Public headers
+ set_target_properties(llama-cpp PROPERTIES
+ PUBLIC_HEADER "${PUBLIC_HEADERS}"
+ )
+
+ # Install rules
+ install(TARGETS llama-cpp
+ FRAMEWORK DESTINATION .
+ )
+
+ message(STATUS "Building llama-cpp for ARM64 (devices)")
+ message(STATUS "Source directory: ${SOURCE_DIR}")
+ message(STATUS "Architecture: arm64")
+
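
The hunk above adds the ARM64 (device) CMake project for the native library. As a rough sketch of how such a project could be configured and built for the device slice, using only standard CMake cross-compile variables (the flags below are illustrative assumptions, not taken from the package; the packaged build-native.sh is not part of this diff, and the build:ios npm script shown later simply runs cmake -B build -S . from the ios directory):

  # hypothetical device-slice build; the XCODE_ATTRIBUTE_* properties above only take effect with the Xcode generator
  cmake -B build-arm64 -S ios -G Xcode \
      -DCMAKE_SYSTEM_NAME=iOS \
      -DCMAKE_OSX_SYSROOT=iphoneos \
      -DCMAKE_OSX_ARCHITECTURES=arm64
  cmake --build build-arm64 --config Release
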
@@ -0,0 +1,176 @@
+ cmake_minimum_required(VERSION 3.16)
+ project(llama-cpp VERSION 1.0.0 LANGUAGES CXX C)
+
+ set(CMAKE_CXX_STANDARD 17)
+ set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+ # iOS specific settings for x86_64 emulator
+ set(CMAKE_OSX_DEPLOYMENT_TARGET 13.0)
+ set(CMAKE_XCODE_ATTRIBUTE_ENABLE_BITCODE NO)
+
+ # Dependencies and compile options optimized for x86_64
+ add_definitions(
+ -DNDEBUG
+ -DO3
+ -DLM_GGML_USE_CPU
+ -DLM_GGML_USE_ACCELERATE
+ -DLM_GGML_USE_METAL
+ -DLM_GGML_METAL_USE_BF16
+ )
+
+ # X86_64 specific optimizations for emulator
+ add_definitions(-DLM_GGML_CPU_GENERIC)
+ add_definitions(-DLM_GGML_USE_AVX2)
+ add_definitions(-DLM_GGML_USE_AVX)
+ add_definitions(-DLM_GGML_USE_SSE3)
+ add_definitions(-DLM_GGML_USE_SSE)
+ add_definitions(-DLM_GGML_USE_F16C)
+ add_definitions(-DLM_GGML_USE_FMA)
+
+ set(SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../cpp)
+
+ # Use x86_64 optimized source files for emulator
+ set(SOURCE_FILES_ARCH
+ ${SOURCE_DIR}/ggml-cpu/arch/x86/quants.c
+ ${SOURCE_DIR}/ggml-cpu/arch/x86/repack.cpp
+ )
+
+ # Define public headers
+ set(PUBLIC_HEADERS
+ ${SOURCE_DIR}/cap-llama.h
+ ${SOURCE_DIR}/cap-completion.h
+ ${SOURCE_DIR}/cap-tts.h
+ ${SOURCE_DIR}/llama.h
+ ${SOURCE_DIR}/ggml.h
+ )
+
+ # Create library target
+ add_library(llama-cpp SHARED
+ ${SOURCE_DIR}/ggml.c
+ ${SOURCE_DIR}/ggml-alloc.c
+ ${SOURCE_DIR}/ggml-backend.cpp
+ ${SOURCE_DIR}/ggml-backend-reg.cpp
+ ${SOURCE_DIR}/ggml-cpu/amx/amx.cpp
+ ${SOURCE_DIR}/ggml-cpu/amx/mmq.cpp
+ ${SOURCE_DIR}/ggml-cpu/ggml-cpu.c
+ ${SOURCE_DIR}/ggml-cpu/ggml-cpu.cpp
+ ${SOURCE_DIR}/ggml-cpu/quants.c
+ ${SOURCE_DIR}/ggml-cpu/traits.cpp
+ ${SOURCE_DIR}/ggml-cpu/repack.cpp
+ ${SOURCE_DIR}/ggml-cpu/unary-ops.cpp
+ ${SOURCE_DIR}/ggml-cpu/binary-ops.cpp
+ ${SOURCE_DIR}/ggml-cpu/vec.cpp
+ ${SOURCE_DIR}/ggml-cpu/ops.cpp
+ ${SOURCE_DIR}/ggml-metal.m
+ ${SOURCE_DIR}/ggml-opt.cpp
+ ${SOURCE_DIR}/ggml-threading.cpp
+ ${SOURCE_DIR}/ggml-quants.c
+ ${SOURCE_DIR}/gguf.cpp
+ ${SOURCE_DIR}/log.cpp
+ ${SOURCE_DIR}/llama-impl.cpp
+ ${SOURCE_DIR}/llama-grammar.cpp
+ ${SOURCE_DIR}/llama-sampling.cpp
+ ${SOURCE_DIR}/llama-vocab.cpp
+ ${SOURCE_DIR}/llama-adapter.cpp
+ ${SOURCE_DIR}/llama-chat.cpp
+ ${SOURCE_DIR}/llama-context.cpp
+ ${SOURCE_DIR}/llama-arch.cpp
+ ${SOURCE_DIR}/llama-batch.cpp
+ ${SOURCE_DIR}/llama-cparams.cpp
+ ${SOURCE_DIR}/llama-hparams.cpp
+ ${SOURCE_DIR}/llama.cpp
+ ${SOURCE_DIR}/llama-model.cpp
+ ${SOURCE_DIR}/llama-model-loader.cpp
+ ${SOURCE_DIR}/llama-model-saver.cpp
+ ${SOURCE_DIR}/llama-mmap.cpp
+ ${SOURCE_DIR}/llama-kv-cache.cpp
+ ${SOURCE_DIR}/llama-kv-cache-iswa.cpp
+ ${SOURCE_DIR}/llama-memory-hybrid.cpp
+ ${SOURCE_DIR}/llama-memory-recurrent.cpp
+ ${SOURCE_DIR}/llama-memory.cpp
+ ${SOURCE_DIR}/llama-io.cpp
+ ${SOURCE_DIR}/llama-graph.cpp
+ ${SOURCE_DIR}/sampling.cpp
+ ${SOURCE_DIR}/unicode-data.cpp
+ ${SOURCE_DIR}/unicode.cpp
+ ${SOURCE_DIR}/common.cpp
+ ${SOURCE_DIR}/chat.cpp
+ # Additional sources required for JSON parsing, chat parser, and mtmd tools
+ ${SOURCE_DIR}/chat-parser.cpp
+ ${SOURCE_DIR}/regex-partial.cpp
+ ${SOURCE_DIR}/json-partial.cpp
+ ${SOURCE_DIR}/json-schema-to-grammar.cpp
+ ${SOURCE_DIR}/anyascii.c
+ ${SOURCE_DIR}/tools/mtmd/mtmd.cpp
+ ${SOURCE_DIR}/tools/mtmd/mtmd-helper.cpp
+ ${SOURCE_DIR}/tools/mtmd/mtmd-audio.cpp
+ ${SOURCE_DIR}/tools/mtmd/clip.cpp
+ ${SOURCE_DIR}/minja/minja.hpp
+ ${SOURCE_DIR}/minja/chat-template.hpp
+ ${SOURCE_DIR}/nlohmann/json.hpp
+ ${SOURCE_DIR}/nlohmann/json_fwd.hpp
+ ${SOURCE_DIR}/cap-llama.cpp
+ ${SOURCE_DIR}/cap-completion.cpp
+ ${SOURCE_DIR}/cap-tts.cpp
+ ${SOURCE_FILES_ARCH}
+ )
+
+ # Set target properties
+ set_target_properties(llama-cpp PROPERTIES
+ FRAMEWORK TRUE
+ FRAMEWORK_VERSION A
+ MACOSX_FRAMEWORK_IDENTIFIER com.arusatech.llama-cpp
+ MACOSX_FRAMEWORK_BUNDLE_VERSION 1.0.0
+ MACOSX_FRAMEWORK_SHORT_VERSION_STRING 1.0.0
+ XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY "iPhone Developer"
+ XCODE_ATTRIBUTE_DEVELOPMENT_TEAM ""
+ XCODE_ATTRIBUTE_ONLY_ACTIVE_ARCH NO
+ XCODE_ATTRIBUTE_ENABLE_BITCODE NO
+ )
+
+ # Set include directories
+ target_include_directories(llama-cpp PRIVATE
+ ${SOURCE_DIR}
+ ${SOURCE_DIR}/ggml-cpu
+ ${SOURCE_DIR}/tools/mtmd
+ )
+
+ # Set compile options for x86_64 optimization
+ target_compile_options(llama-cpp PRIVATE
+ -march=x86-64
+ -mtune=generic
+ -mavx2
+ -mavx
+ -msse3
+ -msse
+ -mfma
+ -mf16c
+ )
+
+ # Set link options
+ # NOTE:
+ # Use -Wl,-framework,<Name> so that the compiler driver passes the correct
+ # framework flags through to the linker as a single option.
+ target_link_options(llama-cpp PRIVATE
+ "-Wl,-framework,Accelerate"
+ "-Wl,-framework,Metal"
+ "-Wl,-framework,MetalKit"
+ "-Wl,-framework,Foundation"
+ "-Wl,-framework,CoreGraphics"
+ )
+
+ # Set public headers
+ set_target_properties(llama-cpp PROPERTIES
+ PUBLIC_HEADER "${PUBLIC_HEADERS}"
+ )
+
+ # Install rules
+ install(TARGETS llama-cpp
+ FRAMEWORK DESTINATION .
+ )
+
+ # Print build information
+ message(STATUS "Building llama-cpp for x86_64 (emulator)")
+ message(STATUS "Source directory: ${SOURCE_DIR}")
+ message(STATUS "Architecture: x86_64")
+ message(STATUS "Optimizations: AVX2, AVX, SSE3, SSE, FMA, F16C")
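
This second file is the x86_64 (simulator) counterpart: the same source list plus SSE/AVX/FMA/F16C definitions and -m compile flags, and additional minja/nlohmann headers. A minimal simulator-slice configuration sketch under the same assumptions as above (illustrative flags only; the actual invocation lives in build-native.sh, which is not included in this diff):

  # hypothetical simulator-slice build targeting the iphonesimulator SDK
  cmake -B build-x86_64 -S ios -G Xcode \
      -DCMAKE_SYSTEM_NAME=iOS \
      -DCMAKE_OSX_SYSROOT=iphonesimulator \
      -DCMAKE_OSX_ARCHITECTURES=x86_64
  cmake --build build-x86_64 --config Release
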
@@ -0,0 +1,149 @@
+ cmake_minimum_required(VERSION 3.16)
+ project(llama-cpp VERSION 1.0.0 LANGUAGES CXX C)
+
+ set(CMAKE_CXX_STANDARD 17)
+ set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+ # iOS specific settings for ARM64 devices
+ set(CMAKE_OSX_DEPLOYMENT_TARGET 13.0)
+ set(CMAKE_XCODE_ATTRIBUTE_ENABLE_BITCODE NO)
+
+ # Common llama.cpp definitions
+ add_definitions(
+ -DNDEBUG
+ -DO3
+ -DLM_GGML_USE_CPU
+ -DLM_GGML_USE_ACCELERATE
+ -DLM_GGML_USE_METAL
+ -DLM_GGML_METAL_USE_BF16
+ )
+
+ set(SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../cpp)
+
+ # ARM64‑tuned ggml sources
+ set(SOURCE_FILES_ARCH
+ ${SOURCE_DIR}/ggml-cpu/arch/arm/quants.c
+ ${SOURCE_DIR}/ggml-cpu/arch/arm/repack.cpp
+ )
+
+ # Define public headers
+ set(PUBLIC_HEADERS
+ ${SOURCE_DIR}/cap-llama.h
+ ${SOURCE_DIR}/cap-completion.h
+ ${SOURCE_DIR}/cap-tts.h
+ ${SOURCE_DIR}/llama.h
+ ${SOURCE_DIR}/ggml.h
+ )
+
+ # Create library target
+ add_library(llama-cpp SHARED
+ ${SOURCE_DIR}/ggml.c
+ ${SOURCE_DIR}/ggml-alloc.c
+ ${SOURCE_DIR}/ggml-backend.cpp
+ ${SOURCE_DIR}/ggml-backend-reg.cpp
+ ${SOURCE_DIR}/ggml-cpu/amx/amx.cpp
+ ${SOURCE_DIR}/ggml-cpu/amx/mmq.cpp
+ ${SOURCE_DIR}/ggml-cpu/ggml-cpu.c
+ ${SOURCE_DIR}/ggml-cpu/ggml-cpu.cpp
+ ${SOURCE_DIR}/ggml-cpu/quants.c
+ ${SOURCE_DIR}/ggml-cpu/traits.cpp
+ ${SOURCE_DIR}/ggml-cpu/repack.cpp
+ ${SOURCE_DIR}/ggml-cpu/unary-ops.cpp
+ ${SOURCE_DIR}/ggml-cpu/binary-ops.cpp
+ ${SOURCE_DIR}/ggml-cpu/vec.cpp
+ ${SOURCE_DIR}/ggml-cpu/ops.cpp
+ ${SOURCE_DIR}/ggml-metal.m
+ ${SOURCE_DIR}/ggml-opt.cpp
+ ${SOURCE_DIR}/ggml-threading.cpp
+ ${SOURCE_DIR}/ggml-quants.c
+ ${SOURCE_DIR}/gguf.cpp
+ ${SOURCE_DIR}/log.cpp
+ ${SOURCE_DIR}/llama-impl.cpp
+ ${SOURCE_DIR}/llama-grammar.cpp
+ ${SOURCE_DIR}/llama-sampling.cpp
+ ${SOURCE_DIR}/llama-vocab.cpp
+ ${SOURCE_DIR}/llama-adapter.cpp
+ ${SOURCE_DIR}/llama-chat.cpp
+ ${SOURCE_DIR}/llama-context.cpp
+ ${SOURCE_DIR}/llama-arch.cpp
+ ${SOURCE_DIR}/llama-batch.cpp
+ ${SOURCE_DIR}/llama-cparams.cpp
+ ${SOURCE_DIR}/llama-hparams.cpp
+ ${SOURCE_DIR}/llama.cpp
+ ${SOURCE_DIR}/llama-model.cpp
+ ${SOURCE_DIR}/llama-model-loader.cpp
+ ${SOURCE_DIR}/llama-model-saver.cpp
+ ${SOURCE_DIR}/llama-mmap.cpp
+ ${SOURCE_DIR}/llama-kv-cache.cpp
+ ${SOURCE_DIR}/llama-kv-cache-iswa.cpp
+ ${SOURCE_DIR}/llama-memory-hybrid.cpp
+ ${SOURCE_DIR}/llama-memory-recurrent.cpp
+ ${SOURCE_DIR}/llama-memory.cpp
+ ${SOURCE_DIR}/llama-io.cpp
+ ${SOURCE_DIR}/llama-graph.cpp
+ ${SOURCE_DIR}/sampling.cpp
+ ${SOURCE_DIR}/unicode-data.cpp
+ ${SOURCE_DIR}/unicode.cpp
+ ${SOURCE_DIR}/common.cpp
+ ${SOURCE_DIR}/chat.cpp
+ # Additional sources required for JSON parsing, chat parser, and mtmd tools
+ ${SOURCE_DIR}/chat-parser.cpp
+ ${SOURCE_DIR}/regex-partial.cpp
+ ${SOURCE_DIR}/json-partial.cpp
+ ${SOURCE_DIR}/json-schema-to-grammar.cpp
+ ${SOURCE_DIR}/anyascii.c
+ ${SOURCE_DIR}/tools/mtmd/mtmd.cpp
+ ${SOURCE_DIR}/tools/mtmd/mtmd-helper.cpp
+ ${SOURCE_DIR}/tools/mtmd/mtmd-audio.cpp
+ ${SOURCE_DIR}/tools/mtmd/clip.cpp
+ ${SOURCE_DIR}/cap-llama.cpp
+ ${SOURCE_DIR}/cap-completion.cpp
+ ${SOURCE_DIR}/cap-tts.cpp
+ ${SOURCE_FILES_ARCH}
+ )
+
+ # Set target properties
+ set_target_properties(llama-cpp PROPERTIES
+ FRAMEWORK TRUE
+ FRAMEWORK_VERSION A
+ MACOSX_FRAMEWORK_IDENTIFIER com.arusatech.llama-cpp
+ MACOSX_FRAMEWORK_BUNDLE_VERSION 1.0.0
+ MACOSX_FRAMEWORK_SHORT_VERSION_STRING 1.0.0
+ XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY "iPhone Developer"
+ XCODE_ATTRIBUTE_DEVELOPMENT_TEAM ""
+ XCODE_ATTRIBUTE_ONLY_ACTIVE_ARCH NO
+ XCODE_ATTRIBUTE_ENABLE_BITCODE NO
+ )
+
+ # Include directories
+ target_include_directories(llama-cpp PRIVATE
+ ${SOURCE_DIR}
+ ${SOURCE_DIR}/ggml-cpu
+ ${SOURCE_DIR}/tools/mtmd
+ )
+
+ # ARM64: rely on Clang's default tuning; no x86‑specific -march flags here
+
+ # Link frameworks via linker flags
+ target_link_options(llama-cpp PRIVATE
+ "-Wl,-framework,Accelerate"
+ "-Wl,-framework,Metal"
+ "-Wl,-framework,MetalKit"
+ "-Wl,-framework,Foundation"
+ "-Wl,-framework,CoreGraphics"
+ )
+
+ # Public headers
+ set_target_properties(llama-cpp PROPERTIES
+ PUBLIC_HEADER "${PUBLIC_HEADERS}"
+ )
+
+ # Install rules
+ install(TARGETS llama-cpp
+ FRAMEWORK DESTINATION .
+ )
+
+ message(STATUS "Building llama-cpp for ARM64 (devices)")
+ message(STATUS "Source directory: ${SOURCE_DIR}")
+ message(STATUS "Architecture: arm64")
+
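
The third new file repeats the ARM64 device configuration (presumably the default ios/CMakeLists.txt alongside ios/CMakeLists-arm64.txt, going by the files list in the package.json diff below). Once both slices are built as frameworks, they would typically be bundled for distribution with xcodebuild -create-xcframework; a hedged sketch with purely hypothetical output paths, since the packaged build script is not included in this diff:

  # combine device and simulator frameworks into one xcframework (paths are illustrative)
  xcodebuild -create-xcframework \
      -framework build-arm64/Release-iphoneos/llama-cpp.framework \
      -framework build-x86_64/Release-iphonesimulator/llama-cpp.framework \
      -output ios/Frameworks/llama-cpp.xcframework
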
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "llama-cpp-capacitor",
- "version": "0.1.1",
+ "version": "0.1.3",
  "description": "A native Capacitor plugin that embeds llama.cpp directly into mobile apps, enabling offline AI inference with chat-first API design. Complete iOS and Android support: text generation, chat, multimodal, TTS, LoRA, embeddings, and more.",
  "main": "dist/plugin.cjs.js",
  "type": "module",
@@ -10,12 +10,17 @@
  "files": [
  "android/src/main/",
  "android/build.gradle",
+ "build-native.sh",
  "cpp/",
  "dist/",
+ "ios/CMakeLists.txt",
+ "ios/CMakeLists-arm64.txt",
+ "ios/CMakeLists-x86_64.txt",
  "ios/Sources",
  "ios/Frameworks",
  "Package.swift",
  "LlamaCpp.podspec",
+ "LlamaCppCapacitor.podspec",
  "types/"
  ],
  "author": "Yakub Mohammad",
@@ -64,8 +69,11 @@
  "docgen": "npx docgen --api LlamaCppPlugin --output-readme README.md --output-json dist/docs.json",
  "build": "npm run clean && npm run docgen && tsc && rollup -c rollup.config.mjs",
  "build:native": "./build-native.sh",
+ "build:all": "npm run build && npm run build:native",
  "build:ios": "cd ios && cmake -B build -S . && cmake --build build --config Release",
  "build:android": "cd android && gradlew.bat assembleRelease",
+ "pack": "npm run build && npm pack --dry-run",
+ "pack:full": "npm run build:all && npm pack --dry-run",
  "test": "jest",
  "test:integration": "./scripts/test-integration.sh",
  "test:integration:jest": "jest --config test/jest.integration.config.js",