@fugood/llama.node 0.2.1 → 0.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. package/bin/darwin/arm64/default.metallib +0 -0
  2. package/bin/darwin/arm64/llama-node.node +0 -0
  3. package/bin/darwin/x64/default.metallib +0 -0
  4. package/bin/darwin/x64/llama-node.node +0 -0
  5. package/bin/linux/arm64/llama-node.node +0 -0
  6. package/bin/linux/x64/llama-node.node +0 -0
  7. package/bin/linux-vulkan/arm64/llama-node.node +0 -0
  8. package/bin/linux-vulkan/x64/llama-node.node +0 -0
  9. package/bin/win32/arm64/llama-node.node +0 -0
  10. package/bin/win32/arm64/node.lib +0 -0
  11. package/bin/win32/x64/llama-node.node +0 -0
  12. package/bin/win32/x64/node.lib +0 -0
  13. package/bin/win32-vulkan/arm64/llama-node.node +0 -0
  14. package/bin/win32-vulkan/arm64/node.lib +0 -0
  15. package/bin/win32-vulkan/x64/llama-node.node +0 -0
  16. package/bin/win32-vulkan/x64/node.lib +0 -0
  17. package/package.json +1 -1
  18. package/src/LlamaContext.cpp +2 -2
  19. package/src/llama.cpp/CMakeLists.txt +72 -46
  20. package/src/llama.cpp/cmake/arm64-windows-llvm.cmake +16 -0
  21. package/src/llama.cpp/cmake/arm64-windows-msvc.cmake +6 -0
  22. package/src/llama.cpp/common/common.cpp +732 -752
  23. package/src/llama.cpp/common/common.h +47 -41
  24. package/src/llama.cpp/common/grammar-parser.cpp +1 -1
  25. package/src/llama.cpp/common/json-schema-to-grammar.cpp +6 -6
  26. package/src/llama.cpp/common/log.h +5 -5
  27. package/src/llama.cpp/common/sampling.cpp +89 -7
  28. package/src/llama.cpp/common/sampling.h +5 -0
  29. package/src/llama.cpp/common/train.cpp +2 -2
  30. package/src/llama.cpp/examples/batched/batched.cpp +1 -1
  31. package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +1 -1
  32. package/src/llama.cpp/examples/embedding/embedding.cpp +3 -2
  33. package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +2 -2
  34. package/src/llama.cpp/examples/finetune/finetune.cpp +4 -3
  35. package/src/llama.cpp/examples/imatrix/imatrix.cpp +2 -2
  36. package/src/llama.cpp/examples/infill/infill.cpp +8 -8
  37. package/src/llama.cpp/examples/llama-bench/llama-bench.cpp +2 -2
  38. package/src/llama.cpp/examples/llama.android/llama/CMakeLists.txt +13 -8
  39. package/src/llama.cpp/examples/llava/clip.h +1 -1
  40. package/src/llama.cpp/examples/llava/llava-cli.cpp +1 -1
  41. package/src/llama.cpp/examples/llava/llava.cpp +0 -15
  42. package/src/llama.cpp/examples/lookahead/lookahead.cpp +1 -1
  43. package/src/llama.cpp/examples/lookup/lookup.cpp +1 -1
  44. package/src/llama.cpp/examples/main/main.cpp +24 -16
  45. package/src/llama.cpp/examples/parallel/parallel.cpp +1 -1
  46. package/src/llama.cpp/examples/perplexity/perplexity.cpp +9 -9
  47. package/src/llama.cpp/examples/quantize/quantize.cpp +2 -2
  48. package/src/llama.cpp/examples/retrieval/retrieval.cpp +2 -2
  49. package/src/llama.cpp/examples/rpc/rpc-server.cpp +78 -14
  50. package/src/llama.cpp/examples/server/server.cpp +21 -9
  51. package/src/llama.cpp/examples/tokenize/tokenize.cpp +359 -9
  52. package/src/llama.cpp/examples/train-text-from-scratch/train-text-from-scratch.cpp +4 -3
  53. package/src/llama.cpp/ggml-backend.c +0 -1
  54. package/src/llama.cpp/ggml-common.h +0 -54
  55. package/src/llama.cpp/ggml-cuda.h +1 -0
  56. package/src/llama.cpp/ggml-impl.h +51 -0
  57. package/src/llama.cpp/ggml-kompute.cpp +4 -0
  58. package/src/llama.cpp/ggml-opencl.cpp +4 -1
  59. package/src/llama.cpp/ggml-quants.c +3700 -2041
  60. package/src/llama.cpp/ggml-rpc.cpp +188 -56
  61. package/src/llama.cpp/ggml-sycl.cpp +99 -530
  62. package/src/llama.cpp/ggml-vulkan-shaders.hpp +9351 -5627
  63. package/src/llama.cpp/ggml-vulkan.cpp +202 -225
  64. package/src/llama.cpp/ggml.c +1034 -1154
  65. package/src/llama.cpp/ggml.h +59 -31
  66. package/src/llama.cpp/llama.cpp +859 -609
  67. package/src/llama.cpp/llama.h +19 -6
  68. package/src/llama.cpp/requirements.txt +0 -1
  69. package/src/llama.cpp/tests/test-backend-ops.cpp +113 -47
  70. package/src/llama.cpp/tests/test-chat-template.cpp +16 -4
  71. package/src/llama.cpp/tests/test-grad0.cpp +43 -83
  72. package/src/llama.cpp/unicode-data.cpp +6969 -2169
  73. package/src/llama.cpp/unicode-data.h +15 -12
  74. package/src/llama.cpp/unicode.cpp +89 -111
  75. package/src/llama.cpp/unicode.h +44 -12
  76. package/src/llama.cpp/build.zig +0 -172
  77. package/src/llama.cpp/ggml-mpi.c +0 -216
  78. package/src/llama.cpp/ggml-mpi.h +0 -39
  79. package/src/llama.cpp/requirements/requirements-convert-persimmon-to-gguf.txt +0 -2
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "@fugood/llama.node",
   "access": "public",
-  "version": "0.2.1",
+  "version": "0.2.2",
   "description": "Llama.cpp for Node.js",
   "main": "lib/index.js",
   "scripts": {
package/src/LlamaContext.cpp CHANGED
@@ -61,7 +61,7 @@ LlamaContext::LlamaContext(const Napi::CallbackInfo &info)
   params.n_ctx = get_option<int32_t>(options, "n_ctx", 512);
   params.n_batch = get_option<int32_t>(options, "n_batch", 2048);
   params.n_threads =
-      get_option<int32_t>(options, "n_threads", get_math_cpu_count() / 2);
+      get_option<int32_t>(options, "n_threads", cpu_get_num_math() / 2);
   params.n_gpu_layers = get_option<int32_t>(options, "n_gpu_layers", -1);
   params.use_mlock = get_option<bool>(options, "use_mlock", false);
   params.use_mmap = get_option<bool>(options, "use_mmap", true);
@@ -81,7 +81,7 @@ LlamaContext::LlamaContext(const Napi::CallbackInfo &info)
   }
 
   _sess = std::make_shared<LlamaSession>(model, ctx, params);
-  _info = get_system_info(params);
+  _info = gpt_params_get_system_info(params);
 }
 
 // getSystemInfo(): string
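Note on the two renames above: they track the upstream llama.cpp common-library naming cleanup, where get_math_cpu_count() became cpu_get_num_math() and get_system_info() became gpt_params_get_system_info(). A minimal sketch of the updated call sites (illustrative only — configure_defaults() is a hypothetical helper; the two functions are the upstream common.h APIs):

    #include "common.h" // bundled llama.cpp common library

    // Hypothetical helper mirroring the change above: derive a default
    // thread count and capture the system-info string via the renamed APIs.
    static void configure_defaults(gpt_params &params) {
      // was: get_math_cpu_count() / 2
      params.n_threads = cpu_get_num_math() / 2;
      // was: get_system_info(params)
      std::string info = gpt_params_get_system_info(params);
      (void)info; // e.g. expose via getSystemInfo()
    }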
package/src/llama.cpp/CMakeLists.txt CHANGED
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.14) # for add_link_options and implicit target directories.
+cmake_minimum_required(VERSION 3.14) # for add_link_options and implicit target directories.
 project("llama.cpp" C CXX)
 include(CheckIncludeFileCXX)
 
@@ -72,11 +72,13 @@ else()
     set(INS_ENB ON)
 endif()
 
+option(LLAMA_SVE "llama: enable SVE" OFF)
 option(LLAMA_AVX "llama: enable AVX" ${INS_ENB})
 option(LLAMA_AVX2 "llama: enable AVX2" ${INS_ENB})
 option(LLAMA_AVX512 "llama: enable AVX512" OFF)
 option(LLAMA_AVX512_VBMI "llama: enable AVX512-VBMI" OFF)
 option(LLAMA_AVX512_VNNI "llama: enable AVX512-VNNI" OFF)
+option(LLAMA_AVX512_BF16 "llama: enable AVX512-BF16" OFF)
 option(LLAMA_FMA "llama: enable FMA" ${INS_ENB})
 # in MSVC F16C is implied with AVX2/AVX512
 if (NOT MSVC)
@@ -122,9 +124,7 @@ set(LLAMA_METAL_MACOSX_VERSION_MIN "" CACHE STRING
     "llama: metal minimum macOS version")
 set(LLAMA_METAL_STD "" CACHE STRING "llama: metal standard version (-std flag)")
 option(LLAMA_KOMPUTE "llama: use Kompute" OFF)
-option(LLAMA_MPI "llama: use MPI" OFF)
 option(LLAMA_RPC "llama: use RPC" OFF)
-option(LLAMA_QKK_64 "llama: use super-block size of 64 for k-quants" OFF)
 option(LLAMA_SYCL "llama: use SYCL" OFF)
 option(LLAMA_SYCL_F16 "llama: use 16 bit floats for sycl calculations" OFF)
 set(LLAMA_SYCL_TARGET "INTEL" CACHE STRING "llama: sycl target device")
@@ -134,6 +134,8 @@ set(LLAMA_SCHED_MAX_COPIES "4" CACHE STRING "llama: max input copies for pipeli
 option(LLAMA_BUILD_TESTS "llama: build tests" ${LLAMA_STANDALONE})
 option(LLAMA_BUILD_EXAMPLES "llama: build examples" ${LLAMA_STANDALONE})
 option(LLAMA_BUILD_SERVER "llama: build server example" ON)
+option(LLAMA_LASX "llama: enable lasx" ON)
+option(LLAMA_LSX "llama: enable lsx" ON)
 
 # add perf arguments
 option(LLAMA_PERF "llama: enable perf" OFF)
@@ -382,10 +384,6 @@ if (LLAMA_LLAMAFILE)
     set(GGML_SOURCES_LLAMAFILE sgemm.cpp)
 endif()
 
-if (LLAMA_QKK_64)
-    add_compile_definitions(GGML_QKK_64)
-endif()
-
 if (LLAMA_CUBLAS)
     message(WARNING "LLAMA_CUBLAS is deprecated and will be removed in the future.\nUse LLAMA_CUDA instead")
     set(LLAMA_CUDA ON)
@@ -466,35 +464,6 @@ if (LLAMA_CUDA)
     endif()
 endif()
 
-if (LLAMA_MPI)
-    cmake_minimum_required(VERSION 3.10)
-    find_package(MPI)
-    if (MPI_C_FOUND)
-        message(STATUS "MPI found")
-
-        set(GGML_HEADERS_MPI ggml-mpi.h)
-        set(GGML_SOURCES_MPI ggml-mpi.c)
-
-        add_compile_definitions(GGML_USE_MPI)
-        add_compile_definitions(${MPI_C_COMPILE_DEFINITIONS})
-
-        if (NOT MSVC)
-            add_compile_options(-Wno-cast-qual)
-        endif()
-
-        set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${MPI_C_LIBRARIES})
-        set(LLAMA_EXTRA_INCLUDES ${LLAMA_EXTRA_INCLUDES} ${MPI_C_INCLUDE_DIRS})
-
-        # Even if you're only using the C header, C++ programs may bring in MPI
-        # C++ functions, so more linkage is needed
-        if (MPI_CXX_FOUND)
-            set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${MPI_CXX_LIBRARIES})
-        endif()
-    else()
-        message(WARNING "MPI not found")
-    endif()
-endif()
-
 if (LLAMA_RPC)
     add_compile_definitions(GGML_USE_RPC)
 
@@ -532,6 +501,12 @@ if (LLAMA_VULKAN)
 
     add_compile_definitions(GGML_USE_VULKAN)
 
+    # Workaround to the "can't dereference invalidated vector iterator" bug in clang-cl debug build
+    # Posssibly relevant: https://stackoverflow.com/questions/74748276/visual-studio-no-displays-the-correct-length-of-stdvector
+    if (MSVC AND CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+        add_compile_definitions(_ITERATOR_DEBUG_LEVEL=0)
+    endif()
+
     if (LLAMA_VULKAN_CHECK_RESULTS)
         add_compile_definitions(GGML_VULKAN_CHECK_RESULTS)
     endif()
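Defining _ITERATOR_DEBUG_LEVEL=0 turns off the MSVC STL's checked iterators, which is what the clang-cl debug-build workaround above relies on. A standalone sketch (not code from the package) of the pattern such checks would otherwise trap:

    #include <vector>

    int main() {
      std::vector<int> v{1, 2, 3};
      auto it = v.begin();
      v.push_back(4); // may reallocate, invalidating `it`
      // *it; // with iterator debugging on, the MSVC STL would assert here;
      //      // _ITERATOR_DEBUG_LEVEL=0 compiles that check out entirely
      return 0;
    }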
@@ -555,16 +530,37 @@ if (LLAMA_VULKAN)
     endif()
 endif()
 
 if (LLAMA_HIPBLAS)
-    list(APPEND CMAKE_PREFIX_PATH /opt/rocm)
-
-    if (NOT ${CMAKE_C_COMPILER_ID} MATCHES "Clang")
-        message(WARNING "Only LLVM is supported for HIP, hint: CC=/opt/rocm/llvm/bin/clang")
+    if ($ENV{ROCM_PATH})
+        set(ROCM_PATH $ENV{ROCM_PATH})
+    else()
+        set(ROCM_PATH /opt/rocm)
     endif()
+    list(APPEND CMAKE_PREFIX_PATH ${ROCM_PATH})
 
-    if (NOT ${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")
-        message(WARNING "Only LLVM is supported for HIP, hint: CXX=/opt/rocm/llvm/bin/clang++")
+    # CMake on Windows doesn't support the HIP language yet
+    if(WIN32)
+        set(CXX_IS_HIPCC TRUE)
+    else()
+        string(REGEX MATCH "hipcc(\.bat)?$" CXX_IS_HIPCC "${CMAKE_CXX_COMPILER}")
     endif()
 
+    if(CXX_IS_HIPCC)
+        if(LINUX)
+            if (NOT ${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")
+                message(WARNING "Only LLVM is supported for HIP, hint: CXX=/opt/rocm/llvm/bin/clang++")
+            endif()
+
+            message(WARNING "Setting hipcc as the C++ compiler is legacy behavior."
+                    " Prefer setting the HIP compiler directly. See README for details.")
+        endif()
+    else()
+        # Forward AMDGPU_TARGETS to CMAKE_HIP_ARCHITECTURES.
+        if(AMDGPU_TARGETS AND NOT CMAKE_HIP_ARCHITECTURES)
+            set(CMAKE_HIP_ARCHITECTURES ${AMDGPU_TARGETS})
+        endif()
+        cmake_minimum_required(VERSION 3.21)
+        enable_language(HIP)
+    endif()
     find_package(hip REQUIRED)
     find_package(hipblas REQUIRED)
     find_package(rocblas REQUIRED)
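In effect, the HIP path now honors a ROCM_PATH environment override instead of hard-coding /opt/rocm, treats hipcc-as-CXX as legacy behavior on Linux, and otherwise enables CMake's first-class HIP language support (hence the cmake_minimum_required(VERSION 3.21) inside the branch), forwarding AMDGPU_TARGETS to CMAKE_HIP_ARCHITECTURES when the latter is unset.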
@@ -598,13 +594,18 @@ if (LLAMA_HIPBLAS)
     add_compile_definitions(GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y})
     add_compile_definitions(K_QUANTS_PER_ITERATION=${LLAMA_CUDA_KQUANTS_ITER})
 
-    set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE CXX)
+    if (CXX_IS_HIPCC)
+        set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE CXX)
+        set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} hip::device)
+    else()
+        set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE HIP)
+    endif()
 
     if (LLAMA_STATIC)
         message(FATAL_ERROR "Static linking not supported for HIP/ROCm")
     endif()
 
-    set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} hip::device PUBLIC hip::host roc::rocblas roc::hipblas)
+    set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} PUBLIC hip::host roc::rocblas roc::hipblas)
 endif()
 
 if (LLAMA_SYCL)
@@ -1007,6 +1008,11 @@ if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" OR CMAKE_GENERATOR_PLATFORM_LWR STR
         if (GGML_COMPILER_SUPPORT_DOTPROD)
             add_compile_definitions(__ARM_FEATURE_DOTPROD)
         endif ()
+        check_cxx_source_compiles("#include <arm_neon.h>\nint main() { int8x16_t _a, _b; int32x4_t _s = vmlaq_f32(_s, _a, _b); return 0; }" GGML_COMPILER_SUPPORT_MATMUL_INT8)
+        if (GGML_COMPILER_SUPPORT_MATMUL_INT8)
+            add_compile_definitions(__ARM_FEATURE_MATMUL_INT8)
+        endif ()
+
         check_cxx_source_compiles("#include <arm_neon.h>\nint main() { float16_t _a; float16x8_t _s = vdupq_n_f16(_a); return 0; }" GGML_COMPILER_SUPPORT_FP16_VECTOR_ARITHMETIC)
         if (GGML_COMPILER_SUPPORT_FP16_VECTOR_ARITHMETIC)
             add_compile_definitions(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
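These compile-time probes define the corresponding feature macros for ggml when the toolchain accepts the test programs. A standalone sketch (not code from the package) of how such a macro typically gates a fast path, using the related __ARM_FEATURE_DOTPROD guard:

    #include <arm_neon.h>

    // Per 32-bit lane i: acc[i] += a[4i]*b[4i] + ... + a[4i+3]*b[4i+3]
    int32x4_t dot_i8(int32x4_t acc, int8x16_t a, int8x16_t b) {
    #if defined(__ARM_FEATURE_DOTPROD)
      return vdotq_s32(acc, a, b); // single SDOT instruction
    #else
      // AArch64 NEON fallback with the same lane grouping.
      int16x8_t lo = vmull_s8(vget_low_s8(a),  vget_low_s8(b));  // products 0..7
      int16x8_t hi = vmull_s8(vget_high_s8(a), vget_high_s8(b)); // products 8..15
      int32x4_t slo = vpaddlq_s16(lo); // {p0+p1, p2+p3, p4+p5, p6+p7}
      int32x4_t shi = vpaddlq_s16(hi); // {p8+p9, p10+p11, p12+p13, p14+p15}
      return vaddq_s32(acc, vpaddq_s32(slo, shi)); // pairwise regroup to 4-sums
    #endif
    }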
@@ -1035,6 +1041,9 @@ if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" OR CMAKE_GENERATOR_PLATFORM_LWR STR
             # Raspberry Pi 3, 4, Zero 2 (32-bit)
             list(APPEND ARCH_FLAGS -mno-unaligned-access)
         endif()
+        if (LLAMA_SVE)
+            list(APPEND ARCH_FLAGS -march=armv8.6-a+sve)
+        endif()
     endif()
 elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LWR MATCHES "^(x86_64|i686|amd64|x64|win32)$" OR
         (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND
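With LLAMA_SVE=ON, -march=armv8.6-a+sve makes the compiler predefine __ARM_FEATURE_SVE, which code can probe at build time (standalone sketch, not code from the package):

    #include <cstdio>
    #if defined(__ARM_FEATURE_SVE)
    #include <arm_sve.h>
    #endif

    int main() {
    #if defined(__ARM_FEATURE_SVE)
      // svcntw() reports the number of 32-bit lanes in the hardware's
      // implementation-defined SVE vector length.
      std::printf("SVE: %llu x 32-bit lanes\n", (unsigned long long) svcntw());
    #else
      std::printf("built without SVE\n");
    #endif
      return 0;
    }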
@@ -1059,6 +1068,10 @@ elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LW
             add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VNNI__>)
             add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VNNI__>)
         endif()
+        if (LLAMA_AVX512_BF16)
+            add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512BF16__>)
+            add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512BF16__>)
+        endif()
     elseif (LLAMA_AVX2)
         list(APPEND ARCH_FLAGS /arch:AVX2)
     elseif (LLAMA_AVX)
@@ -1090,6 +1103,9 @@ elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LW
         if (LLAMA_AVX512_VNNI)
             list(APPEND ARCH_FLAGS -mavx512vnni)
         endif()
+        if (LLAMA_AVX512_BF16)
+            list(APPEND ARCH_FLAGS -mavx512bf16)
+        endif()
     endif()
 elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64")
     message(STATUS "PowerPC detected")
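Both toolchain branches ultimately define __AVX512BF16__ (as explicit macros under MSVC above, via -mavx512bf16 under GCC/Clang here), which gates bf16 intrinsics such as the dot-product accumulate below (standalone sketch, not code from the package):

    #include <immintrin.h>

    #if defined(__AVX512BF16__)
    // Convert two 16-float inputs per operand to 32 bf16 values, then
    // accumulate pairwise bf16 dot products into 16 f32 partial sums.
    __m512 bf16_dot_step(__m512 acc, __m512 a_lo, __m512 a_hi,
                         __m512 b_lo, __m512 b_hi) {
      __m512bh a = _mm512_cvtne2ps_pbh(a_hi, a_lo); // low argument fills low lanes
      __m512bh b = _mm512_cvtne2ps_pbh(b_hi, b_lo);
      return _mm512_dpbf16_ps(acc, a, b);
    }
    #endif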
@@ -1099,6 +1115,17 @@ elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64")
         list(APPEND ARCH_FLAGS -mcpu=native -mtune=native)
         #TODO: Add targets for Power8/Power9 (Altivec/VSX) and Power10(MMA) and query for big endian systems (ppc64/le/be)
     endif()
+elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64")
+    message(STATUS "loongarch64 detected")
+
+    list(APPEND ARCH_FLAGS -march=loongarch64)
+    if (LLAMA_LASX)
+        list(APPEND ARCH_FLAGS -mlasx)
+    endif()
+    if (LLAMA_LSX)
+        list(APPEND ARCH_FLAGS -mlsx)
+    endif()
+
 else()
     message(STATUS "Unknown architecture")
 endif()
@@ -1187,7 +1214,6 @@ add_library(ggml OBJECT
             ${GGML_SOURCES_CUDA} ${GGML_HEADERS_CUDA}
             ${GGML_SOURCES_OPENCL} ${GGML_HEADERS_OPENCL}
             ${GGML_SOURCES_METAL} ${GGML_HEADERS_METAL}
-            ${GGML_SOURCES_MPI} ${GGML_HEADERS_MPI}
             ${GGML_SOURCES_RPC} ${GGML_HEADERS_RPC}
             ${GGML_SOURCES_EXTRA} ${GGML_HEADERS_EXTRA}
             ${GGML_SOURCES_SYCL} ${GGML_HEADERS_SYCL}
@@ -1275,7 +1301,7 @@ install(FILES ${CMAKE_CURRENT_BINARY_DIR}/LlamaConfig.cmake
 
 set(GGML_PUBLIC_HEADERS "ggml.h" "ggml-alloc.h" "ggml-backend.h"
     "${GGML_HEADERS_CUDA}" "${GGML_HEADERS_OPENCL}"
-    "${GGML_HEADERS_METAL}" "${GGML_HEADERS_MPI}" "${GGML_HEADERS_EXTRA}")
+    "${GGML_HEADERS_METAL}" "${GGML_HEADERS_EXTRA}")
 
 set_target_properties(ggml PROPERTIES PUBLIC_HEADER "${GGML_PUBLIC_HEADERS}")
 install(TARGETS ggml PUBLIC_HEADER)
package/src/llama.cpp/cmake/arm64-windows-llvm.cmake ADDED
@@ -0,0 +1,16 @@
+set( CMAKE_SYSTEM_NAME Windows )
+set( CMAKE_SYSTEM_PROCESSOR arm64 )
+
+set( target arm64-pc-windows-msvc )
+
+set( CMAKE_C_COMPILER clang )
+set( CMAKE_CXX_COMPILER clang++ )
+
+set( CMAKE_C_COMPILER_TARGET ${target} )
+set( CMAKE_CXX_COMPILER_TARGET ${target} )
+
+set( arch_c_flags "-march=armv8.7-a -fvectorize -ffp-model=fast" )
+set( warn_c_flags "-Wno-format -Wno-unused-variable -Wno-unused-function -Wno-gnu-zero-variadic-macro-arguments" )
+
+set( CMAKE_C_FLAGS_INIT "${arch_c_flags} ${warn_c_flags}" )
+set( CMAKE_CXX_FLAGS_INIT "${arch_c_flags} ${warn_c_flags}" )
package/src/llama.cpp/cmake/arm64-windows-msvc.cmake ADDED
@@ -0,0 +1,6 @@
+set( CMAKE_SYSTEM_NAME Windows )
+set( CMAKE_SYSTEM_PROCESSOR arm64 )
+
+set( target arm64-pc-windows-msvc )
+set( CMAKE_C_COMPILER_TARGET ${target} )
+set( CMAKE_CXX_COMPILER_TARGET ${target} )
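A consumer would typically select one of these files at configure time with standard CMake toolchain usage, e.g. cmake -B build -DCMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake; the exact invocation is not part of this diff.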