cui-llama.rn 1.1.4 → 1.1.6

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
@@ -12,6 +12,7 @@ set(
  ${RNLLAMA_LIB_DIR}/llama-grammar.cpp
  ${RNLLAMA_LIB_DIR}/llama-sampling.cpp
  ${RNLLAMA_LIB_DIR}/llama-vocab.cpp
+ ${RNLLAMA_LIB_DIR}/log.cpp

  ${RNLLAMA_LIB_DIR}/ggml-aarch64.c
  ${RNLLAMA_LIB_DIR}/ggml-alloc.c
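The new log.cpp entry presumably tracks upstream llama.cpp, which split its common logging helpers into a standalone translation unit around this time; the file has to be compiled into the library alongside the other vendored sources, or the refactored code's log calls would fail to link.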
@@ -523,7 +523,7 @@ Java_com_rnllama_LlamaContext_doCompletion(
  }
  }

- llama_perf_print(llama->ctx, LLAMA_PERF_TYPE_CONTEXT);
+ llama_perf_context_print(llama->ctx);
  llama->is_predicting = false;

  auto result = createWriteableMap(env);
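This rename follows llama.cpp's reworked perf API, which replaced the enum-dispatched llama_perf_print(ctx, LLAMA_PERF_TYPE_CONTEXT) with one dedicated function per object kind. A minimal sketch of the new call shape, assuming a llama.h from that era (the wrapper name is illustrative):

    #include <llama.h>

    // Print context-level timings (load, prompt eval, eval) to stderr.
    // Old API: llama_perf_print(ctx, LLAMA_PERF_TYPE_CONTEXT);
    // New API: one function per object kind.
    void print_context_perf(const llama_context * ctx) {
        llama_perf_context_print(ctx);
        // The sampler counterpart is llama_perf_sampler_print(smpl).
    }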
@@ -538,7 +538,7 @@ Java_com_rnllama_LlamaContext_doCompletion(
  putString(env, result, "stopping_word", llama->stopping_word.c_str());
  putInt(env, result, "tokens_cached", llama->n_past);

- const auto timings_token = llama_get_token_timings(llama->ctx);
+ const auto timings_token = llama_perf_context(llama->ctx);

  auto timingsResult = createWriteableMap(env);
  putInt(env, timingsResult, "prompt_n", timings_token.n_p_eval);
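Likewise, the project's old llama_get_token_timings helper gives way to upstream's llama_perf_context, which returns a llama_perf_context_data struct of raw counters; the "prompt_n" entry above maps to its n_p_eval field. A sketch of reading those counters, assuming the field names from llama.h of that era (the function name is illustrative):

    #include <cstdio>
    #include <llama.h>

    // Read the context's perf counters, mirroring the fields the JNI
    // layer copies into its timings map (n_p_eval, n_eval, ...).
    void report_timings(const llama_context * ctx) {
        const llama_perf_context_data t = llama_perf_context(ctx);
        std::printf("prompt eval: %d tokens in %.2f ms\n", t.n_p_eval, t.t_p_eval_ms);
        std::printf("eval:        %d tokens in %.2f ms\n", t.n_eval, t.t_eval_ms);
    }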
@@ -635,8 +635,7 @@ Java_com_rnllama_LlamaContext_embedding(

  llama->rewind();

- // llama_reset_timings(llama->ctx);
- llama_perf_reset(llama->ctx, LLAMA_PERF_TYPE_CONTEXT);
+ llama_perf_context_reset(llama->ctx);
  gpt_sampler_reset(llama->ctx_sampling);

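The embedding path gets the same migration, and the stale commented-out llama_reset_timings line is dropped with it, so resetting the counters is a single call again. A sketch of the reset pattern before a fresh run, assuming the same llama.h (the wrapper name is illustrative):

    #include <llama.h>

    // Zero the perf counters so the next timings readout covers
    // only the upcoming run. Replaces:
    //   llama_perf_reset(ctx, LLAMA_PERF_TYPE_CONTEXT);
    void reset_perf(llama_context * ctx) {
        llama_perf_context_reset(ctx);
    }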