llama-cpp-capacitor 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,518 @@
1
+ // Helper emitted by the TypeScript compiler: copies own enumerable properties
+ // of `s` that are not listed in `e` into a new object (object rest pattern).
+ var __rest = (this && this.__rest) || function (s, e) {
2
+ var t = {};
3
+ for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)
4
+ t[p] = s[p];
5
+ if (s != null && typeof Object.getOwnPropertySymbols === "function")
6
+ for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {
7
+ if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))
8
+ t[p[i]] = s[p[i]];
9
+ }
10
+ return t;
11
+ };
12
+ var _a, _b, _c;
13
+ import { registerPlugin } from '@capacitor/core';
14
+ // Constants
15
+ export const LLAMACPP_MTMD_DEFAULT_MEDIA_MARKER = '<__media__>';
16
+ // Event names
17
+ const EVENT_ON_INIT_CONTEXT_PROGRESS = '@LlamaCpp_onInitContextProgress';
18
+ const EVENT_ON_TOKEN = '@LlamaCpp_onToken';
19
+ const EVENT_ON_NATIVE_LOG = '@LlamaCpp_onNativeLog';
20
+ // Register the plugin
21
+ const LlamaCpp = registerPlugin('LlamaCpp');
22
+ // Log listeners management
23
+ const logListeners = [];
24
+ // Set up native log listener
25
+ LlamaCpp.addListener(EVENT_ON_NATIVE_LOG, (evt) => {
26
+ logListeners.forEach((listener) => listener(evt.level, evt.text));
27
+ });
28
+ // Start with forwarding disabled so the native side uses its default log callback
29
+ (_c = (_b = (_a = LlamaCpp === null || LlamaCpp === void 0 ? void 0 : LlamaCpp.toggleNativeLog) === null || _a === void 0 ? void 0 : _a.call(LlamaCpp, { enabled: false })) === null || _b === void 0 ? void 0 : _b.catch) === null || _c === void 0 ? void 0 : _c.call(_b, () => { });
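+ // Editor's note: a usage sketch (not part of the published file) for the log
+ // helpers exported at the bottom of this module:
+ //
+ //   import { toggleNativeLog, addNativeLogListener } from 'llama-cpp-capacitor';
+ //
+ //   await toggleNativeLog(true); // re-enable forwarding of native logs to JS
+ //   const sub = addNativeLogListener((level, text) => {
+ //     console.log(`[llama.cpp ${level}] ${text}`);
+ //   });
+ //   // later: sub.remove();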
30
+ export const RNLLAMA_MTMD_DEFAULT_MEDIA_MARKER = LLAMACPP_MTMD_DEFAULT_MEDIA_MARKER;
31
+ const validCacheTypes = [
32
+ 'f16',
33
+ 'f32',
34
+ 'bf16',
35
+ 'q8_0',
36
+ 'q4_0',
37
+ 'q4_1',
38
+ 'iq4_nl',
39
+ 'q5_0',
40
+ 'q5_1',
41
+ ];
42
+ const getJsonSchema = (responseFormat) => {
43
+ var _a;
44
+ if ((responseFormat === null || responseFormat === void 0 ? void 0 : responseFormat.type) === 'json_schema') {
45
+ return (_a = responseFormat.json_schema) === null || _a === void 0 ? void 0 : _a.schema;
46
+ }
47
+ if ((responseFormat === null || responseFormat === void 0 ? void 0 : responseFormat.type) === 'json_object') {
48
+ return responseFormat.schema || {};
49
+ }
50
+ return null;
51
+ };
52
+ export class LlamaContext {
53
+ constructor({ contextId, gpu, reasonNoGPU, model }) {
54
+ this.gpu = false;
55
+ this.reasonNoGPU = '';
56
+ this.id = contextId;
57
+ this.gpu = gpu;
58
+ this.reasonNoGPU = reasonNoGPU;
59
+ this.model = model;
60
+ }
61
+ /**
62
+ * Load cached prompt & completion state from a file.
63
+ */
64
+ async loadSession(filepath) {
65
+ let path = filepath;
66
+ if (path.startsWith('file://'))
67
+ path = path.slice(7);
68
+ return LlamaCpp.loadSession({ contextId: this.id, filepath: path });
69
+ }
70
+ /**
71
+ * Save current cached prompt & completion state to a file.
72
+ */
73
+ async saveSession(filepath, options) {
74
+ return LlamaCpp.saveSession({
75
+ contextId: this.id,
76
+ filepath,
77
+ size: (options === null || options === void 0 ? void 0 : options.tokenSize) || -1
78
+ });
79
+ }
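+ // Editor's usage sketch (ctx is a LlamaContext returned by initLlama, defined
+ // near the end of this file; paths are hypothetical):
+ //
+ //   // Persist the current prompt/completion cache, keeping up to 1024 tokens.
+ //   await ctx.saveSession('/data/llama/session.bin', { tokenSize: 1024 });
+ //   // Restore it later; a leading file:// prefix is stripped automatically.
+ //   const session = await ctx.loadSession('file:///data/llama/session.bin');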
80
+ isLlamaChatSupported() {
81
+ return !!this.model.chatTemplates.llamaChat;
82
+ }
83
+ isJinjaSupported() {
84
+ const { minja } = this.model.chatTemplates;
85
+ return !!(minja === null || minja === void 0 ? void 0 : minja.toolUse) || !!(minja === null || minja === void 0 ? void 0 : minja.default);
86
+ }
87
+ async getFormattedChat(messages, template, params) {
88
+ var _a;
89
+ const mediaPaths = [];
90
+ const chat = messages.map((msg) => {
91
+ if (Array.isArray(msg.content)) {
92
+ const content = msg.content.map((part) => {
93
+ var _a;
94
+ // Handle multimodal content
95
+ if (part.type === 'image_url') {
96
+ let path = ((_a = part.image_url) === null || _a === void 0 ? void 0 : _a.url) || '';
97
+ if (path === null || path === void 0 ? void 0 : path.startsWith('file://'))
98
+ path = path.slice(7);
99
+ mediaPaths.push(path);
100
+ return {
101
+ type: 'text',
102
+ text: RNLLAMA_MTMD_DEFAULT_MEDIA_MARKER,
103
+ };
104
+ }
105
+ else if (part.type === 'input_audio') {
106
+ const { input_audio: audio } = part;
107
+ if (!audio)
108
+ throw new Error('input_audio is required');
109
+ const { format } = audio;
110
+ if (format != 'wav' && format != 'mp3') {
111
+ throw new Error(`Unsupported audio format: ${format}`);
112
+ }
113
+ if (audio.url) {
114
+ const path = audio.url.replace(/file:\/\//, '');
115
+ mediaPaths.push(path);
116
+ }
117
+ else if (audio.data) {
118
+ mediaPaths.push(audio.data);
119
+ }
120
+ return {
121
+ type: 'text',
122
+ text: RNLLAMA_MTMD_DEFAULT_MEDIA_MARKER,
123
+ };
124
+ }
125
+ return part;
126
+ });
127
+ return Object.assign(Object.assign({}, msg), { content });
128
+ }
129
+ return msg;
130
+ });
131
+ const useJinja = this.isJinjaSupported() && (params === null || params === void 0 ? void 0 : params.jinja);
132
+ let tmpl;
133
+ if (template)
134
+ tmpl = template; // Force replace if provided
135
+ const jsonSchema = getJsonSchema(params === null || params === void 0 ? void 0 : params.response_format);
136
+ const result = await LlamaCpp.getFormattedChat({
137
+ contextId: this.id,
138
+ messages: JSON.stringify(chat),
139
+ chatTemplate: tmpl,
140
+ params: {
141
+ jinja: useJinja,
142
+ json_schema: jsonSchema ? JSON.stringify(jsonSchema) : undefined,
143
+ tools: (params === null || params === void 0 ? void 0 : params.tools) ? JSON.stringify(params.tools) : undefined,
144
+ parallel_tool_calls: (params === null || params === void 0 ? void 0 : params.parallel_tool_calls) ? JSON.stringify(params.parallel_tool_calls)
145
+ : undefined,
146
+ tool_choice: params === null || params === void 0 ? void 0 : params.tool_choice,
147
+ enable_thinking: (_a = params === null || params === void 0 ? void 0 : params.enable_thinking) !== null && _a !== void 0 ? _a : true,
148
+ add_generation_prompt: params === null || params === void 0 ? void 0 : params.add_generation_prompt,
149
+ now: typeof (params === null || params === void 0 ? void 0 : params.now) === 'number' ? params.now.toString() : params === null || params === void 0 ? void 0 : params.now,
150
+ chat_template_kwargs: (params === null || params === void 0 ? void 0 : params.chat_template_kwargs) ? JSON.stringify(Object.entries(params.chat_template_kwargs).reduce((acc, [key, value]) => {
151
+ acc[key] = JSON.stringify(value); // Each value is a stringified JSON object
152
+ return acc;
153
+ }, {})) : undefined,
154
+ },
155
+ });
156
+ if (!useJinja) {
157
+ return {
158
+ type: 'llama-chat',
159
+ prompt: result,
160
+ has_media: mediaPaths.length > 0,
161
+ media_paths: mediaPaths,
162
+ };
163
+ }
164
+ const jinjaResult = result;
165
+ jinjaResult.type = 'jinja';
166
+ jinjaResult.has_media = mediaPaths.length > 0;
167
+ jinjaResult.media_paths = mediaPaths;
168
+ return jinjaResult;
169
+ }
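+ // Editor's usage sketch: an image_url part is replaced by the media marker and
+ // its path (file:// stripped) is collected into media_paths.
+ //
+ //   const formatted = await ctx.getFormattedChat(
+ //     [{
+ //       role: 'user',
+ //       content: [
+ //         { type: 'text', text: 'Describe this image.' },
+ //         { type: 'image_url', image_url: { url: 'file:///tmp/photo.jpg' } },
+ //       ],
+ //     }],
+ //     null,
+ //     { jinja: true },
+ //   );
+ //   // formatted.has_media === true; formatted.media_paths[0] === '/tmp/photo.jpg'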
170
+ /**
171
+ * Generate a completion based on the provided parameters
172
+ * @param params Completion parameters including prompt or messages
173
+ * @param callback Optional callback for token-by-token streaming
174
+ * @returns Promise resolving to the completion result
175
+ *
176
+ * Note: For multimodal support, you can include a media_paths parameter.
177
+ * This will process the images and add them to the context before generating text.
178
+ * Multimodal support must be enabled via initMultimodal() first.
179
+ */
180
+ async completion(params, callback) {
181
+ const nativeParams = Object.assign(Object.assign({}, params), { prompt: params.prompt || '', emit_partial_completion: !!callback });
182
+ if (params.messages) {
183
+ const formattedResult = await this.getFormattedChat(params.messages, params.chat_template || params.chatTemplate, {
184
+ jinja: params.jinja,
185
+ tools: params.tools,
186
+ parallel_tool_calls: params.parallel_tool_calls,
187
+ tool_choice: params.tool_choice,
188
+ enable_thinking: params.enable_thinking,
189
+ add_generation_prompt: params.add_generation_prompt,
190
+ now: params.now,
191
+ chat_template_kwargs: params.chat_template_kwargs,
192
+ });
193
+ if (formattedResult.type === 'jinja') {
194
+ const jinjaResult = formattedResult;
195
+ nativeParams.prompt = jinjaResult.prompt || '';
196
+ if (typeof jinjaResult.chat_format === 'number')
197
+ nativeParams.chat_format = jinjaResult.chat_format;
198
+ if (jinjaResult.grammar)
199
+ nativeParams.grammar = jinjaResult.grammar;
200
+ if (typeof jinjaResult.grammar_lazy === 'boolean')
201
+ nativeParams.grammar_lazy = jinjaResult.grammar_lazy;
202
+ if (jinjaResult.grammar_triggers)
203
+ nativeParams.grammar_triggers = jinjaResult.grammar_triggers;
204
+ if (jinjaResult.preserved_tokens)
205
+ nativeParams.preserved_tokens = jinjaResult.preserved_tokens;
206
+ if (jinjaResult.additional_stops) {
207
+ if (!nativeParams.stop)
208
+ nativeParams.stop = [];
209
+ nativeParams.stop.push(...jinjaResult.additional_stops);
210
+ }
211
+ if (jinjaResult.has_media) {
212
+ nativeParams.media_paths = jinjaResult.media_paths;
213
+ }
214
+ }
215
+ else if (formattedResult.type === 'llama-chat') {
216
+ const llamaChatResult = formattedResult;
217
+ nativeParams.prompt = llamaChatResult.prompt || '';
218
+ if (llamaChatResult.has_media) {
219
+ nativeParams.media_paths = llamaChatResult.media_paths;
220
+ }
221
+ }
222
+ }
223
+ else {
224
+ nativeParams.prompt = params.prompt || '';
225
+ }
226
+ // Fall back to explicitly provided media_paths if none were extracted from messages
227
+ if (!nativeParams.media_paths && params.media_paths) {
228
+ nativeParams.media_paths = params.media_paths;
229
+ }
230
+ if (nativeParams.response_format && !nativeParams.grammar) {
231
+ const jsonSchema = getJsonSchema(params.response_format);
232
+ if (jsonSchema)
233
+ nativeParams.json_schema = JSON.stringify(jsonSchema);
234
+ }
235
+ let tokenListener = callback &&
236
+ LlamaCpp.addListener(EVENT_ON_TOKEN, (evt) => {
237
+ const { contextId, tokenResult } = evt;
238
+ if (contextId !== this.id)
239
+ return;
240
+ callback(tokenResult);
241
+ });
242
+ if (!nativeParams.prompt)
243
+ throw new Error('Prompt is required');
244
+ const promise = LlamaCpp.completion({ contextId: this.id, params: nativeParams });
245
+ return promise
246
+ .then((completionResult) => {
247
+ tokenListener === null || tokenListener === void 0 ? void 0 : tokenListener.remove();
248
+ tokenListener = null;
249
+ return completionResult;
250
+ })
251
+ .catch((err) => {
252
+ tokenListener === null || tokenListener === void 0 ? void 0 : tokenListener.remove();
253
+ tokenListener = null;
254
+ throw err;
255
+ });
256
+ }
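+ // Editor's usage sketch: streaming tokens via the optional callback. The
+ // sampling fields shown (n_predict, temperature) are assumed from common
+ // llama.cpp parameter names, not verified against this plugin's typings.
+ //
+ //   let text = '';
+ //   const result = await ctx.completion(
+ //     {
+ //       messages: [{ role: 'user', content: 'Write a haiku about the sea.' }],
+ //       n_predict: 128,
+ //       temperature: 0.7,
+ //     },
+ //     (tok) => { text += tok.token; },
+ //   );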
257
+ stopCompletion() {
258
+ return LlamaCpp.stopCompletion({ contextId: this.id });
259
+ }
260
+ /**
261
+ * Tokenize text or text with images
262
+ * @param text Text to tokenize
263
+ * @param params.media_paths Array of image paths to tokenize (if multimodal is enabled)
264
+ * @returns Promise resolving to the tokenize result
265
+ */
266
+ tokenize(text, { media_paths: mediaPaths, } = {}) {
267
+ return LlamaCpp.tokenize({ contextId: this.id, text, imagePaths: mediaPaths });
268
+ }
269
+ detokenize(tokens) {
270
+ return LlamaCpp.detokenize({ contextId: this.id, tokens });
271
+ }
272
+ embedding(text, params) {
273
+ return LlamaCpp.embedding({ contextId: this.id, text, params: params || {} });
274
+ }
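+ // Editor's usage sketch (assumes NativeTokenizeResult exposes a `tokens`
+ // array):
+ //
+ //   const { tokens } = await ctx.tokenize('Hello world');
+ //   const roundTrip = await ctx.detokenize(tokens);
+ //   const embedding = await ctx.embedding('Hello world');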
275
+ /**
276
+ * Rerank documents based on relevance to a query
277
+ * @param query The query text to rank documents against
278
+ * @param documents Array of document texts to rank
279
+ * @param params Optional reranking parameters
280
+ * @returns Promise resolving to an array of ranking results with scores and indices
281
+ */
282
+ async rerank(query, documents, params) {
283
+ const results = await LlamaCpp.rerank({
284
+ contextId: this.id,
285
+ query,
286
+ documents,
287
+ params: params || {}
288
+ });
289
+ // Attach each document's text, then sort by score, descending
290
+ return results
291
+ .map((result) => (Object.assign(Object.assign({}, result), { document: documents[result.index] })))
292
+ .sort((a, b) => b.score - a.score);
293
+ }
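+ // Editor's usage sketch: results come back sorted best-first, each carrying
+ // its original index and document text.
+ //
+ //   const ranked = await ctx.rerank('What is the capital of France?', [
+ //     'Berlin is the capital of Germany.',
+ //     'Paris is the capital of France.',
+ //   ]);
+ //   // ranked[0].document should be the Paris sentence.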
294
+ async bench(pp, tg, pl, nr) {
295
+ const result = await LlamaCpp.bench({ contextId: this.id, pp, tg, pl, nr });
296
+ const [modelDesc, modelSize, modelNParams, ppAvg, ppStd, tgAvg, tgStd] = JSON.parse(result);
297
+ return {
298
+ modelDesc,
299
+ modelSize,
300
+ modelNParams,
301
+ ppAvg,
302
+ ppStd,
303
+ tgAvg,
304
+ tgStd,
305
+ };
306
+ }
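+ // Editor's usage sketch: the argument order (prompt-processing tokens,
+ // text-generation tokens, parallel sequences, repetitions) is assumed from
+ // llama.cpp's llama-bench pp/tg/pl/nr conventions.
+ //
+ //   const bench = await ctx.bench(512, 128, 1, 3);
+ //   console.log(bench.ppAvg, bench.tgAvg);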
307
+ async applyLoraAdapters(loraList) {
308
+ let loraAdapters = [];
309
+ if (loraList)
310
+ loraAdapters = loraList.map((l) => ({
311
+ path: l.path.replace(/file:\/\//, ''),
312
+ scaled: l.scaled,
313
+ }));
314
+ return LlamaCpp.applyLoraAdapters({ contextId: this.id, loraAdapters });
315
+ }
316
+ async removeLoraAdapters() {
317
+ return LlamaCpp.removeLoraAdapters({ contextId: this.id });
318
+ }
319
+ async getLoadedLoraAdapters() {
320
+ return LlamaCpp.getLoadedLoraAdapters({ contextId: this.id });
321
+ }
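+ // Editor's usage sketch (the adapter path is hypothetical):
+ //
+ //   await ctx.applyLoraAdapters([{ path: 'file:///models/adapter.gguf', scaled: 1.0 }]);
+ //   const active = await ctx.getLoadedLoraAdapters();
+ //   await ctx.removeLoraAdapters();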
322
+ /**
323
+ * Initialize multimodal support with an mmproj file
324
+ * @param params Parameters for multimodal support
325
+ * @param params.path Path to the multimodal projector file
326
+ * @param params.use_gpu Whether to use GPU
327
+ * @returns Promise resolving to true if initialization was successful
328
+ */
329
+ async initMultimodal({ path, use_gpu: useGpu, }) {
330
+ if (path.startsWith('file://'))
331
+ path = path.slice(7);
332
+ return LlamaCpp.initMultimodal({
333
+ contextId: this.id,
334
+ params: {
335
+ path,
336
+ use_gpu: useGpu !== null && useGpu !== void 0 ? useGpu : true,
337
+ },
338
+ });
339
+ }
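+ // Editor's usage sketch (the mmproj path is hypothetical;
+ // getMultimodalSupport is defined just below):
+ //
+ //   const ok = await ctx.initMultimodal({ path: 'file:///models/mmproj-f16.gguf', use_gpu: true });
+ //   const { vision, audio } = await ctx.getMultimodalSupport();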
340
+ /**
341
+ * Check if multimodal support is enabled
342
+ * @returns Promise resolving to true if multimodal is enabled
343
+ */
344
+ async isMultimodalEnabled() {
345
+ return await LlamaCpp.isMultimodalEnabled({ contextId: this.id });
346
+ }
347
+ /**
348
+ * Check multimodal support
349
+ * @returns Promise resolving to an object with vision and audio support
350
+ */
351
+ async getMultimodalSupport() {
352
+ return await LlamaCpp.getMultimodalSupport({ contextId: this.id });
353
+ }
354
+ /**
355
+ * Release multimodal support
356
+ * @returns Promise resolving to void
357
+ */
358
+ async releaseMultimodal() {
359
+ return await LlamaCpp.releaseMultimodal({ contextId: this.id });
360
+ }
361
+ /**
362
+ * Initialize TTS support with a vocoder model
363
+ * @param params Parameters for TTS support
364
+ * @param params.path Path to the vocoder model
365
+ * @param params.n_batch Batch size for the vocoder model
366
+ * @returns Promise resolving to true if initialization was successful
367
+ */
368
+ async initVocoder({ path, n_batch: nBatch }) {
369
+ if (path.startsWith('file://'))
370
+ path = path.slice(7);
371
+ return await LlamaCpp.initVocoder({
372
+ contextId: this.id,
373
+ params: { path, n_batch: nBatch }
374
+ });
375
+ }
376
+ /**
377
+ * Check if TTS support is enabled
378
+ * @returns Promise resolving to true if TTS is enabled
379
+ */
380
+ async isVocoderEnabled() {
381
+ return await LlamaCpp.isVocoderEnabled({ contextId: this.id });
382
+ }
383
+ /**
384
+ * Get a formatted audio completion prompt
385
+ * @param speaker Speaker configuration object (serialized to JSON internally), or null
386
+ * @param textToSpeak Text to speak
387
+ * @returns Promise resolving to the formatted audio completion result with prompt and grammar
388
+ */
389
+ async getFormattedAudioCompletion(speaker, textToSpeak) {
390
+ return await LlamaCpp.getFormattedAudioCompletion({
391
+ contextId: this.id,
392
+ speakerJsonStr: speaker ? JSON.stringify(speaker) : '',
393
+ textToSpeak,
394
+ });
395
+ }
396
+ /**
397
+ * Get guide tokens for audio completion
398
+ * @param textToSpeak Text to speak
399
+ * @returns Promise resolving to the guide tokens
400
+ */
401
+ async getAudioCompletionGuideTokens(textToSpeak) {
402
+ return await LlamaCpp.getAudioCompletionGuideTokens({ contextId: this.id, textToSpeak });
403
+ }
404
+ /**
405
+ * Decode audio tokens
406
+ * @param tokens Array of audio tokens
407
+ * @returns Promise resolving to the decoded audio samples
408
+ */
409
+ async decodeAudioTokens(tokens) {
410
+ return await LlamaCpp.decodeAudioTokens({ contextId: this.id, tokens });
411
+ }
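+ // Editor's sketch of the TTS flow suggested by these methods; the middle step
+ // (producing audio tokens with completion()) is an assumption, and the
+ // vocoder path is hypothetical.
+ //
+ //   await ctx.initVocoder({ path: 'file:///models/vocoder.gguf' });
+ //   const { prompt, grammar } = await ctx.getFormattedAudioCompletion(null, 'Hello there!');
+ //   const guideTokens = await ctx.getAudioCompletionGuideTokens('Hello there!');
+ //   // ...run completion() with prompt/grammar to collect audio tokens...
+ //   const samples = await ctx.decodeAudioTokens(audioTokens);
+ //   await ctx.releaseVocoder();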
412
+ /**
413
+ * Release TTS support
414
+ * @returns Promise resolving to void
415
+ */
416
+ async releaseVocoder() {
417
+ return await LlamaCpp.releaseVocoder({ contextId: this.id });
418
+ }
419
+ async release() {
420
+ return LlamaCpp.releaseContext({ contextId: this.id });
421
+ }
422
+ }
423
+ export async function toggleNativeLog(enabled) {
424
+ return LlamaCpp.toggleNativeLog({ enabled });
425
+ }
426
+ export function addNativeLogListener(listener) {
427
+ logListeners.push(listener);
428
+ return {
429
+ remove: () => {
430
+ logListeners.splice(logListeners.indexOf(listener), 1);
431
+ },
432
+ };
433
+ }
434
+ export async function setContextLimit(limit) {
435
+ return LlamaCpp.setContextLimit({ limit });
436
+ }
437
+ let contextIdCounter = 0;
438
+ const contextIdRandom = () => process.env.NODE_ENV === 'test' ? 0 : Math.floor(Math.random() * 100000);
439
+ const modelInfoSkip = [
440
+ // Large fields
441
+ 'tokenizer.ggml.tokens',
442
+ 'tokenizer.ggml.token_type',
443
+ 'tokenizer.ggml.merges',
444
+ 'tokenizer.ggml.scores',
445
+ ];
446
+ export async function loadLlamaModelInfo(model) {
447
+ let path = model;
448
+ if (path.startsWith('file://'))
449
+ path = path.slice(7);
450
+ return LlamaCpp.modelInfo({ path, skip: modelInfoSkip });
451
+ }
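+ // Editor's usage sketch: inspect GGUF metadata without creating a context;
+ // the large tokenizer fields listed above are skipped.
+ //
+ //   const info = await loadLlamaModelInfo('file:///models/model.gguf');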
452
+ const poolTypeMap = {
453
+ // -1 (unspecified) is represented by leaving the value undefined
454
+ none: 0,
455
+ mean: 1,
456
+ cls: 2,
457
+ last: 3,
458
+ rank: 4,
459
+ };
460
+ export async function initLlama(_a, onProgress) {
461
+ var { model, is_model_asset: isModelAsset, pooling_type: poolingType, lora, lora_list: loraList } = _a, rest = __rest(_a, ["model", "is_model_asset", "pooling_type", "lora", "lora_list"]);
462
+ let path = model;
463
+ if (path.startsWith('file://'))
464
+ path = path.slice(7);
465
+ let loraPath = lora;
466
+ if (loraPath === null || loraPath === void 0 ? void 0 : loraPath.startsWith('file://'))
467
+ loraPath = loraPath.slice(7);
468
+ let loraAdapters = [];
469
+ if (loraList)
470
+ loraAdapters = loraList.map((l) => ({
471
+ path: l.path.replace(/file:\/\//, ''),
472
+ scaled: l.scaled,
473
+ }));
474
+ const contextId = contextIdCounter + contextIdRandom();
475
+ contextIdCounter += 1;
476
+ let removeProgressListener = null;
477
+ if (onProgress) {
478
+ removeProgressListener = LlamaCpp.addListener(EVENT_ON_INIT_CONTEXT_PROGRESS, (evt) => {
479
+ if (evt.contextId !== contextId)
480
+ return;
481
+ onProgress(evt.progress);
482
+ });
483
+ }
484
+ const poolType = poolTypeMap[poolingType];
485
+ if (rest.cache_type_k && !validCacheTypes.includes(rest.cache_type_k)) {
486
+ console.warn(`[LlamaCpp] initLlama: Invalid cache K type: ${rest.cache_type_k}, falling back to f16`);
487
+ delete rest.cache_type_k;
488
+ }
489
+ if (rest.cache_type_v && !validCacheTypes.includes(rest.cache_type_v)) {
490
+ console.warn(`[LlamaCpp] initLlama: Invalid cache V type: ${rest.cache_type_v}, falling back to f16`);
491
+ delete rest.cache_type_v;
492
+ }
493
+ const { gpu, reasonNoGPU, model: modelDetails, androidLib, } = await LlamaCpp.initContext({
494
+ contextId,
495
+ params: Object.assign({ model: path, is_model_asset: !!isModelAsset, use_progress_callback: !!onProgress, pooling_type: poolType, lora: loraPath, lora_list: loraAdapters }, rest),
496
+ }).catch((err) => {
497
+ removeProgressListener === null || removeProgressListener === void 0 ? void 0 : removeProgressListener.remove();
498
+ throw err;
499
+ });
500
+ removeProgressListener === null || removeProgressListener === void 0 ? void 0 : removeProgressListener.remove();
501
+ return new LlamaContext({
502
+ contextId,
503
+ gpu,
504
+ reasonNoGPU,
505
+ model: modelDetails,
506
+ androidLib,
507
+ });
508
+ }
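+ // Editor's usage sketch: n_ctx and n_gpu_layers are assumed from common
+ // llama.cpp context parameters; the model path is hypothetical. cache_type_k
+ // must be one of validCacheTypes above.
+ //
+ //   const ctx = await initLlama(
+ //     {
+ //       model: 'file:///models/llama-3.2-1b-q4_0.gguf',
+ //       n_ctx: 2048,
+ //       n_gpu_layers: 99,
+ //       cache_type_k: 'q8_0',
+ //     },
+ //     (progress) => console.log(`Loading: ${progress}%`),
+ //   );
+ //   console.log(ctx.gpu ? 'GPU enabled' : `CPU only: ${ctx.reasonNoGPU}`);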
509
+ export async function releaseAllLlama() {
510
+ return LlamaCpp.releaseAllContexts();
511
+ }
512
+ export const BuildInfo = {
513
+ number: '1.0.0',
514
+ commit: 'capacitor-llama-cpp',
515
+ };
516
+ // Re-export the plugin for direct access
517
+ export { LlamaCpp };
518
+ //# sourceMappingURL=index.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":";;;;;;;;;;;;AAAA,OAAO,EAAE,cAAc,EAAE,MAAM,iBAAiB,CAAC;AA+BjD,YAAY;AACZ,MAAM,CAAC,MAAM,kCAAkC,GAAG,aAAa,CAAC;AAEhE,cAAc;AACd,MAAM,8BAA8B,GAAG,iCAAiC,CAAC;AACzE,MAAM,cAAc,GAAG,mBAAmB,CAAC;AAC3C,MAAM,mBAAmB,GAAG,uBAAuB,CAAC;AAEpD,sBAAsB;AACtB,MAAM,QAAQ,GAAG,cAAc,CAAiB,UAAU,CAAC,CAAC;AAE5D,2BAA2B;AAC3B,MAAM,YAAY,GAAiD,EAAE,CAAC;AAEtE,6BAA6B;AAC7B,QAAQ,CAAC,WAAW,CAAC,mBAAmB,EAAE,CAAC,GAAoC,EAAE,EAAE;IACjF,YAAY,CAAC,OAAO,CAAC,CAAC,QAAQ,EAAE,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,KAAK,EAAE,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC;AACpE,CAAC,CAAC,CAAC;AAEH,4CAA4C;AAC5C,kBAAA,QAAQ,aAAR,QAAQ,uBAAR,QAAQ,CAAE,eAAe,+CAAzB,QAAQ,EAAoB,EAAE,OAAO,EAAE,KAAK,EAAE,2CAAG,KAAK,mDAAG,GAAG,EAAE,GAAE,CAAC,EAAE;AAiCnE,MAAM,CAAC,MAAM,iCAAiC,GAAG,kCAAkC,CAAC;AA0BpF,MAAM,eAAe,GAAG;IACtB,KAAK;IACL,KAAK;IACL,MAAM;IACN,MAAM;IACN,MAAM;IACN,MAAM;IACN,QAAQ;IACR,MAAM;IACN,MAAM;CACP,CAAC;AAEF,MAAM,aAAa,GAAG,CAAC,cAAyC,EAAE,EAAE;;IAClE,IAAI,CAAA,cAAc,aAAd,cAAc,uBAAd,cAAc,CAAE,IAAI,MAAK,aAAa,EAAE;QAC1C,aAAO,cAAc,CAAC,WAAW,0CAAE,MAAM,CAAC;KAC3C;IACD,IAAI,CAAA,cAAc,aAAd,cAAc,uBAAd,cAAc,CAAE,IAAI,MAAK,aAAa,EAAE;QAC1C,OAAO,cAAc,CAAC,MAAM,IAAI,EAAE,CAAC;KACpC;IACD,OAAO,IAAI,CAAC;AACd,CAAC,CAAC;AAEF,MAAM,OAAO,YAAY;IAMvB,YAAY,EAAE,SAAS,EAAE,GAAG,EAAE,WAAW,EAAE,KAAK,EAAsB;QAJtE,QAAG,GAAY,KAAK,CAAC;QACrB,gBAAW,GAAW,EAAE,CAAC;QAIvB,IAAI,CAAC,EAAE,GAAG,SAAS,CAAC;QACpB,IAAI,CAAC,GAAG,GAAG,GAAG,CAAC;QACf,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;QAC/B,IAAI,CAAC,KAAK,GAAG,KAAK,CAAC;IACrB,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,WAAW,CAAC,QAAgB;QAChC,IAAI,IAAI,GAAG,QAAQ,CAAC;QACpB,IAAI,IAAI,CAAC,UAAU,CAAC,SAAS,CAAC;YAAE,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;QACrD,OAAO,QAAQ,CAAC,WAAW,CAAC,EAAE,SAAS,EAAE,IAAI,CAAC,EAAE,EAAE,QAAQ,EAAE,IAAI,EAAE,CAAC,CAAC;IACtE,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,WAAW,CACf,QAAgB,EAChB,OAA+B;QAE/B,OAAO,QAAQ,CAAC,WAAW,CAAC;YAC1B,SAAS,EAAE,IAAI,CAAC,EAAE;YAClB,QAAQ;YACR,IAAI,EAAE,CAAA,OAAO,aAAP,OAAO,uBAAP,OAAO,CAAE,SAAS,KAAI,CAAC,CAAC;SAC/B,CAAC,CAAC;IACL,CAAC;IAED,oBAAoB;QAClB,OAAO,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,aAAa,CAAC,SAAS,CAAC;IAC9C,CAAC;IAED,gBAAgB;QACd,MAAM,EAAE,KAAK,EAAE,GAAG,IAAI,CAAC,KAAK,CAAC,aAAa,CAAC;QAC3C,OAAO,CAAC,EAAC,KAAK,aAAL,KAAK,uBAAL,KAAK,CAAE,OAAO,CAAA,IAAI,CAAC,EAAC,KAAK,aAAL,KAAK,uBAAL,KAAK,CAAE,OAAO,CAAA,CAAC;IAC9C,CAAC;IAED,KAAK,CAAC,gBAAgB,CACpB,QAAuC,EACvC,QAAwB,EACxB,MAUC;;QAED,MAAM,UAAU,GAAa,EAAE,CAAC;QAChC,MAAM,IAAI,GAAG,QAAQ,CAAC,GAAG,CAAC,CAAC,GAAG,EAAE,EAAE;YAChC,IAAI,KAAK,CAAC,OAAO,CAAC,GAAG,CAAC,OAAO,CAAC,EAAE;gBAC9B,MAAM,OAAO,GAAG,GAAG,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,IAAI,EAAE,EAAE;;oBACvC,4BAA4B;oBAC5B,IAAI,IAAI,CAAC,IAAI,KAAK,WAAW,EAAE;wBAC7B,IAAI,IAAI,GAAG,OAAA,IAAI,CAAC,SAAS,0CAAE,GAAG,KAAI,EAAE,CAAC;wBACrC,IAAI,IAAI,aAAJ,IAAI,uBAAJ,IAAI,CAAE,UAAU,CAAC,SAAS;4BAAG,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;wBACtD,UAAU,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;wBACtB,OAAO;4BACL,IAAI,EAAE,MAAM;4BACZ,IAAI,EAAE,iCAAiC;yBACxC,CAAC;qBACH;yBAAM,IAAI,IAAI,CAAC,IAAI,KAAK,aAAa,EAAE;wBACtC,MAAM,EAAE,WAAW,EAAE,KAAK,EAAE,GAAG,IAAI,CAAC;wBACpC,IAAI,CAAC,KAAK;4BAAE,MAAM,IAAI,KAAK,CAAC,yBAAyB,CAAC,CAAC;wBAEvD,MAAM,EAAE,MAAM,EAAE,GAAG,KAAK,CAAC;wBACzB,IAAI,MAAM,IAAI,KAAK,IAAI,MAAM,IAAI,KAAK,EAAE;4BACtC,MAAM,IAAI,KAAK,CAAC,6BAA6B,MAAM,EAAE,CAAC,CAAC;yBACxD;wBACD,IAAI,KAAK,CAAC,GAAG,EAAE;4BACb,MAAM,IAAI,GAAG,KAAK,CAAC,GAAG,CAAC,OAAO,CAAC,WAAW,EAAE,EAAE,CAAC,CAAC;4BAChD,UAAU,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;yBACvB;6BAAM,IAAI,KAAK,CAAC,IAAI,EAAE;4BACrB,UAAU,CAAC,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC;yBAC7B;wBACD,OAAO;4BACL,IAAI,EAAE,MAAM;4BACZ,IAAI,EAA
E,iCAAiC;yBACxC,CAAC;qBACH;oBACD,OAAO,IAAI,CAAC;gBACd,CAAC,CAAC,CAAC;gBAEH,uCACK,GAAG,KACN,OAAO,IACP;aACH;YACD,OAAO,GAAG,CAAC;QACb,CAAC,CAA6B,CAAC;QAE/B,MAAM,QAAQ,GAAG,IAAI,CAAC,gBAAgB,EAAE,KAAI,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,KAAK,CAAA,CAAC;QAC1D,IAAI,IAAI,CAAC;QACT,IAAI,QAAQ;YAAE,IAAI,GAAG,QAAQ,CAAC,CAAC,4BAA4B;QAC3D,MAAM,UAAU,GAAG,aAAa,CAAC,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,eAAe,CAAC,CAAC;QAE1D,MAAM,MAAM,GAAG,MAAM,QAAQ,CAAC,gBAAgB,CAAC;YAC7C,SAAS,EAAE,IAAI,CAAC,EAAE;YAClB,QAAQ,EAAE,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC;YAC9B,YAAY,EAAE,IAAI;YAClB,MAAM,EAAE;gBACN,KAAK,EAAE,QAAQ;gBACf,WAAW,EAAE,UAAU,CAAC,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,SAAS;gBAChE,KAAK,EAAE,CAAA,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,KAAK,EAAC,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,SAAS;gBAC/D,mBAAmB,EAAE,CAAA,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,mBAAmB,EAC9C,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,MAAM,CAAC,mBAAmB,CAAC;oBAC5C,CAAC,CAAC,SAAS;gBACb,WAAW,EAAE,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,WAAW;gBAChC,eAAe,QAAE,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,eAAe,mCAAI,IAAI;gBAChD,qBAAqB,EAAE,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,qBAAqB;gBACpD,GAAG,EAAE,QAAO,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,GAAG,CAAA,KAAK,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,GAAG;gBAC1E,oBAAoB,EAAE,CAAA,MAAM,aAAN,MAAM,uBAAN,MAAM,CAAE,oBAAoB,EAAC,CAAC,CAAC,IAAI,CAAC,SAAS,CACjE,MAAM,CAAC,OAAO,CAAC,MAAM,CAAC,oBAAoB,CAAC,CAAC,MAAM,CAAC,CAAC,GAAG,EAAE,CAAC,GAAG,EAAE,KAAK,CAAC,EAAE,EAAE;oBACvE,GAAG,CAAC,GAAG,CAAC,GAAG,IAAI,CAAC,SAAS,CAAC,KAAK,CAAC,CAAC,CAAC,0CAA0C;oBAC5E,OAAO,GAAG,CAAC;gBACb,CAAC,EAAE,EAAyB,CAAC,CAC9B,CAAC,CAAC,CAAC,SAAS;aACd;SACF,CAAC,CAAC;QAEH,IAAI,CAAC,QAAQ,EAAE;YACb,OAAO;gBACL,IAAI,EAAE,YAAY;gBAClB,MAAM,EAAE,MAAgB;gBACxB,SAAS,EAAE,UAAU,CAAC,MAAM,GAAG,CAAC;gBAChC,WAAW,EAAE,UAAU;aACxB,CAAC;SACH;QACD,MAAM,WAAW,GAAG,MAAkC,CAAC;QACvD,WAAW,CAAC,IAAI,GAAG,OAAO,CAAC;QAC3B,WAAW,CAAC,SAAS,GAAG,UAAU,CAAC,MAAM,GAAG,CAAC,CAAC;QAC9C,WAAW,CAAC,WAAW,GAAG,UAAU,CAAC;QACrC,OAAO,WAAW,CAAC;IACrB,CAAC;IAED;;;;;;;;;OASG;IACH,KAAK,CAAC,UAAU,CACd,MAAwB,EACxB,QAAoC;QAEpC,MAAM,YAAY,mCACb,MAAM,KACT,MAAM,EAAE,MAAM,CAAC,MAAM,IAAI,EAAE,EAC3B,uBAAuB,EAAE,CAAC,CAAC,QAAQ,GACpC,CAAC;QAEF,IAAI,MAAM,CAAC,QAAQ,EAAE;YACnB,MAAM,eAAe,GAAG,MAAM,IAAI,CAAC,gBAAgB,CACjD,MAAM,CAAC,QAAQ,EACf,MAAM,CAAC,aAAa,IAAI,MAAM,CAAC,YAAY,EAC3C;gBACE,KAAK,EAAE,MAAM,CAAC,KAAK;gBACnB,KAAK,EAAE,MAAM,CAAC,KAAK;gBACnB,mBAAmB,EAAE,MAAM,CAAC,mBAAmB;gBAC/C,WAAW,EAAE,MAAM,CAAC,WAAW;gBAC/B,eAAe,EAAE,MAAM,CAAC,eAAe;gBACvC,qBAAqB,EAAE,MAAM,CAAC,qBAAqB;gBACnD,GAAG,EAAE,MAAM,CAAC,GAAG;gBACf,oBAAoB,EAAE,MAAM,CAAC,oBAAoB;aAClD,CACF,CAAC;YACF,IAAI,eAAe,CAAC,IAAI,KAAK,OAAO,EAAE;gBACpC,MAAM,WAAW,GAAG,eAA2C,CAAC;gBAEhE,YAAY,CAAC,MAAM,GAAG,WAAW,CAAC,MAAM,IAAI,EAAE,CAAC;gBAC/C,IAAI,OAAO,WAAW,CAAC,WAAW,KAAK,QAAQ;oBAC7C,YAAY,CAAC,WAAW,GAAG,WAAW,CAAC,WAAW,CAAC;gBACrD,IAAI,WAAW,CAAC,OAAO;oBAAE,YAAY,CAAC,OAAO,GAAG,WAAW,CAAC,OAAO,CAAC;gBACpE,IAAI,OAAO,WAAW,CAAC,YAAY,KAAK,SAAS;oBAC/C,YAAY,CAAC,YAAY,GAAG,WAAW,CAAC,YAAY,CAAC;gBACvD,IAAI,WAAW,CAAC,gBAAgB;oBAC9B,YAAY,CAAC,gBAAgB,GAAG,WAAW,CAAC,gBAAgB,CAAC;gBAC/D,IAAI,WAAW,CAAC,gBAAgB;oBAC9B,YAAY,CAAC,gBAAgB,GAAG,WAAW,CAAC,gBAAgB,CAAC;gBAC/D,IAAI,WAAW,CAAC,gBAAgB,EAAE;oBAChC,IAAI,CAAC,YAAY,CAAC,IAAI;wBAAE,YAAY,CAAC,IAAI,GAAG,EAAE,CAAC;oBAC/C,YAAY,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG,WAAW,CAAC,gBAAgB,CAAC,CAAC;iBACzD;gBACD,IAAI,WAAW,CAAC,SAAS,EAAE;oBACzB,YAAY,CAAC,WAAW,GAAG,WAAW,CAAC,WAAW,CAAC;iBACpD;aACF;iBAAM,IAAI,eAAe,CAAC,IAAI,KAAK,YAAY,EAAE;gBAChD,MAAM,eAAe,GAAG,eAAsC,CAAC;gBAC/D,YAAY,CAAC,MAAM,GAAG,eAAe,CAAC,MAAM,IAAI,EAAE,CAAC;gBACnD,IAAI,eAA
e,CAAC,SAAS,EAAE;oBAC7B,YAAY,CAAC,WAAW,GAAG,eAAe,CAAC,WAAW,CAAC;iBACxD;aACF;SACF;aAAM;YACL,YAAY,CAAC,MAAM,GAAG,MAAM,CAAC,MAAM,IAAI,EAAE,CAAC;SAC3C;QAED,+EAA+E;QAC/E,IAAI,CAAC,YAAY,CAAC,WAAW,IAAI,MAAM,CAAC,WAAW,EAAE;YACnD,YAAY,CAAC,WAAW,GAAG,MAAM,CAAC,WAAW,CAAC;SAC/C;QAED,IAAI,YAAY,CAAC,eAAe,IAAI,CAAC,YAAY,CAAC,OAAO,EAAE;YACzD,MAAM,UAAU,GAAG,aAAa,CAAC,MAAM,CAAC,eAAe,CAAC,CAAC;YACzD,IAAI,UAAU;gBAAE,YAAY,CAAC,WAAW,GAAG,IAAI,CAAC,SAAS,CAAC,UAAU,CAAC,CAAC;SACvE;QAED,IAAI,aAAa,GACf,QAAQ;YACR,QAAQ,CAAC,WAAW,CAAC,cAAc,EAAE,CAAC,GAAqB,EAAE,EAAE;gBAC7D,MAAM,EAAE,SAAS,EAAE,WAAW,EAAE,GAAG,GAAG,CAAC;gBACvC,IAAI,SAAS,KAAK,IAAI,CAAC,EAAE;oBAAE,OAAO;gBAClC,QAAQ,CAAC,WAAW,CAAC,CAAC;YACxB,CAAC,CAAC,CAAC;QAEL,IAAI,CAAC,YAAY,CAAC,MAAM;YAAE,MAAM,IAAI,KAAK,CAAC,oBAAoB,CAAC,CAAC;QAEhE,MAAM,OAAO,GAAG,QAAQ,CAAC,UAAU,CAAC,EAAE,SAAS,EAAE,IAAI,CAAC,EAAE,EAAE,MAAM,EAAE,YAAY,EAAE,CAAC,CAAC;QAClF,OAAO,OAAO;aACX,IAAI,CAAC,CAAC,gBAAgB,EAAE,EAAE;YACzB,aAAa,aAAb,aAAa,uBAAb,aAAa,CAAE,MAAM,GAAG;YACxB,aAAa,GAAG,IAAI,CAAC;YACrB,OAAO,gBAAgB,CAAC;QAC1B,CAAC,CAAC;aACD,KAAK,CAAC,CAAC,GAAQ,EAAE,EAAE;YAClB,aAAa,aAAb,aAAa,uBAAb,aAAa,CAAE,MAAM,GAAG;YACxB,aAAa,GAAG,IAAI,CAAC;YACrB,MAAM,GAAG,CAAC;QACZ,CAAC,CAAC,CAAC;IACP,CAAC;IAED,cAAc;QACZ,OAAO,QAAQ,CAAC,cAAc,CAAC,EAAE,SAAS,EAAE,IAAI,CAAC,EAAE,EAAE,CAAC,CAAC;IACzD,CAAC;IAED;;;;;OAKG;IACH,QAAQ,CACN,IAAY,EACZ,EACE,WAAW,EAAE,UAAU,MAGrB,EAAE;QAEN,OAAO,QAAQ,CAAC,QAAQ,CAAC,EAAE,SAAS,EAAE,IAAI,CAAC,EAAE,EAAE,IAAI,EAAE,UAAU,EAAE,UAAU,EAAE,CAAC,CAAC;IACjF,CAAC;IAED,UAAU,CAAC,MAAgB;QACzB,OAAO,QAAQ,CAAC,UAAU,CAAC,EAAE,SAAS,EAAE,IAAI,CAAC,EAAE,EAAE,MAAM,EAAE,CAAC,CAAC;IAC7D,CAAC;IAED,SAAS,CACP,IAAY,EACZ,MAAwB;QAExB,OAAO,QAAQ,CAAC,SAAS,CAAC,EAAE,SAAS,EAAE,IAAI,CAAC,EAAE,EAAE,IAAI,EAAE,MAAM,EAAE,MAAM,IAAI,EAAE,EAAE,CAAC,CAAC;IAChF,CAAC;IAED;;;;;;OAMG;IACH,KAAK,CAAC,MAAM,CACV,KAAa,EACb,SAAmB,EACnB,MAAqB;QAErB,MAAM,OAAO,GAAG,MAAM,QAAQ,CAAC,MAAM,CAAC;YACpC,SAAS,EAAE,IAAI,CAAC,EAAE;YAClB,KAAK;YACL,SAAS;YACT,MAAM,EAAE,MAAM,IAAI,EAAE;SACrB,CAAC,CAAC;QAEH,8DAA8D;QAC9D,OAAO,OAAO;aACX,GAAG,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC,iCACZ,MAAM,KACT,QAAQ,EAAE,SAAS,CAAC,MAAM,CAAC,KAAK,CAAC,IACjC,CAAC;aACF,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC,KAAK,CAAC,CAAC;IACvC,CAAC;IAED,KAAK,CAAC,KAAK,CACT,EAAU,EACV,EAAU,EACV,EAAU,EACV,EAAU;QAEV,MAAM,MAAM,GAAG,MAAM,QAAQ,CAAC,KAAK,CAAC,EAAE,SAAS,EAAE,IAAI,CAAC,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,CAAC,CAAC;QAC5E,MAAM,CAAC,SAAS,EAAE,SAAS,EAAE,YAAY,EAAE,KAAK,EAAE,KAAK,EAAE,KAAK,EAAE,KAAK,CAAC,GACpE,IAAI,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;QACrB,OAAO;YACL,SAAS;YACT,SAAS;YACT,YAAY;YACZ,KAAK;YACL,KAAK;YACL,KAAK;YACL,KAAK;SACN,CAAC;IACJ,CAAC;IAED,KAAK,CAAC,iBAAiB,CACrB,QAAkD;QAElD,IAAI,YAAY,GAA6C,EAAE,CAAC;QAChE,IAAI,QAAQ;YACV,YAAY,GAAG,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC;gBAClC,IAAI,EAAE,CAAC,CAAC,IAAI,CAAC,OAAO,CAAC,WAAW,EAAE,EAAE,CAAC;gBACrC,MAAM,EAAE,CAAC,CAAC,MAAM;aACjB,CAAC,CAAC,CAAC;QACN,OAAO,QAAQ,CAAC,iBAAiB,CAAC,EAAE,SAAS,EAAE,IAAI,CAAC,EAAE,EAAE,YAAY,EAAE,CAAC,CAAC;IAC1E,CAAC;IAED,KAAK,CAAC,kBAAkB;QACtB,OAAO,QAAQ,CAAC,kBAAkB,CAAC,EAAE,SAAS,EAAE,IAAI,CAAC,EAAE,EAAE,CAAC,CAAC;IAC7D,CAAC;IAED,KAAK,CAAC,qBAAqB;QAGzB,OAAO,QAAQ,CAAC,qBAAqB,CAAC,EAAE,SAAS,EAAE,IAAI,CAAC,EAAE,EAAE,CAAC,CAAC;IAChE,CAAC;IAED;;;;;;OAMG;IACH,KAAK,CAAC,cAAc,CAAC,EACnB,IAAI,EACJ,OAAO,EAAE,MAAM,GAIhB;QACC,IAAI,IAAI,CAAC,UAAU,CAAC,SAAS,CAAC;YAAE,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;QACrD,OAAO,QAAQ,CAAC,cAAc,CAAC;YAC7B,SAAS,EAAE,IAAI,CAAC,EAAE;YAClB,MAAM,EAAE;gBACN,IAAI;gBACJ,OAAO,EAAE,MAAM,aAAN,MAAM,cAAN,MAAM,GAAI,IAAI;aACxB;SACF,CAAC,CAAC;IACL,CAAC;IAED
;;;OAGG;IACH,KAAK,CAAC,mBAAmB;QACvB,OAAO,MAAM,QAAQ,CAAC,mBAAmB,CAAC,EAAE,SAAS,EAAE,IAAI,CAAC,EAAE,EAAE,CAAC,CAAC;IACpE,CAAC;IAED;;;OAGG;IACH,KAAK,CAAC,oBAAoB;QAIxB,OAAO,MAAM,QAAQ,CAAC,oBAAoB,CAAC,EAAE,SAAS,EAAE,IAAI,CAAC,EAAE,EAAE,CAAC,CAAC;IACrE,CAAC;IAED;;;OAGG;IACH,KAAK,CAAC,iBAAiB;QACrB,OAAO,MAAM,QAAQ,CAAC,iBAAiB,CAAC,EAAE,SAAS,EAAE,IAAI,CAAC,EAAE,EAAE,CAAC,CAAC;IAClE,CAAC;IAED;;;;;;OAMG;IACH,KAAK,CAAC,WAAW,CAAC,EAAE,IAAI,EAAE,OAAO,EAAE,MAAM,EAAsC;QAC7E,IAAI,IAAI,CAAC,UAAU,CAAC,SAAS,CAAC;YAAE,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;QACrD,OAAO,MAAM,QAAQ,CAAC,WAAW,CAAC;YAChC,SAAS,EAAE,IAAI,CAAC,EAAE;YAClB,MAAM,EAAE,EAAE,IAAI,EAAE,OAAO,EAAE,MAAM,EAAE;SAClC,CAAC,CAAC;IACL,CAAC;IAED;;;OAGG;IACH,KAAK,CAAC,gBAAgB;QACpB,OAAO,MAAM,QAAQ,CAAC,gBAAgB,CAAC,EAAE,SAAS,EAAE,IAAI,CAAC,EAAE,EAAE,CAAC,CAAC;IACjE,CAAC;IAED;;;;;OAKG;IACH,KAAK,CAAC,2BAA2B,CAC/B,OAAsB,EACtB,WAAmB;QAKnB,OAAO,MAAM,QAAQ,CAAC,2BAA2B,CAAC;YAChD,SAAS,EAAE,IAAI,CAAC,EAAE;YAClB,cAAc,EAAE,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,EAAE;YACtD,WAAW;SACZ,CAAC,CAAC;IACL,CAAC;IAED;;;;OAIG;IACH,KAAK,CAAC,6BAA6B,CACjC,WAAmB;QAEnB,OAAO,MAAM,QAAQ,CAAC,6BAA6B,CAAC,EAAE,SAAS,EAAE,IAAI,CAAC,EAAE,EAAE,WAAW,EAAE,CAAC,CAAC;IAC3F,CAAC;IAED;;;;OAIG;IACH,KAAK,CAAC,iBAAiB,CAAC,MAAgB;QACtC,OAAO,MAAM,QAAQ,CAAC,iBAAiB,CAAC,EAAE,SAAS,EAAE,IAAI,CAAC,EAAE,EAAE,MAAM,EAAE,CAAC,CAAC;IAC1E,CAAC;IAED;;;OAGG;IACH,KAAK,CAAC,cAAc;QAClB,OAAO,MAAM,QAAQ,CAAC,cAAc,CAAC,EAAE,SAAS,EAAE,IAAI,CAAC,EAAE,EAAE,CAAC,CAAC;IAC/D,CAAC;IAED,KAAK,CAAC,OAAO;QACX,OAAO,QAAQ,CAAC,cAAc,CAAC,EAAE,SAAS,EAAE,IAAI,CAAC,EAAE,EAAE,CAAC,CAAC;IACzD,CAAC;CACF;AAED,MAAM,CAAC,KAAK,UAAU,eAAe,CAAC,OAAgB;IACpD,OAAO,QAAQ,CAAC,eAAe,CAAC,EAAE,OAAO,EAAE,CAAC,CAAC;AAC/C,CAAC;AAED,MAAM,UAAU,oBAAoB,CAClC,QAA+C;IAE/C,YAAY,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;IAC5B,OAAO;QACL,MAAM,EAAE,GAAG,EAAE;YACX,YAAY,CAAC,MAAM,CAAC,YAAY,CAAC,OAAO,CAAC,QAAQ,CAAC,EAAE,CAAC,CAAC,CAAC;QACzD,CAAC;KACF,CAAC;AACJ,CAAC;AAED,MAAM,CAAC,KAAK,UAAU,eAAe,CAAC,KAAa;IACjD,OAAO,QAAQ,CAAC,eAAe,CAAC,EAAE,KAAK,EAAE,CAAC,CAAC;AAC7C,CAAC;AAED,IAAI,gBAAgB,GAAG,CAAC,CAAC;AACzB,MAAM,eAAe,GAAG,GAAG,EAAE,CAC3B,OAAO,CAAC,GAAG,CAAC,QAAQ,KAAK,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,EAAE,GAAG,MAAM,CAAC,CAAC;AAE3E,MAAM,aAAa,GAAG;IACpB,eAAe;IACf,uBAAuB;IACvB,2BAA2B;IAC3B,uBAAuB;IACvB,uBAAuB;CACxB,CAAC;AAEF,MAAM,CAAC,KAAK,UAAU,kBAAkB,CAAC,KAAa;IACpD,IAAI,IAAI,GAAG,KAAK,CAAC;IACjB,IAAI,IAAI,CAAC,UAAU,CAAC,SAAS,CAAC;QAAE,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;IACrD,OAAO,QAAQ,CAAC,SAAS,CAAC,EAAE,IAAI,EAAE,IAAI,EAAE,aAAa,EAAE,CAAC,CAAC;AAC3D,CAAC;AAED,MAAM,WAAW,GAAG;IAClB,iCAAiC;IACjC,IAAI,EAAE,CAAC;IACP,IAAI,EAAE,CAAC;IACP,GAAG,EAAE,CAAC;IACN,IAAI,EAAE,CAAC;IACP,IAAI,EAAE,CAAC;CACR,CAAC;AAEF,MAAM,CAAC,KAAK,UAAU,SAAS,CAC7B,EAOgB,EAChB,UAAuC;QARvC,EACE,KAAK,EACL,cAAc,EAAE,YAAY,EAC5B,YAAY,EAAE,WAAW,EACzB,IAAI,EACJ,SAAS,EAAE,QAAQ,OAEL,EADX,IAAI,cANT,gEAOC,CADQ;IAIT,IAAI,IAAI,GAAG,KAAK,CAAC;IACjB,IAAI,IAAI,CAAC,UAAU,CAAC,SAAS,CAAC;QAAE,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;IAErD,IAAI,QAAQ,GAAG,IAAI,CAAC;IACpB,IAAI,QAAQ,aAAR,QAAQ,uBAAR,QAAQ,CAAE,UAAU,CAAC,SAAS;QAAG,QAAQ,GAAG,QAAQ,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;IAElE,IAAI,YAAY,GAA6C,EAAE,CAAC;IAChE,IAAI,QAAQ;QACV,YAAY,GAAG,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC;YAClC,IAAI,EAAE,CAAC,CAAC,IAAI,CAAC,OAAO,CAAC,WAAW,EAAE,EAAE,CAAC;YACrC,MAAM,EAAE,CAAC,CAAC,MAAM;SACjB,CAAC,CAAC,CAAC;IAEN,MAAM,SAAS,GAAG,gBAAgB,GAAG,eAAe,EAAE,CAAC;IACvD,gBAAgB,IAAI,CAAC,CAAC;IAEtB,IAAI,sBAAsB,GAAQ,IAAI,CAAC;IACvC,IAAI,UAAU,EAAE;QACd,sBAAsB,GAAG,QAAQ,CAAC,WAAW,CAC3C
,8BAA8B,EAC9B,CAAC,GAA4C,EAAE,EAAE;YAC/C,IAAI,GAAG,CAAC,SAAS,KAAK,SAAS;gBAAE,OAAO;YACxC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC;QAC3B,CAAC,CACF,CAAC;KACH;IAED,MAAM,QAAQ,GAAG,WAAW,CAAC,WAAuC,CAAC,CAAC;IAEtE,IAAI,IAAI,CAAC,YAAY,IAAI,CAAC,eAAe,CAAC,QAAQ,CAAC,IAAI,CAAC,YAAY,CAAC,EAAE;QACrE,OAAO,CAAC,IAAI,CAAC,+CAA+C,IAAI,CAAC,YAAY,uBAAuB,CAAC,CAAC;QACtG,OAAO,IAAI,CAAC,YAAY,CAAC;KAC1B;IACD,IAAI,IAAI,CAAC,YAAY,IAAI,CAAC,eAAe,CAAC,QAAQ,CAAC,IAAI,CAAC,YAAY,CAAC,EAAE;QACrE,OAAO,CAAC,IAAI,CAAC,+CAA+C,IAAI,CAAC,YAAY,uBAAuB,CAAC,CAAC;QACtG,OAAO,IAAI,CAAC,YAAY,CAAC;KAC1B;IAED,MAAM,EACJ,GAAG,EACH,WAAW,EACX,KAAK,EAAE,YAAY,EACnB,UAAU,GACX,GAAG,MAAM,QAAQ,CAAC,WAAW,CAAC;QAC7B,SAAS;QACT,MAAM,kBACJ,KAAK,EAAE,IAAI,EACX,cAAc,EAAE,CAAC,CAAC,YAAY,EAC9B,qBAAqB,EAAE,CAAC,CAAC,UAAU,EACnC,YAAY,EAAE,QAAQ,EACtB,IAAI,EAAE,QAAQ,EACd,SAAS,EAAE,YAAY,IACpB,IAAI,CACR;KACF,CAAC,CAAC,KAAK,CAAC,CAAC,GAAQ,EAAE,EAAE;QACpB,sBAAsB,aAAtB,sBAAsB,uBAAtB,sBAAsB,CAAE,MAAM,GAAG;QACjC,MAAM,GAAG,CAAC;IACZ,CAAC,CAAC,CAAC;IACH,sBAAsB,aAAtB,sBAAsB,uBAAtB,sBAAsB,CAAE,MAAM,GAAG;IACjC,OAAO,IAAI,YAAY,CAAC;QACtB,SAAS;QACT,GAAG;QACH,WAAW;QACX,KAAK,EAAE,YAAY;QACnB,UAAU;KACX,CAAC,CAAC;AACL,CAAC;AAED,MAAM,CAAC,KAAK,UAAU,eAAe;IACnC,OAAO,QAAQ,CAAC,kBAAkB,EAAE,CAAC;AACvC,CAAC;AAED,MAAM,CAAC,MAAM,SAAS,GAAG;IACvB,MAAM,EAAE,OAAO;IACf,MAAM,EAAE,qBAAqB;CAC9B,CAAC;AAEF,yCAAyC;AACzC,OAAO,EAAE,QAAQ,EAAE,CAAC","sourcesContent":["import { registerPlugin } from '@capacitor/core';\nimport type {\n NativeContextParams,\n NativeLlamaContext,\n NativeCompletionParams,\n NativeCompletionTokenProb,\n NativeCompletionResult,\n NativeTokenizeResult,\n NativeEmbeddingResult,\n NativeSessionLoadResult,\n NativeEmbeddingParams,\n NativeRerankParams,\n NativeRerankResult,\n NativeCompletionTokenProbItem,\n NativeCompletionResultTimings,\n JinjaFormattedChatResult,\n FormattedChatResult,\n NativeImageProcessingResult,\n NativeLlamaChatMessage,\n LlamaCppMessagePart,\n LlamaCppOAICompatibleMessage,\n ContextParams,\n EmbeddingParams,\n RerankParams,\n RerankResult,\n CompletionResponseFormat,\n CompletionParams,\n BenchResult,\n LlamaCppPlugin,\n} from './definitions';\n\n// Constants\nexport const LLAMACPP_MTMD_DEFAULT_MEDIA_MARKER = '<__media__>';\n\n// Event names\nconst EVENT_ON_INIT_CONTEXT_PROGRESS = '@LlamaCpp_onInitContextProgress';\nconst EVENT_ON_TOKEN = '@LlamaCpp_onToken';\nconst EVENT_ON_NATIVE_LOG = '@LlamaCpp_onNativeLog';\n\n// Register the plugin\nconst LlamaCpp = registerPlugin<LlamaCppPlugin>('LlamaCpp');\n\n// Log listeners management\nconst logListeners: Array<(level: string, text: string) => void> = [];\n\n// Set up native log listener\nLlamaCpp.addListener(EVENT_ON_NATIVE_LOG, (evt: { level: string; text: string }) => {\n logListeners.forEach((listener) => listener(evt.level, evt.text));\n});\n\n// Trigger unset to use default log callback\nLlamaCpp?.toggleNativeLog?.({ enabled: false })?.catch?.(() => {});\n\n// High-level types for the plugin interface\nexport type RNLlamaMessagePart = LlamaCppMessagePart;\nexport type RNLlamaOAICompatibleMessage = LlamaCppOAICompatibleMessage;\n\n// Re-export all types from definitions\nexport type {\n NativeContextParams,\n NativeLlamaContext,\n NativeCompletionParams,\n NativeCompletionTokenProb,\n NativeCompletionResult,\n NativeTokenizeResult,\n NativeEmbeddingResult,\n NativeSessionLoadResult,\n NativeEmbeddingParams,\n NativeRerankParams,\n NativeRerankResult,\n NativeCompletionTokenProbItem,\n NativeCompletionResultTimings,\n FormattedChatResult,\n JinjaFormattedChatResult,\n NativeImageProcessingResult,\n 
ContextParams,\n EmbeddingParams,\n RerankParams,\n RerankResult,\n CompletionResponseFormat,\n CompletionParams,\n BenchResult,\n};\n\nexport const RNLLAMA_MTMD_DEFAULT_MEDIA_MARKER = LLAMACPP_MTMD_DEFAULT_MEDIA_MARKER;\n\nexport type ToolCall = {\n type: 'function';\n id?: string;\n function: {\n name: string;\n arguments: string; // JSON string\n };\n};\n\nexport type TokenData = {\n token: string;\n completion_probabilities?: Array<NativeCompletionTokenProb>;\n // Parsed content from accumulated text\n content?: string;\n reasoning_content?: string;\n tool_calls?: Array<ToolCall>;\n accumulated_text?: string;\n};\n\ntype TokenNativeEvent = {\n contextId: number;\n tokenResult: TokenData;\n};\n\nconst validCacheTypes = [\n 'f16',\n 'f32',\n 'bf16',\n 'q8_0',\n 'q4_0',\n 'q4_1',\n 'iq4_nl',\n 'q5_0',\n 'q5_1',\n];\n\nconst getJsonSchema = (responseFormat?: CompletionResponseFormat) => {\n if (responseFormat?.type === 'json_schema') {\n return responseFormat.json_schema?.schema;\n }\n if (responseFormat?.type === 'json_object') {\n return responseFormat.schema || {};\n }\n return null;\n};\n\nexport class LlamaContext {\n id: number;\n gpu: boolean = false;\n reasonNoGPU: string = '';\n model: NativeLlamaContext['model'];\n\n constructor({ contextId, gpu, reasonNoGPU, model }: NativeLlamaContext) {\n this.id = contextId;\n this.gpu = gpu;\n this.reasonNoGPU = reasonNoGPU;\n this.model = model;\n }\n\n /**\n * Load cached prompt & completion state from a file.\n */\n async loadSession(filepath: string): Promise<NativeSessionLoadResult> {\n let path = filepath;\n if (path.startsWith('file://')) path = path.slice(7);\n return LlamaCpp.loadSession({ contextId: this.id, filepath: path });\n }\n\n /**\n * Save current cached prompt & completion state to a file.\n */\n async saveSession(\n filepath: string,\n options?: { tokenSize: number },\n ): Promise<number> {\n return LlamaCpp.saveSession({ \n contextId: this.id, \n filepath, \n size: options?.tokenSize || -1 \n });\n }\n\n isLlamaChatSupported(): boolean {\n return !!this.model.chatTemplates.llamaChat;\n }\n\n isJinjaSupported(): boolean {\n const { minja } = this.model.chatTemplates;\n return !!minja?.toolUse || !!minja?.default;\n }\n\n async getFormattedChat(\n messages: RNLlamaOAICompatibleMessage[],\n template?: string | null,\n params?: {\n jinja?: boolean;\n response_format?: CompletionResponseFormat;\n tools?: object;\n parallel_tool_calls?: object;\n tool_choice?: string;\n enable_thinking?: boolean;\n add_generation_prompt?: boolean;\n now?: string | number;\n chat_template_kwargs?: Record<string, string>;\n },\n ): Promise<FormattedChatResult | JinjaFormattedChatResult> {\n const mediaPaths: string[] = [];\n const chat = messages.map((msg) => {\n if (Array.isArray(msg.content)) {\n const content = msg.content.map((part) => {\n // Handle multimodal content\n if (part.type === 'image_url') {\n let path = part.image_url?.url || '';\n if (path?.startsWith('file://')) path = path.slice(7);\n mediaPaths.push(path);\n return {\n type: 'text',\n text: RNLLAMA_MTMD_DEFAULT_MEDIA_MARKER,\n };\n } else if (part.type === 'input_audio') {\n const { input_audio: audio } = part;\n if (!audio) throw new Error('input_audio is required');\n\n const { format } = audio;\n if (format != 'wav' && format != 'mp3') {\n throw new Error(`Unsupported audio format: ${format}`);\n }\n if (audio.url) {\n const path = audio.url.replace(/file:\\/\\//, '');\n mediaPaths.push(path);\n } else if (audio.data) {\n mediaPaths.push(audio.data);\n }\n return {\n type: 
'text',\n text: RNLLAMA_MTMD_DEFAULT_MEDIA_MARKER,\n };\n }\n return part;\n });\n\n return {\n ...msg,\n content,\n };\n }\n return msg;\n }) as NativeLlamaChatMessage[];\n\n const useJinja = this.isJinjaSupported() && params?.jinja;\n let tmpl;\n if (template) tmpl = template; // Force replace if provided\n const jsonSchema = getJsonSchema(params?.response_format);\n\n const result = await LlamaCpp.getFormattedChat({\n contextId: this.id,\n messages: JSON.stringify(chat),\n chatTemplate: tmpl,\n params: {\n jinja: useJinja,\n json_schema: jsonSchema ? JSON.stringify(jsonSchema) : undefined,\n tools: params?.tools ? JSON.stringify(params.tools) : undefined,\n parallel_tool_calls: params?.parallel_tool_calls\n ? JSON.stringify(params.parallel_tool_calls)\n : undefined,\n tool_choice: params?.tool_choice,\n enable_thinking: params?.enable_thinking ?? true,\n add_generation_prompt: params?.add_generation_prompt,\n now: typeof params?.now === 'number' ? params.now.toString() : params?.now,\n chat_template_kwargs: params?.chat_template_kwargs ? JSON.stringify(\n Object.entries(params.chat_template_kwargs).reduce((acc, [key, value]) => {\n acc[key] = JSON.stringify(value); // Each value is a stringified JSON object\n return acc;\n }, {} as Record<string, any>)\n ) : undefined,\n },\n });\n\n if (!useJinja) {\n return {\n type: 'llama-chat',\n prompt: result as string,\n has_media: mediaPaths.length > 0,\n media_paths: mediaPaths,\n };\n }\n const jinjaResult = result as JinjaFormattedChatResult;\n jinjaResult.type = 'jinja';\n jinjaResult.has_media = mediaPaths.length > 0;\n jinjaResult.media_paths = mediaPaths;\n return jinjaResult;\n }\n\n /**\n * Generate a completion based on the provided parameters\n * @param params Completion parameters including prompt or messages\n * @param callback Optional callback for token-by-token streaming\n * @returns Promise resolving to the completion result\n *\n * Note: For multimodal support, you can include an media_paths parameter.\n * This will process the images and add them to the context before generating text.\n * Multimodal support must be enabled via initMultimodal() first.\n */\n async completion(\n params: CompletionParams,\n callback?: (data: TokenData) => void,\n ): Promise<NativeCompletionResult> {\n const nativeParams = {\n ...params,\n prompt: params.prompt || '',\n emit_partial_completion: !!callback,\n };\n\n if (params.messages) {\n const formattedResult = await this.getFormattedChat(\n params.messages,\n params.chat_template || params.chatTemplate,\n {\n jinja: params.jinja,\n tools: params.tools,\n parallel_tool_calls: params.parallel_tool_calls,\n tool_choice: params.tool_choice,\n enable_thinking: params.enable_thinking,\n add_generation_prompt: params.add_generation_prompt,\n now: params.now,\n chat_template_kwargs: params.chat_template_kwargs,\n },\n );\n if (formattedResult.type === 'jinja') {\n const jinjaResult = formattedResult as JinjaFormattedChatResult;\n\n nativeParams.prompt = jinjaResult.prompt || '';\n if (typeof jinjaResult.chat_format === 'number')\n nativeParams.chat_format = jinjaResult.chat_format;\n if (jinjaResult.grammar) nativeParams.grammar = jinjaResult.grammar;\n if (typeof jinjaResult.grammar_lazy === 'boolean')\n nativeParams.grammar_lazy = jinjaResult.grammar_lazy;\n if (jinjaResult.grammar_triggers)\n nativeParams.grammar_triggers = jinjaResult.grammar_triggers;\n if (jinjaResult.preserved_tokens)\n nativeParams.preserved_tokens = jinjaResult.preserved_tokens;\n if (jinjaResult.additional_stops) {\n if 
(!nativeParams.stop) nativeParams.stop = [];\n nativeParams.stop.push(...jinjaResult.additional_stops);\n }\n if (jinjaResult.has_media) {\n nativeParams.media_paths = jinjaResult.media_paths;\n }\n } else if (formattedResult.type === 'llama-chat') {\n const llamaChatResult = formattedResult as FormattedChatResult;\n nativeParams.prompt = llamaChatResult.prompt || '';\n if (llamaChatResult.has_media) {\n nativeParams.media_paths = llamaChatResult.media_paths;\n }\n }\n } else {\n nativeParams.prompt = params.prompt || '';\n }\n\n // If media_paths were explicitly provided or extracted from messages, use them\n if (!nativeParams.media_paths && params.media_paths) {\n nativeParams.media_paths = params.media_paths;\n }\n\n if (nativeParams.response_format && !nativeParams.grammar) {\n const jsonSchema = getJsonSchema(params.response_format);\n if (jsonSchema) nativeParams.json_schema = JSON.stringify(jsonSchema);\n }\n\n let tokenListener: any =\n callback &&\n LlamaCpp.addListener(EVENT_ON_TOKEN, (evt: TokenNativeEvent) => {\n const { contextId, tokenResult } = evt;\n if (contextId !== this.id) return;\n callback(tokenResult);\n });\n\n if (!nativeParams.prompt) throw new Error('Prompt is required');\n\n const promise = LlamaCpp.completion({ contextId: this.id, params: nativeParams });\n return promise\n .then((completionResult) => {\n tokenListener?.remove();\n tokenListener = null;\n return completionResult;\n })\n .catch((err: any) => {\n tokenListener?.remove();\n tokenListener = null;\n throw err;\n });\n }\n\n stopCompletion(): Promise<void> {\n return LlamaCpp.stopCompletion({ contextId: this.id });\n }\n\n /**\n * Tokenize text or text with images\n * @param text Text to tokenize\n * @param params.media_paths Array of image paths to tokenize (if multimodal is enabled)\n * @returns Promise resolving to the tokenize result\n */\n tokenize(\n text: string,\n {\n media_paths: mediaPaths,\n }: {\n media_paths?: string[];\n } = {},\n ): Promise<NativeTokenizeResult> {\n return LlamaCpp.tokenize({ contextId: this.id, text, imagePaths: mediaPaths });\n }\n\n detokenize(tokens: number[]): Promise<string> {\n return LlamaCpp.detokenize({ contextId: this.id, tokens });\n }\n\n embedding(\n text: string,\n params?: EmbeddingParams,\n ): Promise<NativeEmbeddingResult> {\n return LlamaCpp.embedding({ contextId: this.id, text, params: params || {} });\n }\n\n /**\n * Rerank documents based on relevance to a query\n * @param query The query text to rank documents against\n * @param documents Array of document texts to rank\n * @param params Optional reranking parameters\n * @returns Promise resolving to an array of ranking results with scores and indices\n */\n async rerank(\n query: string,\n documents: string[],\n params?: RerankParams,\n ): Promise<RerankResult[]> {\n const results = await LlamaCpp.rerank({ \n contextId: this.id, \n query, \n documents, \n params: params || {} \n });\n\n // Sort by score descending and add document text if requested\n return results\n .map((result) => ({\n ...result,\n document: documents[result.index],\n }))\n .sort((a, b) => b.score - a.score);\n }\n\n async bench(\n pp: number,\n tg: number,\n pl: number,\n nr: number,\n ): Promise<BenchResult> {\n const result = await LlamaCpp.bench({ contextId: this.id, pp, tg, pl, nr });\n const [modelDesc, modelSize, modelNParams, ppAvg, ppStd, tgAvg, tgStd] =\n JSON.parse(result);\n return {\n modelDesc,\n modelSize,\n modelNParams,\n ppAvg,\n ppStd,\n tgAvg,\n tgStd,\n };\n }\n\n async applyLoraAdapters(\n loraList: 
Array<{ path: string; scaled?: number }>,\n ): Promise<void> {\n let loraAdapters: Array<{ path: string; scaled?: number }> = [];\n if (loraList)\n loraAdapters = loraList.map((l) => ({\n path: l.path.replace(/file:\\/\\//, ''),\n scaled: l.scaled,\n }));\n return LlamaCpp.applyLoraAdapters({ contextId: this.id, loraAdapters });\n }\n\n async removeLoraAdapters(): Promise<void> {\n return LlamaCpp.removeLoraAdapters({ contextId: this.id });\n }\n\n async getLoadedLoraAdapters(): Promise<\n Array<{ path: string; scaled?: number }>\n > {\n return LlamaCpp.getLoadedLoraAdapters({ contextId: this.id });\n }\n\n /**\n * Initialize multimodal support with a mmproj file\n * @param params Parameters for multimodal support\n * @param params.path Path to the multimodal projector file\n * @param params.use_gpu Whether to use GPU\n * @returns Promise resolving to true if initialization was successful\n */\n async initMultimodal({\n path,\n use_gpu: useGpu,\n }: {\n path: string;\n use_gpu?: boolean;\n }): Promise<boolean> {\n if (path.startsWith('file://')) path = path.slice(7);\n return LlamaCpp.initMultimodal({\n contextId: this.id,\n params: {\n path,\n use_gpu: useGpu ?? true,\n },\n });\n }\n\n /**\n * Check if multimodal support is enabled\n * @returns Promise resolving to true if multimodal is enabled\n */\n async isMultimodalEnabled(): Promise<boolean> {\n return await LlamaCpp.isMultimodalEnabled({ contextId: this.id });\n }\n\n /**\n * Check multimodal support\n * @returns Promise resolving to an object with vision and audio support\n */\n async getMultimodalSupport(): Promise<{\n vision: boolean;\n audio: boolean;\n }> {\n return await LlamaCpp.getMultimodalSupport({ contextId: this.id });\n }\n\n /**\n * Release multimodal support\n * @returns Promise resolving to void\n */\n async releaseMultimodal(): Promise<void> {\n return await LlamaCpp.releaseMultimodal({ contextId: this.id });\n }\n\n /**\n * Initialize TTS support with a vocoder model\n * @param params Parameters for TTS support\n * @param params.path Path to the vocoder model\n * @param params.n_batch Batch size for the vocoder model\n * @returns Promise resolving to true if initialization was successful\n */\n async initVocoder({ path, n_batch: nBatch }: { path: string; n_batch?: number }): Promise<boolean> {\n if (path.startsWith('file://')) path = path.slice(7);\n return await LlamaCpp.initVocoder({ \n contextId: this.id, \n params: { path, n_batch: nBatch } \n });\n }\n\n /**\n * Check if TTS support is enabled\n * @returns Promise resolving to true if TTS is enabled\n */\n async isVocoderEnabled(): Promise<boolean> {\n return await LlamaCpp.isVocoderEnabled({ contextId: this.id });\n }\n\n /**\n * Get a formatted audio completion prompt\n * @param speakerJsonStr JSON string representing the speaker\n * @param textToSpeak Text to speak\n * @returns Promise resolving to the formatted audio completion result with prompt and grammar\n */\n async getFormattedAudioCompletion(\n speaker: object | null,\n textToSpeak: string,\n ): Promise<{\n prompt: string;\n grammar?: string;\n }> {\n return await LlamaCpp.getFormattedAudioCompletion({\n contextId: this.id,\n speakerJsonStr: speaker ? 
JSON.stringify(speaker) : '',\n textToSpeak,\n });\n }\n\n /**\n * Get guide tokens for audio completion\n * @param textToSpeak Text to speak\n * @returns Promise resolving to the guide tokens\n */\n async getAudioCompletionGuideTokens(\n textToSpeak: string,\n ): Promise<Array<number>> {\n return await LlamaCpp.getAudioCompletionGuideTokens({ contextId: this.id, textToSpeak });\n }\n\n /**\n * Decode audio tokens\n * @param tokens Array of audio tokens\n * @returns Promise resolving to the decoded audio tokens\n */\n async decodeAudioTokens(tokens: number[]): Promise<Array<number>> {\n return await LlamaCpp.decodeAudioTokens({ contextId: this.id, tokens });\n }\n\n /**\n * Release TTS support\n * @returns Promise resolving to void\n */\n async releaseVocoder(): Promise<void> {\n return await LlamaCpp.releaseVocoder({ contextId: this.id });\n }\n\n async release(): Promise<void> {\n return LlamaCpp.releaseContext({ contextId: this.id });\n }\n}\n\nexport async function toggleNativeLog(enabled: boolean): Promise<void> {\n return LlamaCpp.toggleNativeLog({ enabled });\n}\n\nexport function addNativeLogListener(\n listener: (level: string, text: string) => void,\n): { remove: () => void } {\n logListeners.push(listener);\n return {\n remove: () => {\n logListeners.splice(logListeners.indexOf(listener), 1);\n },\n };\n}\n\nexport async function setContextLimit(limit: number): Promise<void> {\n return LlamaCpp.setContextLimit({ limit });\n}\n\nlet contextIdCounter = 0;\nconst contextIdRandom = () =>\n process.env.NODE_ENV === 'test' ? 0 : Math.floor(Math.random() * 100000);\n\nconst modelInfoSkip = [\n // Large fields\n 'tokenizer.ggml.tokens',\n 'tokenizer.ggml.token_type',\n 'tokenizer.ggml.merges',\n 'tokenizer.ggml.scores',\n];\n\nexport async function loadLlamaModelInfo(model: string): Promise<Object> {\n let path = model;\n if (path.startsWith('file://')) path = path.slice(7);\n return LlamaCpp.modelInfo({ path, skip: modelInfoSkip });\n}\n\nconst poolTypeMap = {\n // -1 is unspecified as undefined\n none: 0,\n mean: 1,\n cls: 2,\n last: 3,\n rank: 4,\n};\n\nexport async function initLlama(\n {\n model,\n is_model_asset: isModelAsset,\n pooling_type: poolingType,\n lora,\n lora_list: loraList,\n ...rest\n }: ContextParams,\n onProgress?: (progress: number) => void,\n): Promise<LlamaContext> {\n let path = model;\n if (path.startsWith('file://')) path = path.slice(7);\n\n let loraPath = lora;\n if (loraPath?.startsWith('file://')) loraPath = loraPath.slice(7);\n\n let loraAdapters: Array<{ path: string; scaled?: number }> = [];\n if (loraList)\n loraAdapters = loraList.map((l) => ({\n path: l.path.replace(/file:\\/\\//, ''),\n scaled: l.scaled,\n }));\n\n const contextId = contextIdCounter + contextIdRandom();\n contextIdCounter += 1;\n\n let removeProgressListener: any = null;\n if (onProgress) {\n removeProgressListener = LlamaCpp.addListener(\n EVENT_ON_INIT_CONTEXT_PROGRESS,\n (evt: { contextId: number; progress: number }) => {\n if (evt.contextId !== contextId) return;\n onProgress(evt.progress);\n },\n );\n }\n\n const poolType = poolTypeMap[poolingType as keyof typeof poolTypeMap];\n\n if (rest.cache_type_k && !validCacheTypes.includes(rest.cache_type_k)) {\n console.warn(`[LlamaCpp] initLlama: Invalid cache K type: ${rest.cache_type_k}, falling back to f16`);\n delete rest.cache_type_k;\n }\n if (rest.cache_type_v && !validCacheTypes.includes(rest.cache_type_v)) {\n console.warn(`[LlamaCpp] initLlama: Invalid cache V type: ${rest.cache_type_v}, falling back to f16`);\n delete 
rest.cache_type_v;\n }\n\n const {\n gpu,\n reasonNoGPU,\n model: modelDetails,\n androidLib,\n } = await LlamaCpp.initContext({\n contextId,\n params: {\n model: path,\n is_model_asset: !!isModelAsset,\n use_progress_callback: !!onProgress,\n pooling_type: poolType,\n lora: loraPath,\n lora_list: loraAdapters,\n ...rest,\n },\n }).catch((err: any) => {\n removeProgressListener?.remove();\n throw err;\n });\n removeProgressListener?.remove();\n return new LlamaContext({\n contextId,\n gpu,\n reasonNoGPU,\n model: modelDetails,\n androidLib,\n });\n}\n\nexport async function releaseAllLlama(): Promise<void> {\n return LlamaCpp.releaseAllContexts();\n}\n\nexport const BuildInfo = {\n number: '1.0.0',\n commit: 'capacitor-llama-cpp',\n};\n\n// Re-export the plugin for direct access\nexport { LlamaCpp };\n"]}