xinference 0.12.1__py3-none-any.whl → 0.12.2.post1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of xinference might be problematic.
- xinference/_version.py +3 -3
- xinference/api/restful_api.py +34 -8
- xinference/client/restful/restful_client.py +4 -0
- xinference/core/event.py +5 -6
- xinference/core/model.py +8 -3
- xinference/core/scheduler.py +13 -3
- xinference/model/llm/llm_family.json +6 -2
- xinference/model/llm/llm_family_modelscope.json +6 -2
- xinference/model/llm/pytorch/chatglm.py +23 -0
- xinference/model/llm/pytorch/core.py +39 -49
- xinference/model/llm/pytorch/glm4v.py +11 -0
- xinference/model/llm/pytorch/internlm2.py +15 -0
- xinference/model/llm/pytorch/utils.py +46 -179
- xinference/model/llm/utils.py +14 -2
- xinference/model/rerank/core.py +35 -6
- xinference/types.py +28 -0
- xinference/web/ui/build/asset-manifest.json +6 -6
- xinference/web/ui/build/index.html +1 -1
- xinference/web/ui/build/static/css/main.4bafd904.css +2 -0
- xinference/web/ui/build/static/css/main.4bafd904.css.map +1 -0
- xinference/web/ui/build/static/js/main.b80d9c08.js +3 -0
- xinference/web/ui/build/static/js/main.b80d9c08.js.map +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/0c2fb5375667931c4a331c99e0d87dc145e8f327cea3f44d6e56f54c7c1d4020.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/131091b25d26b17cdca187d7542a21475c211138d900cf667682260e76ef9463.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/16537795de12c61903b6110c241f62a7855b2d0fc1e7c3d1faa347267f3a6893.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/17b8f071491402d70b146532358b1a612226e5dc7b3e8755a1322d27b4680cee.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/395409bd005e19d48b437c48d88e5126c7865ba9631fe98535333c952e383dc5.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/3da7d55e87882a4af923e187b1351160e34ca102f589086439c15131a227fb6e.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/43991bb67c3136863e6fb37f796466b12eb547a1465408cc77820fddafb3bed3.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/72bcecc71c5267250edeb89608859d449b586f13ff9923a5e70e7172976ec403.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/{15e2cf8cd8d0989719b6349428ff576f9009ff4c2dcc52378be0bd938e82495e.json → 935efd2867664c58230378fdf2ff1ea85e58d853b7214014e20dfbca8dab7b05.json} +1 -1
- xinference/web/ui/node_modules/.cache/babel-loader/a7109d4425e3d94ca2726fc7020fd33bf5030afd4c9cf4bf71e21776cd70646a.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/c2abe75f04ad82fba68f35ed9cbe2e287762c876684fddccccfa73f739489b65.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/f28b83886159d83b84f099b05d607a822dca4dd7f2d8aa6d56fe08bab0b5b086.json +1 -0
- {xinference-0.12.1.dist-info → xinference-0.12.2.post1.dist-info}/METADATA +1 -1
- {xinference-0.12.1.dist-info → xinference-0.12.2.post1.dist-info}/RECORD +41 -40
- xinference/web/ui/build/static/css/main.074e2b31.css +0 -2
- xinference/web/ui/build/static/css/main.074e2b31.css.map +0 -1
- xinference/web/ui/build/static/js/main.a58ff436.js +0 -3
- xinference/web/ui/build/static/js/main.a58ff436.js.map +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/10262a281dec3bc2b185f4385ceb6846626f52d41cb4d46c7c649e719f979d4d.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/2c63e940b945fd5817157e08a42b889b30d668ea4c91332f48ef2b1b9d26f520.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/3c2f277c93c5f1638e08db38df0d0fb4e58d1c5571aea03241a5c04ff4094704.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/4135fe8745434cbce6438d1ebfa47422e0c77d884db4edc75c8bf32ea1d50621.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/4de0a71074f9cbe1e7862750dcdd08cbc1bae7d9d9849a78b1783ca670017b3c.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/762a75a62daf3bec2cfc97ec8612798493fb34ef87087dcad6aad64ab7f14345.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/7f3bdb3a48fa00c046c8b185acd4da6f2e2940a20dbd77f9373d60de3fd6633e.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/9cfd33238ca43e5bf9fc7e442690e8cc6027c73553db36de87e3597ed524ee4b.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/e6eccc9aa641e7da833492e27846dc965f9750281420977dc84654ca6ed221e4.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/f2f73bfdc13b12b02c8cbc4769b0b8e6367e9b6d8331c322d94318491a0b3653.json +0 -1
- /xinference/web/ui/build/static/js/{main.a58ff436.js.LICENSE.txt → main.b80d9c08.js.LICENSE.txt} +0 -0
- {xinference-0.12.1.dist-info → xinference-0.12.2.post1.dist-info}/LICENSE +0 -0
- {xinference-0.12.1.dist-info → xinference-0.12.2.post1.dist-info}/WHEEL +0 -0
- {xinference-0.12.1.dist-info → xinference-0.12.2.post1.dist-info}/entry_points.txt +0 -0
- {xinference-0.12.1.dist-info → xinference-0.12.2.post1.dist-info}/top_level.txt +0 -0
xinference/model/llm/pytorch/utils.py CHANGED

@@ -17,11 +17,9 @@ import logging
 import os
 import time
 import uuid
-from threading import Thread
 from typing import Dict, Iterable, Iterator, List, Optional, Tuple

 import torch
-from transformers import GenerationConfig, TextIteratorStreamer
 from transformers.cache_utils import DynamicCache
 from transformers.generation.logits_process import (
     LogitsProcessorList,
@@ -363,179 +361,6 @@ def generate_stream(
     empty_cache()


-@torch.inference_mode()
-def generate_stream_falcon(
-    model_uid,
-    model,
-    tokenizer,
-    prompt,
-    device,
-    generate_config,
-    judge_sent_end=False,
-) -> Iterator[Tuple[CompletionChunk, CompletionUsage]]:
-    context_len = get_context_length(model.config)
-    stream_interval = generate_config.get("stream_interval", 2)
-    stream = generate_config.get("stream", False)
-    stream_options = generate_config.pop("stream_options", None)
-    include_usage = (
-        stream_options["include_usage"] if isinstance(stream_options, dict) else False
-    )
-    len_prompt = len(prompt)
-
-    temperature = float(generate_config.get("temperature", 1.0))
-    repetition_penalty = float(generate_config.get("repetition_penalty", 1.0))
-    top_p = float(generate_config.get("top_p", 1.0))
-    top_k = int(generate_config.get("top_k", 50))  # -1 means disable
-    max_new_tokens = int(generate_config.get("max_tokens", max_tokens_field.default))
-    echo = bool(generate_config.get("echo", False))
-    stop_str = generate_config.get("stop", None)
-    stop_token_ids = generate_config.get("stop_token_ids", None) or []
-    stop_token_ids.append(tokenizer.eos_token_id)
-    chunk_id = str(uuid.uuid4())
-
-    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-    input_ids = inputs["input_ids"]
-    attention_mask = inputs["attention_mask"]
-
-    max_src_len = context_len - max_new_tokens - 8
-
-    input_ids = input_ids[-max_src_len:]  # truncate from the left
-    attention_mask = attention_mask[-max_src_len:]  # truncate from the left
-    input_echo_len = len(input_ids)
-
-    decode_config = dict(skip_special_tokens=True, clean_up_tokenization_spaces=True)
-    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, **decode_config)
-
-    generation_config = GenerationConfig(
-        max_new_tokens=max_new_tokens,
-        do_sample=temperature >= 1e-5,
-        temperature=temperature,
-        repetition_penalty=repetition_penalty,
-        no_repeat_ngram_size=10,
-        top_p=top_p,
-        top_k=top_k,
-        eos_token_id=stop_token_ids,
-    )
-
-    generation_kwargs = dict(
-        inputs=input_ids,
-        attention_mask=attention_mask,
-        streamer=streamer,
-        generation_config=generation_config,
-    )
-
-    thread = Thread(target=model.generate, kwargs=generation_kwargs)
-    thread.start()
-
-    if echo:
-        # means keep the prompt
-        output = prompt
-    else:
-        output = ""
-
-    last_output_length = 0
-    for i, new_text in enumerate(streamer):
-        output += new_text
-        if i % stream_interval == 0:
-            if echo:
-                rfind_start = len_prompt
-            else:
-                rfind_start = 0
-
-            partially_stopped = False
-            if stop_str:
-                if isinstance(stop_str, str):
-                    pos = output.rfind(stop_str, rfind_start)
-                    if pos != -1:
-                        output = output[:pos]
-                    else:
-                        partially_stopped = is_partial_stop(output, stop_str)
-                elif isinstance(stop_str, Iterable):
-                    for each_stop in stop_str:
-                        pos = output.rfind(each_stop, rfind_start)
-                        if pos != -1:
-                            output = output[:pos]
-                            break
-                        else:
-                            partially_stopped = is_partial_stop(output, each_stop)
-                            if partially_stopped:
-                                break
-                else:
-                    raise ValueError("Invalid stop field type.")
-
-            if stream:
-                output = output.strip("�")
-                tmp_output_length = len(output)
-                output = output[last_output_length:]
-                last_output_length = tmp_output_length
-
-            # prevent yielding partial stop sequence
-            if not partially_stopped:
-                completion_choice = CompletionChoice(
-                    text=output, index=0, logprobs=None, finish_reason=None
-                )
-                completion_chunk = CompletionChunk(
-                    id=chunk_id,
-                    object="text_completion",
-                    created=int(time.time()),
-                    model=model_uid,
-                    choices=[completion_choice],
-                )
-                completion_usage = CompletionUsage(
-                    prompt_tokens=input_echo_len,
-                    completion_tokens=i,
-                    total_tokens=(input_echo_len + i),
-                )
-
-                yield completion_chunk, completion_usage
-    output = output.strip()
-
-    # finish stream event, which contains finish reason
-    if i == max_new_tokens - 1:
-        finish_reason = "length"
-    elif partially_stopped:
-        finish_reason = None
-    else:
-        finish_reason = "stop"
-
-    completion_choice = CompletionChoice(
-        text=output, index=0, logprobs=None, finish_reason=finish_reason
-    )
-    completion_chunk = CompletionChunk(
-        id=chunk_id,
-        object="text_completion",
-        created=int(time.time()),
-        model=model_uid,
-        choices=[completion_choice],
-    )
-    completion_usage = CompletionUsage(
-        prompt_tokens=input_echo_len,
-        completion_tokens=i,
-        total_tokens=(input_echo_len + i),
-    )
-
-    yield completion_chunk, completion_usage
-
-    if include_usage:
-        completion_chunk = CompletionChunk(
-            id=chunk_id,
-            object="text_completion",
-            created=int(time.time()),
-            model=model_uid,
-            choices=[],
-        )
-        completion_usage = CompletionUsage(
-            prompt_tokens=input_echo_len,
-            completion_tokens=i,
-            total_tokens=(input_echo_len + i),
-        )
-        yield completion_chunk, completion_usage
-
-    # clean
-    gc.collect()
-    empty_cache()
-
-
 def _get_token_from_logits(
     req: InferenceRequest, i: int, logits, temperature, repetition_penalty, top_p, top_k
 ):
@@ -570,12 +395,15 @@ def _pad_to_max_length(x: List[int], max_len: int, pad: int) -> List[int]:
     return [pad] * (max_len - len(x)) + x


-def _pad_seqs_inplace(seqs: List[List[int]], pad: int):
+def _pad_seqs_inplace(seqs: List[List[int]], reqs: List[InferenceRequest], pad: int):
     max_len = max(len(seq) for seq in seqs)
     n = len(seqs)
     i = 0
     while i < n:
+        prev_seq_len = len(seqs[i])
         seqs[i] = _pad_to_max_length(seqs[i], max_len, pad)
+        padding_len = len(seqs[i]) - prev_seq_len
+        reqs[i].padding_len = padding_len
         i += 1

@@ -681,6 +509,25 @@ def _merge_kv_cache(
     return ret_kv.to_legacy_cache()


+def _get_attention_mask_and_position_ids(kv, reqs: List[InferenceRequest]):
+    batch_size, seq_length, device = (
+        kv[0][0].shape[0],
+        kv[0][0].shape[2],
+        kv[0][0].device,
+    )
+    seq_length = seq_length + 1
+    position_ids = torch.as_tensor([[seq_length - 1]], dtype=torch.long, device=device)
+    attention_mask = torch.ones(
+        (batch_size, seq_length), dtype=torch.long, device=device
+    )
+    padding_lens = torch.as_tensor([r.padding_len for r in reqs])
+    mask = torch.arange(seq_length).expand(
+        batch_size, seq_length
+    ) < padding_lens.unsqueeze(1)
+    attention_mask[mask] = 0
+    return attention_mask, position_ids
+
+
 @torch.inference_mode()
 def _batch_inference_one_step_internal(
     req_list: List[InferenceRequest],
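Taken together, the two additions above implement left-padding bookkeeping for batched decode: _pad_seqs_inplace records how many pad tokens were prepended to each request, and _get_attention_mask_and_position_ids turns those counts into an attention mask that hides the padding. A self-contained sketch of the masking logic, with invented padding lengths:

import torch

# Three requests whose prompts were left-padded by 0, 2 and 1 tokens
# (the values _pad_seqs_inplace would have stored in req.padding_len),
# with a cached sequence length of 5 including the token being decoded.
padding_lens = torch.as_tensor([0, 2, 1])
batch_size, seq_length = 3, 5

attention_mask = torch.ones((batch_size, seq_length), dtype=torch.long)
pad_positions = torch.arange(seq_length).expand(
    batch_size, seq_length
) < padding_lens.unsqueeze(1)
attention_mask[pad_positions] = 0

print(attention_mask)
# tensor([[1, 1, 1, 1, 1],
#         [0, 0, 1, 1, 1],
#         [0, 1, 1, 1, 1]])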
@@ -689,7 +536,9 @@ def _batch_inference_one_step_internal(
     tokenizer,
     device,
     context_len: int,
+    stop_tokens: Tuple[int],
     decode_round: int = 16,
+    require_attention_mask: bool = False,
     bos_flag: str = "<bos_stream>",
     eos_flag: str = "<eos_stream>",
 ):
@@ -699,7 +548,8 @@ def _batch_inference_one_step_internal(
     if not valid_req_list:
         return
     generate_config_mapping: Dict[InferenceRequest, Tuple] = {
-        r: r.get_generate_configs(tokenizer.eos_token_id) for r in valid_req_list
+        r: r.get_generate_configs(tokenizer.eos_token_id, stop_tokens)
+        for r in valid_req_list
     }
     s_time = time.time()

@@ -721,7 +571,7 @@ def _batch_inference_one_step_internal(
         max_src_len = get_max_src_len(context_len, req)
         req.prompt_tokens = input_id[-max_src_len:]
         prompt_tokens.append(req.prompt_tokens)
-    _pad_seqs_inplace(prompt_tokens, 0)
+    _pad_seqs_inplace(prompt_tokens, valid_req_list, 0)
     out = model(torch.as_tensor(prompt_tokens, device=device), use_cache=True)

     logits = out.logits
@@ -763,10 +613,18 @@ def _batch_inference_one_step_internal(
     # here, only decode phase, just run some rounds
     for _i in range(decode_round):
         decode_tokens: List[List[int]] = [[r.new_tokens[-1]] for r in valid_req_list]
+        inf_kws = {}
+        if require_attention_mask:
+            attention_mask, position_ids = _get_attention_mask_and_position_ids(
+                past_key_values, valid_req_list
+            )
+            inf_kws["position_ids"] = position_ids
+            inf_kws["attention_mask"] = attention_mask
         out = model(
             input_ids=torch.as_tensor(decode_tokens, device=device),
             use_cache=True,
             past_key_values=past_key_values,
+            **inf_kws,
         )
         logits = out.logits
         past_key_values = out.past_key_values
@@ -903,12 +761,21 @@ def batch_inference_one_step(
     tokenizer,
     device,
     context_len: int,
+    stop_token_ids: Tuple[int],
+    require_attention_mask: bool = False,
 ):
     from ....core.model import OutOfMemoryError

     try:
         _batch_inference_one_step_internal(
-            req_list, model_uid, model, tokenizer, device, context_len
+            req_list,
+            model_uid,
+            model,
+            tokenizer,
+            device,
+            context_len,
+            stop_token_ids,
+            require_attention_mask=require_attention_mask,
         )
     except OutOfMemoryError:
         logger.exception(
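With the new parameters, a call site threads the stop tokens through to per-request generate configs and, for models that need explicit masks during decode, enables the attention-mask path. A hypothetical invocation (reqs, model and tokenizer are assumed to come from the scheduler; the values are illustrative):

batch_inference_one_step(
    reqs,                        # List[InferenceRequest] scheduled for this step
    "my-model-0",                # model_uid
    model,
    tokenizer,
    "cuda",                      # device
    8192,                        # context_len
    (tokenizer.eos_token_id,),   # stop_token_ids
    require_attention_mask=True,
)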
xinference/model/llm/utils.py CHANGED
@@ -661,7 +661,13 @@ Begin!"""
             content, func, args = cls._eval_gorilla_openfunctions_arguments(c, tools)
         elif family in ["chatglm3", "glm4-chat"]:
             content, func, args = cls._eval_glm_chat_arguments(c, tools)
-        elif family in ["qwen-chat", "qwen1.5-chat", "qwen1.5-moe-chat", "qwen2-instruct"]:
+        elif family in [
+            "qwen-chat",
+            "qwen1.5-chat",
+            "qwen1.5-moe-chat",
+            "qwen2-instruct",
+            "qwen2-moe-instruct",
+        ]:
             content, func, args = cls._eval_qwen_chat_arguments(c, tools)
         else:
             raise Exception(
@@ -680,7 +686,13 @@ Begin!"""
         returns the part after "\nFinal Answer:" if found, else returns delta.
         """
         family = model_family.model_family or model_family.model_name
-        if family in ["qwen-chat", "qwen1.5-chat", "qwen1.5-moe-chat", "qwen2-instruct"]:
+        if family in [
+            "qwen-chat",
+            "qwen1.5-chat",
+            "qwen1.5-moe-chat",
+            "qwen2-instruct",
+            "qwen2-moe-instruct",
+        ]:
             # Encapsulating function to reset 'found' after each call
             found = False
xinference/model/rerank/core.py CHANGED
@@ -23,7 +23,7 @@ import numpy as np

 from ...constants import XINFERENCE_CACHE_DIR
 from ...device_utils import empty_cache
-from ...types import Document, DocumentObj, Rerank
+from ...types import Document, DocumentObj, Rerank, RerankTokens
 from ..core import CacheableModelSpec, ModelDescription
 from ..utils import is_model_cached
@@ -121,11 +121,17 @@ class RerankModel:
         if model_spec.type == "unknown":
             model_spec.type = self._auto_detect_type(model_path)

+    @staticmethod
+    def _get_tokenizer(model_path):
+        from transformers import AutoTokenizer
+
+        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+        return tokenizer
+
     @staticmethod
     def _auto_detect_type(model_path):
         """This method may not be stable due to the fact that the tokenizer name may be changed.
         Therefore, we only use this method for unknown model types."""
-        from transformers import AutoTokenizer

         type_mapper = {
             "LlamaTokenizerFast": "LLM-based layerwise",
@@ -133,12 +139,13 @@ class RerankModel:
             "XLMRobertaTokenizerFast": "normal",
         }

-        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+        tokenizer = RerankModel._get_tokenizer(model_path)
         rerank_type = type_mapper.get(type(tokenizer).__name__)
         if rerank_type is None:
-            raise Exception(
-                f"Can't determine the rerank type based on the tokenizer {tokenizer}"
+            logger.warning(
+                f"Can't determine the rerank type based on the tokenizer {tokenizer}, use normal type by default."
             )
+            return "normal"
         return rerank_type

     def load(self):
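The type detection keys off the tokenizer's class name, so the extracted _get_tokenizer helper can now serve both detection and the token counting below. For illustration (downloads the tokenizer; the model name is just an example):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("BAAI/bge-reranker-base")
print(type(tokenizer).__name__)
# "XLMRobertaTokenizerFast" -> mapped to the "normal" rerank type;
# unknown tokenizer classes now fall back to "normal" with a warning
# instead of raising.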
@@ -185,6 +192,7 @@ class RerankModel:
         top_n: Optional[int],
         max_chunks_per_doc: Optional[int],
         return_documents: Optional[bool],
+        return_len: Optional[bool],
         **kwargs,
     ) -> Rerank:
         self._counter += 1
@@ -223,7 +231,28 @@ class RerankModel:
             )
             for arg in sim_scores_argsort
         ]
-        return Rerank(id=str(uuid.uuid1()), results=docs)
+        if return_len:
+            tokenizer = self._get_tokenizer(self._model_path)
+            input_len = sum([len(tokenizer.tokenize(t)) for t in documents])
+
+            # Rerank Model output is just score or documents
+            # while return_documents = True
+            output_len = input_len
+
+        # api_version, billed_units, warnings
+        # is for Cohere API compatibility, set to None
+        metadata = {
+            "api_version": None,
+            "billed_units": None,
+            "tokens": (
+                RerankTokens(input_tokens=input_len, output_tokens=output_len)
+                if return_len
+                else None
+            ),
+            "warnings": None,
+        }
+
+        return Rerank(id=str(uuid.uuid1()), results=docs, meta=metadata)


 def get_cache_dir(model_spec: RerankModelSpec):
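End to end, the new return_len flag surfaces Cohere-style token accounting in the response metadata. A hypothetical client-side sketch (assumes a launched rerank model, and that the 0.12.2 RESTful client forwards return_len, per the restful_client.py change in the file list above):

from xinference.client import Client

client = Client("http://localhost:9997")
model = client.get_model("my-rerank-model")  # UID of a launched rerank model

result = model.rerank(
    documents=["Paris is the capital of France.", "Gravity bends light."],
    query="What is the capital of France?",
    return_len=True,  # assumption: exposed by the updated client
)
print(result["results"][0]["relevance_score"])
print(result["meta"]["tokens"])  # {'input_tokens': ..., 'output_tokens': ...}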
xinference/types.py CHANGED
@@ -80,9 +80,37 @@ class DocumentObj(TypedDict):
     document: Optional[Document]


+# Cohere API compatibility
+class ApiVersion(TypedDict):
+    version: str
+    is_deprecated: bool
+    is_experimental: bool
+
+
+# Cohere API compatibility
+class BilledUnit(TypedDict):
+    input_tokens: int
+    output_tokens: int
+    search_units: int
+    classifications: int
+
+
+class RerankTokens(TypedDict):
+    input_tokens: int
+    output_tokens: int
+
+
+class Meta(TypedDict):
+    api_version: Optional[ApiVersion]
+    billed_units: Optional[BilledUnit]
+    tokens: RerankTokens
+    warnings: Optional[List[str]]
+
+
 class Rerank(TypedDict):
     id: str
     results: List[DocumentObj]
+    meta: Meta


 class CompletionLogprobs(TypedDict):
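The new TypedDicts mirror Cohere's rerank response envelope. A small sketch of assembling the shape RerankModel.rerank now returns (token counts invented):

from xinference.types import Meta, Rerank, RerankTokens

tokens = RerankTokens(input_tokens=42, output_tokens=42)
meta = Meta(api_version=None, billed_units=None, tokens=tokens, warnings=None)
response = Rerank(id="example-id", results=[], meta=meta)
print(response["meta"]["tokens"]["input_tokens"])  # 42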
xinference/web/ui/build/asset-manifest.json CHANGED

@@ -1,14 +1,14 @@
 {
   "files": {
-    "main.css": "./static/css/main.074e2b31.css",
-    "main.js": "./static/js/main.a58ff436.js",
+    "main.css": "./static/css/main.4bafd904.css",
+    "main.js": "./static/js/main.b80d9c08.js",
     "static/media/icon.webp": "./static/media/icon.4603d52c63041e5dfbfd.webp",
     "index.html": "./index.html",
-    "main.074e2b31.css.map": "./static/css/main.074e2b31.css.map",
-    "main.a58ff436.js.map": "./static/js/main.a58ff436.js.map"
+    "main.4bafd904.css.map": "./static/css/main.4bafd904.css.map",
+    "main.b80d9c08.js.map": "./static/js/main.b80d9c08.js.map"
   },
   "entrypoints": [
-    "static/css/main.074e2b31.css",
-    "static/js/main.a58ff436.js"
+    "static/css/main.4bafd904.css",
+    "static/js/main.b80d9c08.js"
   ]
 }
xinference/web/ui/build/index.html CHANGED

@@ -1 +1 @@
-<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="Web site created using create-react-app"/><link rel="apple-touch-icon" href="./logo192.png"/><link rel="manifest" href="./manifest.json"/><title>Xinference</title><script defer="defer" src="./static/js/main.a58ff436.js"></script><link href="./static/css/main.074e2b31.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
+<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="Web site created using create-react-app"/><link rel="apple-touch-icon" href="./logo192.png"/><link rel="manifest" href="./manifest.json"/><title>Xinference</title><script defer="defer" src="./static/js/main.b80d9c08.js"></script><link href="./static/css/main.4bafd904.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
xinference/web/ui/build/static/css/main.4bafd904.css ADDED

@@ -0,0 +1,2 @@
+.container{cursor:pointer;display:block}.container,.descriptionCard{border-radius:20px;height:300px;position:relative;width:300px}.descriptionCard{left:-1px;padding:20px;top:-1px}.cardTitle{display:flex;justify-content:space-between}.iconButtonBox{align-items:center;display:flex}.drawerCard{min-height:100%;padding:20px 80px 0;position:relative;width:60vw}.p{-webkit-line-clamp:4;-webkit-box-orient:vertical;display:-webkit-box;font-size:14px;overflow:hidden;padding:0 10px;text-overflow:ellipsis;word-break:break-word}.formContainer{height:80%;overflow:scroll;padding:0 10px}.buttonsContainer{align-items:center;bottom:50px;display:flex;justify-content:space-between;left:100px;position:absolute;right:100px}.buttonContainer{background-color:initial;border-width:0;width:45%}.buttonItem{border:1px solid #e5e7eb;border-radius:4px;padding:5px;width:100%}.instructionText{color:#666;font-size:12px;font-style:italic;margin:30px 0;text-align:center}.iconRow{bottom:20px;justify-content:space-between;left:20px;position:absolute;right:20px}.iconItem,.iconRow{align-items:center;display:flex}.iconItem{flex-direction:column;margin:20px}.boldIconText{font-size:1.2em;font-weight:700}.muiIcon{font-size:1.5em}.smallText{font-size:.8em}.dialogBox{background-color:#fff;height:607px;margin:32px;overflow-x:scroll;width:1241px}.dialogTitle{color:#000;display:flex;justify-content:space-between;padding:20px 20px 7px}.dialogTitle-model_name{font-size:18px;font-weight:700}.pathBox{cursor:pointer;overflow:hidden;text-overflow:ellipsis;white-space:nowrap;width:160px}.pathBox2{width:300px}.empty{color:#555;font-size:20px;left:50%;position:absolute;top:30%;-webkit-transform:translate(-50%);transform:translate(-50%)}.deleteDialog{align-items:center;display:flex}.warningIcon{color:#ed6c02;margin-right:10px}.jsonDialog{background-color:#fff;border-radius:8px;color:#000;display:flex;flex-direction:column;padding:10px 30px}.jsonDialog-title{align-items:center;display:flex;justify-content:space-between;margin:10px 0 20px}.title-name{font-size:16px;font-weight:700}.main-box{height:500px;width:700px}.textarea-box{border:1px solid #ddd;border-radius:5px;color:#444;height:100%;padding:5px 10px;resize:none;width:100%}.but-box{display:flex;justify-content:end;margin-top:20px}.copyText{color:#666;cursor:pointer;font-size:14px!important}.copyText:hover{color:#1976d2}.formBox{max-height:80vh;max-width:50vw;min-width:50vw;overflow:auto;padding:40px 20px 0 0;position:relative;transition:all .4s ease-in-out}.broaden{max-width:100%;min-width:100%;padding-right:0}.show-json{align-items:center;color:#444;display:flex;position:fixed;right:60px;top:90px}.icon{cursor:pointer;margin-left:20px;position:absolute;right:-40px}.icon:hover{color:#1976d2}.arrow{font-size:24px!important}.jsonBox{min-height:80vh;position:relative;transition:all .4s ease-in-out;width:100%}.hide{overflow:hidden;-webkit-transform:translate(30vw);transform:translate(30vw);width:0}.checkboxWrapper{align-items:center;display:flex;flex-wrap:wrap;width:100%}.jsonBox-header{align-items:center;display:flex;justify-content:space-between}.jsonBox-title{font-weight:700;line-height:40px}.textarea{border:1px solid #ddd;border-radius:5px;color:#444;height:calc(100% - 40px);padding:5px 10px;resize:none;width:100%}.addBtn{margin-left:20px!important}.item{background-color:#eee;border-radius:10px;margin:10px 50px 0;overflow:hidden;padding:20px;position:relative}.item:hover .deleteBtn{-webkit-transform:translateX(-50px);transform:translateX(-50px)}.deleteBtn{background-color:#1976d2;border-radius:25px;height:50px;line-height:70px;position:absolute;right:20px;text-align:center;top:calc(50% - 25px);-webkit-transform:translateX(80px);transform:translateX(80px);transition:all .3s ease-in-out;width:50px}.deleteBtn:hover{box-shadow:0 0 10px #aaa;cursor:pointer}.deleteIcon{color:#fff;font-size:28px!important}
+/*# sourceMappingURL=main.4bafd904.css.map*/
xinference/web/ui/build/static/css/main.4bafd904.css.map ADDED

@@ -0,0 +1 @@
+{"version":3,"file":"static/css/main.4bafd904.css","mappings":"AAAA,WAKE,cAAe,CAJf,aAMF,CACA,4BAFE,kBAAmB,CAFnB,YAAa,CAFb,iBAAkB,CAClB,WAaF,CARA,iBAGE,SAAU,CAGV,YAAa,CAJb,QAMF,CACA,WACE,YAAa,CACb,6BACF,CACA,eAEE,kBAAmB,CADnB,YAEF,CACA,YAGE,eAAgB,CADhB,mBAAoB,CADpB,iBAAkB,CAGlB,UACF,CACA,GAEE,oBAAqB,CACrB,2BAA4B,CAF5B,mBAAoB,CAMpB,cAAe,CAHf,eAAgB,CAIhB,cAAiB,CAHjB,sBAAuB,CACvB,qBAGF,CACA,eACE,UAAW,CACX,eAAgB,CAChB,cACF,CACA,kBAOE,kBAAmB,CALnB,WAAY,CAGZ,YAAa,CACb,6BAA8B,CAH9B,UAAW,CAFX,iBAAkB,CAGlB,WAIF,CACA,iBAGE,wBAA6B,CAD7B,cAAiB,CADjB,SAGF,CACA,YAME,wBAAqB,CAHrB,iBAAkB,CADlB,WAAY,CADZ,UAMF,CACA,iBAEE,UAAc,CADd,cAAe,CAEf,iBAAkB,CAClB,aAAc,CACd,iBACF,CACA,SAEE,WAAY,CAIZ,6BAA8B,CAH9B,SAAU,CAFV,iBAAkB,CAGlB,UAIF,CACA,mBAFE,kBAAmB,CAFnB,YASF,CALA,UAEE,qBAAsB,CAEtB,WACF,CACA,cAEE,eAAgB,CADhB,eAEF,CACA,SACE,eACF,CACA,WACE,cACF,CACA,WAGE,qBAAsB,CADtB,YAAa,CAEb,WAAY,CACZ,iBAAkB,CAJlB,YAKF,CACA,aAIE,UAAW,CAHX,YAAa,CACb,6BAA8B,CAC9B,qBAEF,CACA,wBACE,cAAe,CACf,eACF,CACA,SAEE,cAAe,CACf,eAAgB,CAEhB,sBAAuB,CADvB,kBAAmB,CAHnB,WAKF,CACA,UACE,WACF,CACA,OAKE,UAAW,CADX,cAAe,CAFf,QAAS,CADT,iBAAkB,CAElB,OAAQ,CAGR,iCAA6B,CAA7B,yBACF,CACA,cAEE,kBAAmB,CADnB,YAEF,CACA,aAEE,aAAuB,CADvB,iBAEF,CACA,YAIE,qBAAsB,CAEtB,iBAAkB,CADlB,UAAW,CAJX,YAAa,CACb,qBAAsB,CACtB,iBAIF,CACA,kBAGE,kBAAmB,CAFnB,YAAa,CACb,6BAA8B,CAE9B,kBACF,CACA,YACE,cAAe,CACf,eACF,CACA,UAEE,YAAa,CADb,WAEF,CACA,cAIE,qBAAsB,CACtB,iBAAkB,CAElB,UAAW,CALX,WAAY,CACZ,gBAAiB,CAGjB,WAAY,CALZ,UAOF,CACA,SACE,YAAa,CACb,mBAAoB,CACpB,eACF,CClLA,UAEE,UAAW,CACX,cAAe,CAFf,wBAGF,CAEA,gBACE,aACF,CCRA,SAIE,eAAgB,CAFhB,cAAe,CACf,cAAe,CAEf,aAAc,CACd,qBAAsB,CALtB,iBAAkB,CAMlB,8BACF,CAEA,SACE,cAAe,CACf,cAAe,CACf,eACF,CAEA,WAEE,kBAAmB,CAInB,UAAW,CALX,YAAa,CAEb,cAAe,CAEf,UAAW,CADX,QAGF,CAEA,MAGE,cAAe,CACf,gBAAiB,CAHjB,iBAAkB,CAClB,WAGF,CAEA,YACE,aACF,CAEA,OACE,wBACF,CAEA,SAEE,eAAgB,CADhB,iBAAkB,CAGlB,8BAAgC,CADhC,UAEF,CAEA,MAGE,eAAgB,CADhB,iCAA6B,CAA7B,yBAA6B,CAD7B,OAGF,CAEA,iBAGE,kBAAmB,CAFnB,YAAa,CACb,cAAe,CAEf,UACF,CAEA,gBAGE,kBAAmB,CAFnB,YAAa,CACb,6BAEF,CAEA,eAEE,eAAgB,CADhB,gBAEF,CAEA,UAIE,qBAAsB,CACtB,iBAAkB,CAElB,UAAW,CALX,wBAAyB,CACzB,gBAAiB,CAGjB,WAAY,CALZ,UAOF,CAEA,QACE,0BACF,CAEA,MAEE,qBAAsB,CAGtB,kBAAmB,CAFnB,kBAAmB,CAGnB,eAAgB,CAFhB,YAAa,CAHb,iBAMF,CAEA,uBACE,mCAA4B,CAA5B,2BACF,CAEA,WAUE,wBAAyB,CADzB,kBAAmB,CAJnB,WAAY,CAGZ,gBAAiB,CAPjB,iBAAkB,CAClB,UAAW,CAKX,iBAAkB,CAJlB,oBAAqB,CAGrB,kCAA2B,CAA3B,0BAA2B,CAK3B,8BAAgC,CAPhC,UAQF,CAEA,iBAEE,wBAAyB,CADzB,cAEF,CAEA,YAEE,UAAW,CADX,wBAEF","sources":["scenes/launch_model/styles/modelCardStyle.css","components/copyComponent/style.css","scenes/register_model/styles/registerModelStyle.css"],"sourcesContent":[".container {\n display: block;\n position: relative;\n width: 300px;\n height: 300px;\n cursor: pointer;\n border-radius: 20px;\n}\n.descriptionCard {\n position: relative;\n top: -1px;\n left: -1px;\n width: 300px;\n height: 300px;\n padding: 20px;\n border-radius: 20px;\n}\n.cardTitle {\n display: flex;\n justify-content: space-between;\n}\n.iconButtonBox {\n display: flex;\n align-items: center;\n}\n.drawerCard {\n position: relative;\n padding: 20px 80px 0;\n min-height: 100%;\n width: 60vw;\n}\n.p {\n display: -webkit-box;\n -webkit-line-clamp: 4;\n -webkit-box-orient: vertical;\n overflow: hidden;\n text-overflow: ellipsis;\n word-break: break-word;\n font-size: 14px;\n padding: 0px 10px;\n}\n.formContainer {\n height: 80%;\n overflow: scroll;\n padding: 0 10px;\n}\n.buttonsContainer {\n position: absolute;\n bottom: 50px;\n left: 100px;\n right: 100px;\n display: flex;\n justify-content: space-between;\n align-items: center;\n}\n.buttonContainer {\n width: 45%;\n border-width: 0px;\n background-color: transparent;\n}\n.buttonItem {\n width: 100%;\n padding: 5px;\n border-radius: 4px;\n border: 1px solid #e5e7eb;\n border-width: 1px;\n border-color: #e5e7eb;\n}\n.instructionText {\n font-size: 12px;\n color: #666666;\n font-style: italic;\n margin: 30px 0;\n text-align: center;\n}\n.iconRow {\n position: absolute;\n bottom: 20px;\n left: 20px;\n right: 20px;\n display: flex;\n justify-content: space-between;\n align-items: center;\n}\n.iconItem {\n display: flex;\n flex-direction: column;\n align-items: center;\n margin: 20px;\n}\n.boldIconText {\n font-weight: bold;\n font-size: 1.2em;\n}\n.muiIcon {\n font-size: 1.5em;\n}\n.smallText {\n font-size: 0.8em;\n}\n.dialogBox {\n width: 1241px;\n height: 607px;\n background-color: #fff;\n margin: 32px;\n overflow-x: scroll;\n}\n.dialogTitle {\n display: flex;\n justify-content: space-between;\n padding: 20px 20px 7px;\n color: #000;\n}\n.dialogTitle-model_name {\n font-size: 18px;\n font-weight: 700;\n}\n.pathBox {\n width: 160px;\n cursor: pointer;\n overflow: hidden;\n white-space: nowrap;\n text-overflow: ellipsis;\n}\n.pathBox2 {\n width: 300px;\n}\n.empty {\n position: absolute;\n left: 50%;\n top: 30%;\n font-size: 20px;\n color: #555;\n transform: translate(-50%, 0);\n}\n.deleteDialog {\n display: flex;\n align-items: center;\n}\n.warningIcon {\n margin-right: 10px;\n color: rgb(237, 108, 2);\n}\n.jsonDialog {\n display: flex;\n flex-direction: column;\n padding: 10px 30px;\n background-color: #fff;\n color: #000;\n border-radius: 8px;\n}\n.jsonDialog-title {\n display: flex;\n justify-content: space-between;\n align-items: center;\n margin: 10px 0 20px 0;\n}\n.title-name {\n font-size: 16px;\n font-weight: 700;\n}\n.main-box {\n width: 700px;\n height: 500px;\n}\n.textarea-box {\n width: 100%;\n height: 100%;\n padding: 5px 10px;\n border: 1px solid #ddd;\n border-radius: 5px;\n resize: none;\n color: #444;\n}\n.but-box {\n display: flex;\n justify-content: end;\n margin-top: 20px;\n}\n",".copyText {\n font-size: 14px !important;\n color: #666;\n cursor: pointer;\n}\n\n.copyText:hover {\n color: #1976d2;\n}\n",".formBox {\n position: relative;\n max-width: 50vw;\n min-width: 50vw;\n max-height: 80vh;\n overflow: auto;\n padding: 40px 20px 0 0;\n transition: all 0.4s ease-in-out;\n}\n\n.broaden {\n max-width: 100%;\n min-width: 100%;\n padding-right: 0;\n}\n\n.show-json {\n display: flex;\n align-items: center;\n position: fixed;\n top: 90px;\n right: 60px;\n color: #444;\n}\n\n.icon {\n position: absolute;\n right: -40px;\n cursor: pointer;\n margin-left: 20px;\n}\n\n.icon:hover {\n color: #1976d2;\n}\n\n.arrow {\n font-size: 24px !important;\n}\n\n.jsonBox {\n position: relative;\n min-height: 80vh;\n width: 100%;\n transition: all 0.4s ease-in-out;\n}\n\n.hide {\n width: 0;\n transform: translate(30vw, 0);\n overflow: hidden;\n}\n\n.checkboxWrapper {\n display: flex;\n flex-wrap: wrap;\n align-items: center;\n width: 100%;\n}\n\n.jsonBox-header {\n display: flex;\n justify-content: space-between;\n align-items: center;\n}\n\n.jsonBox-title {\n line-height: 40px;\n font-weight: 700;\n}\n\n.textarea {\n width: 100%;\n height: calc(100% - 40px);\n padding: 5px 10px;\n border: 1px solid #ddd;\n border-radius: 5px;\n resize: none;\n color: #444;\n}\n\n.addBtn {\n margin-left: 20px !important;\n}\n\n.item {\n position: relative;\n background-color: #eee;\n margin: 10px 50px 0;\n padding: 20px;\n border-radius: 10px;\n overflow: hidden;\n}\n\n.item:hover .deleteBtn {\n transform: translateX(-50px);\n}\n\n.deleteBtn {\n position: absolute;\n right: 20px;\n top: calc(50% - 25px);\n width: 50px;\n height: 50px;\n transform: translateX(80px);\n text-align: center;\n line-height: 70px;\n border-radius: 25px;\n background-color: #1976d2;\n transition: all 0.3s ease-in-out;\n}\n\n.deleteBtn:hover {\n cursor: pointer;\n box-shadow: 0 0 10px #aaa;\n}\n\n.deleteIcon {\n font-size: 28px !important;\n color: #fff;\n}\n"],"names":[],"sourceRoot":""}