xinference 0.14.1.post1__py3-none-any.whl → 0.14.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of xinference has been flagged as possibly problematic.
- xinference/_version.py +3 -3
- xinference/api/restful_api.py +15 -34
- xinference/client/restful/restful_client.py +2 -2
- xinference/core/chat_interface.py +45 -10
- xinference/core/image_interface.py +9 -0
- xinference/core/model.py +8 -5
- xinference/core/scheduler.py +1 -2
- xinference/core/worker.py +49 -42
- xinference/deploy/cmdline.py +2 -2
- xinference/deploy/test/test_cmdline.py +7 -7
- xinference/model/audio/chattts.py +24 -9
- xinference/model/audio/core.py +8 -2
- xinference/model/audio/fish_speech.py +228 -0
- xinference/model/audio/model_spec.json +8 -0
- xinference/model/embedding/core.py +23 -1
- xinference/model/image/model_spec.json +2 -1
- xinference/model/image/model_spec_modelscope.json +2 -1
- xinference/model/image/stable_diffusion/core.py +49 -1
- xinference/model/llm/__init__.py +26 -27
- xinference/model/llm/{ggml/llamacpp.py → llama_cpp/core.py} +2 -35
- xinference/model/llm/llm_family.json +606 -1266
- xinference/model/llm/llm_family.py +16 -139
- xinference/model/llm/llm_family_modelscope.json +276 -313
- xinference/model/llm/lmdeploy/__init__.py +0 -0
- xinference/model/llm/lmdeploy/core.py +557 -0
- xinference/model/llm/memory.py +9 -9
- xinference/model/llm/sglang/core.py +2 -2
- xinference/model/llm/{pytorch → transformers}/chatglm.py +6 -13
- xinference/model/llm/{pytorch → transformers}/cogvlm2.py +4 -45
- xinference/model/llm/transformers/cogvlm2_video.py +524 -0
- xinference/model/llm/{pytorch → transformers}/core.py +3 -10
- xinference/model/llm/{pytorch → transformers}/glm4v.py +2 -23
- xinference/model/llm/transformers/intern_vl.py +540 -0
- xinference/model/llm/{pytorch → transformers}/internlm2.py +4 -8
- xinference/model/llm/{pytorch → transformers}/minicpmv25.py +2 -23
- xinference/model/llm/{pytorch → transformers}/minicpmv26.py +66 -41
- xinference/model/llm/{pytorch → transformers}/utils.py +1 -2
- xinference/model/llm/{pytorch → transformers}/yi_vl.py +2 -24
- xinference/model/llm/utils.py +85 -70
- xinference/model/llm/vllm/core.py +110 -11
- xinference/model/utils.py +1 -95
- xinference/thirdparty/fish_speech/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/callbacks/__init__.py +3 -0
- xinference/thirdparty/fish_speech/fish_speech/callbacks/grad_norm.py +113 -0
- xinference/thirdparty/fish_speech/fish_speech/configs/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/configs/lora/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/conversation.py +2 -0
- xinference/thirdparty/fish_speech/fish_speech/datasets/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/datasets/concat_repeat.py +53 -0
- xinference/thirdparty/fish_speech/fish_speech/datasets/protos/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/datasets/protos/text_data_pb2.py +33 -0
- xinference/thirdparty/fish_speech/fish_speech/datasets/protos/text_data_stream.py +36 -0
- xinference/thirdparty/fish_speech/fish_speech/datasets/semantic.py +496 -0
- xinference/thirdparty/fish_speech/fish_speech/datasets/vqgan.py +147 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/__init__.py +3 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/core.py +40 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/locale/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/locale/en_US.json +122 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/locale/es_ES.json +122 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/locale/ja_JP.json +123 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/locale/pt_BR.json +133 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/locale/zh_CN.json +122 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/scan.py +122 -0
- xinference/thirdparty/fish_speech/fish_speech/models/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/models/text2semantic/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/models/text2semantic/lit_module.py +202 -0
- xinference/thirdparty/fish_speech/fish_speech/models/text2semantic/llama.py +779 -0
- xinference/thirdparty/fish_speech/fish_speech/models/text2semantic/lora.py +92 -0
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/__init__.py +3 -0
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/lit_module.py +442 -0
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/modules/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/modules/discriminator.py +44 -0
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/modules/firefly.py +625 -0
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/modules/fsq.py +139 -0
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/modules/reference.py +115 -0
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/modules/wavenet.py +225 -0
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/utils.py +94 -0
- xinference/thirdparty/fish_speech/fish_speech/scheduler.py +40 -0
- xinference/thirdparty/fish_speech/fish_speech/text/__init__.py +4 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/basic_class.py +172 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/basic_constant.py +30 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/basic_util.py +342 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/cardinal.py +32 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/date.py +75 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/digit.py +32 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/fraction.py +35 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/money.py +43 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/percentage.py +33 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/telephone.py +51 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/text.py +177 -0
- xinference/thirdparty/fish_speech/fish_speech/text/clean.py +69 -0
- xinference/thirdparty/fish_speech/fish_speech/text/spliter.py +130 -0
- xinference/thirdparty/fish_speech/fish_speech/train.py +139 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/__init__.py +23 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/braceexpand.py +217 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/context.py +13 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/file.py +16 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/instantiators.py +50 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/logger.py +55 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/logging_utils.py +48 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/rich_utils.py +100 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/spectrogram.py +122 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/utils.py +114 -0
- xinference/thirdparty/fish_speech/fish_speech/webui/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/webui/launch_utils.py +120 -0
- xinference/thirdparty/fish_speech/fish_speech/webui/manage.py +1237 -0
- xinference/thirdparty/fish_speech/tools/__init__.py +0 -0
- xinference/thirdparty/fish_speech/tools/api.py +495 -0
- xinference/thirdparty/fish_speech/tools/auto_rerank.py +159 -0
- xinference/thirdparty/fish_speech/tools/download_models.py +55 -0
- xinference/thirdparty/fish_speech/tools/extract_model.py +21 -0
- xinference/thirdparty/fish_speech/tools/file.py +108 -0
- xinference/thirdparty/fish_speech/tools/gen_ref.py +36 -0
- xinference/thirdparty/fish_speech/tools/llama/__init__.py +0 -0
- xinference/thirdparty/fish_speech/tools/llama/build_dataset.py +169 -0
- xinference/thirdparty/fish_speech/tools/llama/eval_in_context.py +171 -0
- xinference/thirdparty/fish_speech/tools/llama/generate.py +698 -0
- xinference/thirdparty/fish_speech/tools/llama/merge_lora.py +95 -0
- xinference/thirdparty/fish_speech/tools/llama/quantize.py +497 -0
- xinference/thirdparty/fish_speech/tools/llama/rebuild_tokenizer.py +57 -0
- xinference/thirdparty/fish_speech/tools/merge_asr_files.py +55 -0
- xinference/thirdparty/fish_speech/tools/post_api.py +164 -0
- xinference/thirdparty/fish_speech/tools/sensevoice/__init__.py +0 -0
- xinference/thirdparty/fish_speech/tools/sensevoice/auto_model.py +573 -0
- xinference/thirdparty/fish_speech/tools/sensevoice/fun_asr.py +332 -0
- xinference/thirdparty/fish_speech/tools/sensevoice/vad_utils.py +61 -0
- xinference/thirdparty/fish_speech/tools/smart_pad.py +47 -0
- xinference/thirdparty/fish_speech/tools/vqgan/__init__.py +0 -0
- xinference/thirdparty/fish_speech/tools/vqgan/create_train_split.py +83 -0
- xinference/thirdparty/fish_speech/tools/vqgan/extract_vq.py +227 -0
- xinference/thirdparty/fish_speech/tools/vqgan/inference.py +120 -0
- xinference/thirdparty/fish_speech/tools/webui.py +619 -0
- xinference/thirdparty/fish_speech/tools/whisper_asr.py +176 -0
- xinference/thirdparty/internvl/__init__.py +0 -0
- xinference/thirdparty/internvl/conversation.py +393 -0
- xinference/thirdparty/omnilmm/model/utils.py +16 -1
- xinference/web/ui/build/asset-manifest.json +3 -3
- xinference/web/ui/build/index.html +1 -1
- xinference/web/ui/build/static/js/main.661c7b0a.js +3 -0
- xinference/web/ui/build/static/js/{main.17ca0398.js.map → main.661c7b0a.js.map} +1 -1
- xinference/web/ui/node_modules/.cache/babel-loader/070d8c6b3b0f3485c6d3885f0b6bbfdf9643e088a468acbd5d596f2396071c16.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/213b5913e164773c2b0567455377765715f5f07225fbac77ad8e1e9dc9648a47.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/4de9a6942c5f1749d6cbfdd54279699975f16016b182848bc253886f52ec2ec3.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/5391543180fead1eeef5364300301498d58a7d91d62de3841a32768b67f4552f.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/5c26a23b5eacf5b752a08531577ae3840bb247745ef9a39583dc2d05ba93a82a.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/714c37ce0ec5b5c591033f02be2f3f491fdd70da3ef568ee4a4f94689a3d5ca2.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/822586ed1077201b64b954f12f25e3f9b45678c1acbabe53d8af3ca82ca71f33.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/978b57d1a04a701bc3fcfebc511f5f274eed6ed7eade67f6fb76c27d5fd9ecc8.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/a797831de0dc74897f4b50b3426555d748f328b4c2cc391de709eadaf6a5f3e3.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/bd6ad8159341315a1764c397621a560809f7eb7219ab5174c801fca7e969d943.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/e64b7e8cedcf43d4c95deba60ec1341855c887705805bb62431693118b870c69.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/e91938976f229ce986b2907e51e1f00540b584ced0a315d498c172d13220739d.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/f72f011744c4649fabddca6f7a9327861ac0a315a89b1a2e62a39774e7863845.json +1 -0
- {xinference-0.14.1.post1.dist-info → xinference-0.14.3.dist-info}/METADATA +22 -13
- {xinference-0.14.1.post1.dist-info → xinference-0.14.3.dist-info}/RECORD +170 -79
- xinference/locale/utils.py +0 -39
- xinference/locale/zh_CN.json +0 -26
- xinference/model/llm/ggml/tools/__init__.py +0 -15
- xinference/model/llm/ggml/tools/convert_ggml_to_gguf.py +0 -498
- xinference/model/llm/ggml/tools/gguf.py +0 -884
- xinference/model/llm/pytorch/__init__.py +0 -13
- xinference/model/llm/pytorch/baichuan.py +0 -81
- xinference/model/llm/pytorch/falcon.py +0 -138
- xinference/model/llm/pytorch/intern_vl.py +0 -352
- xinference/model/llm/pytorch/vicuna.py +0 -69
- xinference/web/ui/build/static/js/main.17ca0398.js +0 -3
- xinference/web/ui/node_modules/.cache/babel-loader/1444c41a4d04494f1cbc2d8c1537df107b451cb569cb2c1fbf5159f3a4841a5f.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/2f40209b32e7e46a2eab6b8c8a355eb42c3caa8bc3228dd929f32fd2b3940294.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/44774c783428f952d8e2e4ad0998a9c5bc16a57cd9c68b7c5ff18aaa5a41d65c.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/5262556baf9207738bf6a8ba141ec6599d0a636345c245d61fdf88d3171998cb.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/6450605fac003812485f6251b9f0caafbf2e5bfc3bbe2f000050d9e2fdb8dcd3.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/71684495d995c7e266eecc6a0ad8ea0284cc785f80abddf863789c57a6134969.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/80acd1edf31542ab1dcccfad02cb4b38f3325cff847a781fcce97500cfd6f878.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/8a9742ddd8ba8546ef42dc14caca443f2b4524fabed7bf269e0eff3b7b64ee7d.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/d06a96a3c9c32e42689094aa3aaad41c8125894e956b8f84a70fadce6e3f65b3.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/d93730e2b5d7e8c957b4d0965d2ed1dac9045a649adbd47c220d11f255d4b1e0.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/e656dc00b4d8b387f0a81ba8fc558767df1601c66369e2eb86a5ef27cf080572.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/f28b83886159d83b84f099b05d607a822dca4dd7f2d8aa6d56fe08bab0b5b086.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/f3e02274cb1964e99b1fe69cbb6db233d3d8d7dd05d50ebcdb8e66d50b224b7b.json +0 -1
- /xinference/{locale → model/llm/llama_cpp}/__init__.py +0 -0
- /xinference/model/llm/{ggml → transformers}/__init__.py +0 -0
- /xinference/model/llm/{pytorch → transformers}/compression.py +0 -0
- /xinference/model/llm/{pytorch → transformers}/deepseek_vl.py +0 -0
- /xinference/model/llm/{pytorch → transformers}/llama_2.py +0 -0
- /xinference/model/llm/{pytorch → transformers}/omnilmm.py +0 -0
- /xinference/model/llm/{pytorch → transformers}/qwen_vl.py +0 -0
- /xinference/model/llm/{pytorch → transformers}/tensorizer_utils.py +0 -0
- /xinference/web/ui/build/static/js/{main.17ca0398.js.LICENSE.txt → main.661c7b0a.js.LICENSE.txt} +0 -0
- {xinference-0.14.1.post1.dist-info → xinference-0.14.3.dist-info}/LICENSE +0 -0
- {xinference-0.14.1.post1.dist-info → xinference-0.14.3.dist-info}/WHEEL +0 -0
- {xinference-0.14.1.post1.dist-info → xinference-0.14.3.dist-info}/entry_points.txt +0 -0
- {xinference-0.14.1.post1.dist-info → xinference-0.14.3.dist-info}/top_level.txt +0 -0
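The headline changes in this release, as reflected in the file list: the LLM backends are reorganized (ggml → llama_cpp, pytorch → transformers), a new lmdeploy backend is added, and FishSpeech text-to-speech support arrives (a new xinference/model/audio/fish_speech.py plus the vendored xinference/thirdparty/fish_speech tree). Below is a minimal sketch of launching the new audio model through the RESTful client; the model name "FishSpeech-1.2-SFT" and the speech() call are assumptions based on Xinference's existing audio API and may not match this release exactly.

    from xinference.client import Client

    client = Client("http://localhost:9997")  # a locally running xinference server

    # Assumed model name; check xinference/model/audio/model_spec.json for the
    # entry this release actually registers.
    model_uid = client.launch_model(model_name="FishSpeech-1.2-SFT", model_type="audio")
    model = client.get_model(model_uid)

    # Text-to-speech via the OpenAI-style /v1/audio/speech endpoint; returns raw bytes.
    audio_bytes = model.speech(input="Hello from Xinference!")
    with open("hello.mp3", "wb") as f:
        f.write(audio_bytes)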
xinference/thirdparty/fish_speech/tools/whisper_asr.py (new file)

@@ -0,0 +1,176 @@
+"""
+Used to transcribe all audio files in one folder into another folder.
+e.g.
+Directory structure:
+--pre_data_root
+----SP_1
+------01.wav
+------02.wav
+------......
+----SP_2
+------01.wav
+------02.wav
+------......
+Use
+python tools/whisper_asr.py --audio-dir pre_data_root/SP_1 --save-dir data/SP_1
+to transcribe the first speaker.
+
+Use
+python tools/whisper_asr.py --audio-dir pre_data_root/SP_2 --save-dir data/SP_2
+to transcribe the second speaker.
+
+Note: Be aware of your audio sample rate, which defaults to 44.1kHz.
+"""
+
+import re
+from pathlib import Path
+
+import click
+import soundfile as sf
+from faster_whisper import WhisperModel
+from loguru import logger
+from pydub import AudioSegment
+from tqdm import tqdm
+
+from tools.file import AUDIO_EXTENSIONS, list_files
+
+
+@click.command()
+@click.option("--model-size", default="large-v3", help="Size of the Whisper model")
+@click.option(
+    "--compute-type",
+    default="float16",
+    help="Computation Precision of the Whisper model [float16 / int8_float16 / int8]",
+)
+@click.option("--audio-dir", required=True, help="Directory containing audio files")
+@click.option(
+    "--save-dir", required=True, help="Directory to save processed audio files"
+)
+@click.option(
+    "--sample-rate",
+    default=44100,
+    type=int,
+    help="Output sample rate, default to input sample rate",
+)
+@click.option("--device", default="cuda", help="Device to use [cuda / cpu]")
+@click.option("--language", default="auto", help="Language of the transcription")
+@click.option("--initial-prompt", default=None, help="Initial prompt for transcribing")
+def main(
+    model_size,
+    compute_type,
+    audio_dir,
+    save_dir,
+    sample_rate,
+    device,
+    language,
+    initial_prompt,
+):
+    logger.info("Loading / Downloading Faster Whisper model...")
+
+    model = WhisperModel(
+        model_size,
+        device=device,
+        compute_type=compute_type,
+        download_root="faster_whisper",
+    )
+
+    logger.info("Model loaded.")
+
+    save_path = Path(save_dir)
+    save_path.mkdir(parents=True, exist_ok=True)
+
+    audio_files = list_files(
+        path=audio_dir, extensions=AUDIO_EXTENSIONS, recursive=True
+    )
+
+    for file_path in tqdm(audio_files, desc="Processing audio file"):
+        file_stem = file_path.stem
+        file_suffix = file_path.suffix
+
+        rel_path = Path(file_path).relative_to(audio_dir)
+        (save_path / rel_path.parent).mkdir(parents=True, exist_ok=True)
+
+        audio = AudioSegment.from_file(file_path)
+
+        segments, info = model.transcribe(
+            file_path,
+            beam_size=5,
+            language=None if language == "auto" else language,
+            initial_prompt=initial_prompt,
+        )
+
+        print(
+            "Detected language '%s' with probability %f"
+            % (info.language, info.language_probability)
+        )
+        print("Total len(ms): ", len(audio))
+
+        whole_text = None
+        for segment in segments:
+            id, start, end, text = (
+                segment.id,
+                segment.start,
+                segment.end,
+                segment.text,
+            )
+            print("Segment %03d [%.2fs -> %.2fs] %s" % (id, start, end, text))
+            if not whole_text:
+                whole_text = text
+            else:
+                whole_text += ", " + text
+
+        whole_text += "."
+
+        audio_save_path = save_path / rel_path.parent / f"{file_stem}{file_suffix}"
+        audio.export(audio_save_path, format=file_suffix[1:])
+        print(f"Exported {audio_save_path}")
+
+        transcript_save_path = save_path / rel_path.parent / f"{file_stem}.lab"
+        with open(
+            transcript_save_path,
+            "w",
+            encoding="utf-8",
+        ) as f:
+            f.write(whole_text)
+
+
+if __name__ == "__main__":
+    main()
+    exit(0)
+
+    audio = AudioSegment.from_wav(
+        r"D:\PythonProject\原神语音中文\胡桃\vo_hutao_draw_appear.wav"
+    )
+
+    model_size = "large-v3"
+
+    model = WhisperModel(
+        model_size,
+        device="cuda",
+        compute_type="float16",
+        download_root="faster_whisper",
+    )
+
+    segments, info = model.transcribe(
+        r"D:\PythonProject\原神语音中文\胡桃\vo_hutao_draw_appear.wav",
+        beam_size=5,
+    )
+
+    print(
+        "Detected language '%s' with probability %f"
+        % (info.language, info.language_probability)
+    )
+    print("Total len(ms): ", len(audio))
+
+    for i, segment in enumerate(segments):
+        print(
+            "Segment %03d [%.2fs -> %.2fs] %s"
+            % (i, segment.start, segment.end, segment.text)
+        )
+        start_ms = int(segment.start * 1000)
+        end_ms = int(segment.end * 1000)
+        segment_audio = audio[start_ms:end_ms]
+        segment_audio.export(f"segment_{i:03d}.wav", format="wav")
+        print(f"Exported segment_{i:03d}.wav")
+
+    print("All segments have been exported.")
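For each input clip, the script above re-exports the audio and writes the transcript beside it as a UTF-8 .lab file, presumably the pairing fish-speech's dataset tooling expects. A minimal sketch of walking those pairs back out, assuming the default .wav inputs and the hypothetical --save-dir from the docstring:

    from pathlib import Path

    save_dir = Path("data/SP_1")  # hypothetical --save-dir from the usage above
    for lab in sorted(save_dir.rglob("*.lab")):
        audio = lab.with_suffix(".wav")  # assumes the source clips were .wav
        text = lab.read_text(encoding="utf-8")
        print(f"{audio.name}: {text}")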
xinference/thirdparty/internvl/conversation.py (new file)

@@ -0,0 +1,393 @@
+"""
+Conversation prompt templates.
+
+We kindly request that you import fastchat instead of copying this file if you wish to use it.
+If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
+"""
+
+import dataclasses
+from enum import IntEnum, auto
+from typing import Any, Dict, List, Tuple, Union
+
+
+class SeparatorStyle(IntEnum):
+    """Separator styles."""
+
+    ADD_COLON_SINGLE = auto()
+    ADD_COLON_TWO = auto()
+    ADD_COLON_SPACE_SINGLE = auto()
+    NO_COLON_SINGLE = auto()
+    NO_COLON_TWO = auto()
+    ADD_NEW_LINE_SINGLE = auto()
+    LLAMA2 = auto()
+    CHATGLM = auto()
+    CHATML = auto()
+    CHATINTERN = auto()
+    DOLLY = auto()
+    RWKV = auto()
+    PHOENIX = auto()
+    ROBIN = auto()
+    FALCON_CHAT = auto()
+    CHATGLM3 = auto()
+    INTERNVL_ZH = auto()
+    MPT = auto()
+
+
+@dataclasses.dataclass
+class Conversation:
+    """A class that manages prompt templates and keeps all conversation history."""
+
+    # The name of this template
+    name: str
+    # The template of the system prompt
+    system_template: str = '{system_message}'
+    # The system message
+    system_message: str = ''
+    # The names of two roles
+    roles: Tuple[str] = ('USER', 'ASSISTANT')
+    # All messages. Each item is (role, message).
+    messages: List[List[str]] = ()
+    # The number of few shot examples
+    offset: int = 0
+    # The separator style and configurations
+    sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
+    sep: str = '\n'
+    sep2: str = None
+    # Stop criteria (the default one is EOS token)
+    stop_str: Union[str, List[str]] = None
+    # Stops generation if meeting any token in this list
+    stop_token_ids: List[int] = None
+
+    def get_prompt(self) -> str:
+        """Get the prompt for generation."""
+        system_prompt = self.system_template.format(system_message=self.system_message)
+        if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
+            ret = system_prompt + self.sep
+            for role, message in self.messages:
+                if message:
+                    ret += role + ': ' + message + self.sep
+                else:
+                    ret += role + ':'
+            return ret
+        elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
+            seps = [self.sep, self.sep2]
+            ret = system_prompt + seps[0]
+            for i, (role, message) in enumerate(self.messages):
+                if message:
+                    ret += role + ': ' + message + seps[i % 2]
+                else:
+                    ret += role + ':'
+            return ret
+        elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:
+            ret = system_prompt + self.sep
+            for role, message in self.messages:
+                if message:
+                    ret += role + ': ' + message + self.sep
+                else:
+                    ret += role + ': '  # must be end with a space
+            return ret
+        elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:
+            ret = '' if system_prompt == '' else system_prompt + self.sep
+            for role, message in self.messages:
+                if message:
+                    ret += role + '\n' + message + self.sep
+                else:
+                    ret += role + '\n'
+            return ret
+        elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
+            ret = system_prompt
+            for role, message in self.messages:
+                if message:
+                    ret += role + message + self.sep
+                else:
+                    ret += role
+            return ret
+        elif self.sep_style == SeparatorStyle.NO_COLON_TWO:
+            seps = [self.sep, self.sep2]
+            ret = system_prompt
+            for i, (role, message) in enumerate(self.messages):
+                if message:
+                    ret += role + message + seps[i % 2]
+                else:
+                    ret += role
+            return ret
+        elif self.sep_style == SeparatorStyle.RWKV:
+            ret = system_prompt
+            for i, (role, message) in enumerate(self.messages):
+                if message:
+                    ret += (
+                        role
+                        + ': '
+                        + message.replace('\r\n', '\n').replace('\n\n', '\n')
+                    )
+                    ret += '\n\n'
+                else:
+                    ret += role + ':'
+            return ret
+        elif self.sep_style == SeparatorStyle.LLAMA2:
+            seps = [self.sep, self.sep2]
+            if self.system_message:
+                ret = system_prompt
+            else:
+                ret = '[INST] '
+            for i, (role, message) in enumerate(self.messages):
+                tag = self.roles[i % 2]
+                if message:
+                    if i == 0:
+                        ret += message + ' '
+                    else:
+                        ret += tag + ' ' + message + seps[i % 2]
+                else:
+                    ret += tag
+            return ret
+        elif self.sep_style == SeparatorStyle.CHATGLM:
+            # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
+            # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
+            round_add_n = 1 if self.name == 'chatglm2' else 0
+            if system_prompt:
+                ret = system_prompt + self.sep
+            else:
+                ret = ''
+
+            for i, (role, message) in enumerate(self.messages):
+                if i % 2 == 0:
+                    ret += f'[Round {i//2 + round_add_n}]{self.sep}'
+
+                if message:
+                    ret += f'{role}:{message}{self.sep}'
+                else:
+                    ret += f'{role}:'
+            return ret
+        elif self.sep_style == SeparatorStyle.CHATML:
+            ret = '' if system_prompt == '' else system_prompt + self.sep + '\n'
+            for role, message in self.messages:
+                if message:
+                    ret += role + '\n' + message + self.sep + '\n'
+                else:
+                    ret += role + '\n'
+            return ret
+        elif self.sep_style == SeparatorStyle.CHATGLM3:
+            ret = ''
+            if self.system_message:
+                ret += system_prompt
+            for role, message in self.messages:
+                if message:
+                    ret += role + '\n' + ' ' + message
+                else:
+                    ret += role
+            return ret
+        elif self.sep_style == SeparatorStyle.CHATINTERN:
+            # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771
+            seps = [self.sep, self.sep2]
+            ret = system_prompt
+            for i, (role, message) in enumerate(self.messages):
+                # if i % 2 == 0:
+                #     ret += "<s>"
+                if message:
+                    ret += role + ':' + message + seps[i % 2] + '\n'
+                else:
+                    ret += role + ':'
+            return ret
+        elif self.sep_style == SeparatorStyle.DOLLY:
+            seps = [self.sep, self.sep2]
+            ret = system_prompt
+            for i, (role, message) in enumerate(self.messages):
+                if message:
+                    ret += role + ':\n' + message + seps[i % 2]
+                    if i % 2 == 1:
+                        ret += '\n\n'
+                else:
+                    ret += role + ':\n'
+            return ret
+        elif self.sep_style == SeparatorStyle.PHOENIX:
+            ret = system_prompt
+            for role, message in self.messages:
+                if message:
+                    ret += role + ': ' + '<s>' + message + '</s>'
+                else:
+                    ret += role + ': ' + '<s>'
+            return ret
+        elif self.sep_style == SeparatorStyle.ROBIN:
+            ret = system_prompt + self.sep
+            for role, message in self.messages:
+                if message:
+                    ret += role + ':\n' + message + self.sep
+                else:
+                    ret += role + ':\n'
+            return ret
+        elif self.sep_style == SeparatorStyle.FALCON_CHAT:
+            ret = ''
+            if self.system_message:
+                ret += system_prompt + self.sep
+            for role, message in self.messages:
+                if message:
+                    ret += role + ': ' + message + self.sep
+                else:
+                    ret += role + ':'
+
+            return ret
+        elif self.sep_style == SeparatorStyle.INTERNVL_ZH:
+            seps = [self.sep, self.sep2]
+            ret = self.system_message + seps[0]
+            for i, (role, message) in enumerate(self.messages):
+                if message:
+                    ret += role + ': ' + message + seps[i % 2]
+                else:
+                    ret += role + ':'
+            return ret
+        elif self.sep_style == SeparatorStyle.MPT:
+            ret = system_prompt + self.sep
+            for role, message in self.messages:
+                if message:
+                    if type(message) is tuple:
+                        message, _, _ = message
+                    ret += role + message + self.sep
+                else:
+                    ret += role
+            return ret
+        else:
+            raise ValueError(f'Invalid style: {self.sep_style}')
+
+    def set_system_message(self, system_message: str):
+        """Set the system message."""
+        self.system_message = system_message
+
+    def append_message(self, role: str, message: str):
+        """Append a new message."""
+        self.messages.append([role, message])
+
+    def update_last_message(self, message: str):
+        """Update the last output.
+
+        The last message is typically set to be None when constructing the prompt,
+        so we need to update it in-place after getting the response from a model.
+        """
+        self.messages[-1][1] = message
+
+    def to_gradio_chatbot(self):
+        """Convert the conversation to gradio chatbot format."""
+        ret = []
+        for i, (role, msg) in enumerate(self.messages[self.offset :]):
+            if i % 2 == 0:
+                ret.append([msg, None])
+            else:
+                ret[-1][-1] = msg
+        return ret
+
+    def to_openai_api_messages(self):
+        """Convert the conversation to OpenAI chat completion format."""
+        ret = [{'role': 'system', 'content': self.system_message}]
+
+        for i, (_, msg) in enumerate(self.messages[self.offset :]):
+            if i % 2 == 0:
+                ret.append({'role': 'user', 'content': msg})
+            else:
+                if msg is not None:
+                    ret.append({'role': 'assistant', 'content': msg})
+        return ret
+
+    def copy(self):
+        return Conversation(
+            name=self.name,
+            system_template=self.system_template,
+            system_message=self.system_message,
+            roles=self.roles,
+            messages=[[x, y] for x, y in self.messages],
+            offset=self.offset,
+            sep_style=self.sep_style,
+            sep=self.sep,
+            sep2=self.sep2,
+            stop_str=self.stop_str,
+            stop_token_ids=self.stop_token_ids,
+        )
+
+    def dict(self):
+        return {
+            'template_name': self.name,
+            'system_message': self.system_message,
+            'roles': self.roles,
+            'messages': self.messages,
+            'offset': self.offset,
+        }
+
+
+# A global registry for all conversation templates
+conv_templates: Dict[str, Conversation] = {}
+
+
+def register_conv_template(template: Conversation, override: bool = False):
+    """Register a new conversation template."""
+    if not override:
+        assert (
+            template.name not in conv_templates
+        ), f'{template.name} has been registered.'
+
+    conv_templates[template.name] = template
+
+
+def get_conv_template(name: str) -> Conversation:
+    """Get a conversation template."""
+    return conv_templates[name].copy()
+
+
+# Both Hermes-2 and internlm2-chat are chatml-format conversation templates. The difference
+# is that during training, the preprocessing function for the Hermes-2 template doesn't add
+# <s> at the beginning of the tokenized sequence, while the internlm2-chat template does.
+# Therefore, they are completely equivalent during inference.
+register_conv_template(
+    Conversation(
+        name='Hermes-2',
+        system_template='<|im_start|>system\n{system_message}',
+        # note: The new system prompt was not used here to avoid changes in benchmark performance.
+        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
+        system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
+        roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
+        sep_style=SeparatorStyle.MPT,
+        sep='<|im_end|>',
+        stop_token_ids=[
+            2,
+            6,
+            7,
+            8,
+        ],
+        stop_str='<|endoftext|>',
+    )
+)
+
+
+register_conv_template(
+    Conversation(
+        name='internlm2-chat',
+        system_template='<|im_start|>system\n{system_message}',
+        # note: The new system prompt was not used here to avoid changes in benchmark performance.
+        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
+        system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
+        roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
+        sep_style=SeparatorStyle.MPT,
+        sep='<|im_end|>',
+        stop_token_ids=[
+            2,
+            92543,
+            92542
+        ]
+    )
+)
+
+
+register_conv_template(
+    Conversation(
+        name='phi3-chat',
+        system_template='<|system|>\n{system_message}',
+        # note: The new system prompt was not used here to avoid changes in benchmark performance.
+        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
+        system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
+        roles=('<|user|>\n', '<|assistant|>\n'),
+        sep_style=SeparatorStyle.MPT,
+        sep='<|end|>',
+        stop_token_ids=[
+            2,
+            32000,
+            32007
+        ]
+    )
+)
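These templates appear to back the new transformers/intern_vl.py model. A minimal sketch of building a prompt from one of the registered templates; the names, roles, and separators come straight from the registrations above:

    from xinference.thirdparty.internvl.conversation import get_conv_template

    conv = get_conv_template('internlm2-chat')  # returns a copy, safe to mutate
    conv.append_message(conv.roles[0], 'Describe this image in detail.')
    conv.append_message(conv.roles[1], None)  # placeholder the model will complete
    prompt = conv.get_prompt()
    # With the MPT separator style this yields:
    # '<|im_start|>system\n{system_message}<|im_end|><|im_start|>user\n'
    # 'Describe this image in detail.<|im_end|><|im_start|>assistant\n'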
xinference/thirdparty/omnilmm/model/utils.py

@@ -3,7 +3,6 @@ import os
 import pickle
 from io import BytesIO
 
-import cv2
 import numpy as np
 import torch
 import torch.distributed as dist
@@ -75,6 +74,8 @@ def autocontrast_func(img, cutoff=0):
     """
     same output as PIL.ImageOps.autocontrast
     """
+    import cv2
+
     n_bins = 256
 
     def tune_channel(ch):
@@ -108,6 +109,8 @@ def equalize_func(img):
     same output as PIL.ImageOps.equalize
     PIL's implementation is different from cv2.equalize
     """
+    import cv2
+
     n_bins = 256
 
     def tune_channel(ch):
@@ -131,6 +134,8 @@ def rotate_func(img, degree, fill=(0, 0, 0)):
     """
     like PIL, rotate by degree, not radians
     """
+    import cv2
+
     H, W = img.shape[0], img.shape[1]
     center = W / 2, H / 2
     M = cv2.getRotationMatrix2D(center, degree, 1)
@@ -194,6 +199,8 @@ def sharpness_func(img, factor):
     The differences the this result and PIL are all on the 4 boundaries, the center
     areas are same
     """
+    import cv2
+
     kernel = np.ones((3, 3), dtype=np.float32)
     kernel[1][1] = 5
     kernel /= 13
@@ -211,6 +218,8 @@ def sharpness_func(img, factor):
 
 
 def shear_x_func(img, factor, fill=(0, 0, 0)):
+    import cv2
+
     H, W = img.shape[0], img.shape[1]
     M = np.float32([[1, factor, 0], [0, 1, 0]])
     out = cv2.warpAffine(
@@ -223,6 +232,8 @@ def translate_x_func(img, offset, fill=(0, 0, 0)):
     """
     same output as PIL.Image.transform
     """
+    import cv2
+
     H, W = img.shape[0], img.shape[1]
     M = np.float32([[1, 0, -offset], [0, 1, 0]])
     out = cv2.warpAffine(
@@ -235,6 +246,8 @@ def translate_y_func(img, offset, fill=(0, 0, 0)):
     """
     same output as PIL.Image.transform
     """
+    import cv2
+
     H, W = img.shape[0], img.shape[1]
     M = np.float32([[1, 0, 0], [0, 1, -offset]])
     out = cv2.warpAffine(
@@ -252,6 +265,8 @@ def posterize_func(img, bits):
 
 
 def shear_y_func(img, factor, fill=(0, 0, 0)):
+    import cv2
+
     H, W = img.shape[0], img.shape[1]
     M = np.float32([[1, 0, 0], [factor, 1, 0]])
     out = cv2.warpAffine(
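The only change to this file is mechanical: the module-level import cv2 becomes a deferred, function-local import, so importing the omnilmm utilities no longer requires opencv-python unless one of these augmentations actually runs. A condensed sketch of the pattern, using rotate_func with the same signature as in the hunks above:

    import numpy as np


    def rotate_func(img: np.ndarray, degree: float, fill=(0, 0, 0)) -> np.ndarray:
        # Deferred import: cv2 is resolved only when the function is called,
        # so importing this module succeeds without opencv-python installed.
        import cv2

        H, W = img.shape[0], img.shape[1]
        M = cv2.getRotationMatrix2D((W / 2, H / 2), degree, 1)
        return cv2.warpAffine(img, M, (W, H), borderValue=fill)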
xinference/web/ui/build/asset-manifest.json

@@ -1,14 +1,14 @@
 {
   "files": {
     "main.css": "./static/css/main.4bafd904.css",
-    "main.js": "./static/js/main.17ca0398.js",
+    "main.js": "./static/js/main.661c7b0a.js",
     "static/media/icon.webp": "./static/media/icon.4603d52c63041e5dfbfd.webp",
     "index.html": "./index.html",
     "main.4bafd904.css.map": "./static/css/main.4bafd904.css.map",
-    "main.17ca0398.js.map": "./static/js/main.17ca0398.js.map"
+    "main.661c7b0a.js.map": "./static/js/main.661c7b0a.js.map"
   },
   "entrypoints": [
     "static/css/main.4bafd904.css",
-    "static/js/main.17ca0398.js"
+    "static/js/main.661c7b0a.js"
   ]
 }
xinference/web/ui/build/index.html

@@ -1 +1 @@
-<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="Web site created using create-react-app"/><link rel="apple-touch-icon" href="./logo192.png"/><link rel="manifest" href="./manifest.json"/><title>Xinference</title><script defer="defer" src="./static/js/main.17ca0398.js"></script><link href="./static/css/main.4bafd904.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
+<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="Web site created using create-react-app"/><link rel="apple-touch-icon" href="./logo192.png"/><link rel="manifest" href="./manifest.json"/><title>Xinference</title><script defer="defer" src="./static/js/main.661c7b0a.js"></script><link href="./static/css/main.4bafd904.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>