pylizlib 0.0.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pylizlib-0.0.6/PKG-INFO +18 -0
- pylizlib-0.0.6/README.md +2 -0
- pylizlib-0.0.6/ai/__init__.py +0 -0
- pylizlib-0.0.6/ai/controller/__init__.py +0 -0
- pylizlib-0.0.6/ai/controller/llava_controller.py +73 -0
- pylizlib-0.0.6/ai/core/__init__.py +0 -0
- pylizlib-0.0.6/ai/core/ai_method.py +10 -0
- pylizlib-0.0.6/ai/core/ai_model_list.py +6 -0
- pylizlib-0.0.6/ai/core/ai_models.py +58 -0
- pylizlib-0.0.6/ai/core/ai_power.py +16 -0
- pylizlib-0.0.6/ai/core/ai_prompts.py +23 -0
- pylizlib-0.0.6/ai/core/ai_scan_settings.py +23 -0
- pylizlib-0.0.6/ai/core/ai_setting.py +50 -0
- pylizlib-0.0.6/ai/core/ai_source.py +41 -0
- pylizlib-0.0.6/ai/core/ai_source_type.py +7 -0
- pylizlib-0.0.6/ai/core/hg_file.py +20 -0
- pylizlib-0.0.6/ai/llm/__init__.py +0 -0
- pylizlib-0.0.6/ai/llm/entities/__init__.py +0 -0
- pylizlib-0.0.6/ai/llm/local/__init__.py +0 -0
- pylizlib-0.0.6/ai/llm/local/llamacpp.py +156 -0
- pylizlib-0.0.6/ai/llm/local/llamacpplib.py +22 -0
- pylizlib-0.0.6/ai/llm/local/whisper.py +29 -0
- pylizlib-0.0.6/ai/llm/remote/__init__.py +0 -0
- pylizlib-0.0.6/ai/llm/remote/data/__init__.py +0 -0
- pylizlib-0.0.6/ai/llm/remote/data/ollamapi.py +81 -0
- pylizlib-0.0.6/ai/llm/remote/dto/__init__.py +0 -0
- pylizlib-0.0.6/ai/llm/remote/dto/ollama_models.py +83 -0
- pylizlib-0.0.6/ai/llm/remote/dto/ollama_response.py +65 -0
- pylizlib-0.0.6/ai/llm/remote/service/__init__.py +0 -0
- pylizlib-0.0.6/ai/llm/remote/service/ollamaliz.py +66 -0
- pylizlib-0.0.6/media/__init__.py +0 -0
- pylizlib-0.0.6/media/api/__init__.py +0 -0
- pylizlib-0.0.6/media/api/data/__init__.py +0 -0
- pylizlib-0.0.6/media/api/data/eagleapi.py +33 -0
- pylizlib-0.0.6/media/api/dto/__init__.py +0 -0
- pylizlib-0.0.6/media/api/dto/eagle_dto.py +40 -0
- pylizlib-0.0.6/media/api/service/__init__.py +0 -0
- pylizlib-0.0.6/media/api/service/eagleliz.py +45 -0
- pylizlib-0.0.6/media/image_scanner.py +38 -0
- pylizlib-0.0.6/model/__init__.py +0 -0
- pylizlib-0.0.6/model/file_type.py +6 -0
- pylizlib-0.0.6/model/liz_image.py +52 -0
- pylizlib-0.0.6/model/operation.py +18 -0
- pylizlib-0.0.6/network/__init__.py +0 -0
- pylizlib-0.0.6/network/netres.py +42 -0
- pylizlib-0.0.6/network/netrestype.py +10 -0
- pylizlib-0.0.6/network/netutils.py +62 -0
- pylizlib-0.0.6/network/ssl.py +8 -0
- pylizlib-0.0.6/pylizlib.egg-info/PKG-INFO +18 -0
- pylizlib-0.0.6/pylizlib.egg-info/SOURCES.txt +68 -0
- pylizlib-0.0.6/pylizlib.egg-info/dependency_links.txt +1 -0
- pylizlib-0.0.6/pylizlib.egg-info/top_level.txt +5 -0
- pylizlib-0.0.6/setup.cfg +4 -0
- pylizlib-0.0.6/setup.py +34 -0
- pylizlib-0.0.6/test/test.py +23 -0
- pylizlib-0.0.6/test/test_ai.py +0 -0
- pylizlib-0.0.6/test/test_image_scanner.py +44 -0
- pylizlib-0.0.6/test/test_llamacpp.py +45 -0
- pylizlib-0.0.6/test/test_llamacpp_lib.py +31 -0
- pylizlib-0.0.6/util/__init__.py +0 -0
- pylizlib-0.0.6/util/cfgutils.py +57 -0
- pylizlib-0.0.6/util/datautils.py +30 -0
- pylizlib-0.0.6/util/easinit.py +31 -0
- pylizlib-0.0.6/util/fileutils.py +87 -0
- pylizlib-0.0.6/util/loggiz.py +257 -0
- pylizlib-0.0.6/util/osutils.py +121 -0
- pylizlib-0.0.6/util/pathutils.py +180 -0
- pylizlib-0.0.6/util/pylizdir.py +53 -0
- pylizlib-0.0.6/util/regutils.py +20 -0
- pylizlib-0.0.6/util/unitutils.py +4 -0
pylizlib-0.0.6/PKG-INFO
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: pylizlib
|
|
3
|
+
Version: 0.0.6
|
|
4
|
+
Summary: Personal python library for general purposes.
|
|
5
|
+
Author: Gabliz
|
|
6
|
+
Author-email:
|
|
7
|
+
Keywords: python,video,ai,utilities
|
|
8
|
+
Classifier: Development Status :: 1 - Planning
|
|
9
|
+
Classifier: Intended Audience :: Developers
|
|
10
|
+
Classifier: Programming Language :: Python :: 3
|
|
11
|
+
Classifier: Operating System :: Unix
|
|
12
|
+
Classifier: Operating System :: MacOS :: MacOS X
|
|
13
|
+
Classifier: Operating System :: Microsoft :: Windows
|
|
14
|
+
Description-Content-Type: text/markdown
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
# PYLIZ
|
|
18
|
+
My personal Python library for various tasks.
|
pylizlib-0.0.6/README.md
ADDED
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
import base64
|
|
2
|
+
import json
|
|
3
|
+
import os
|
|
4
|
+
|
|
5
|
+
from ai.core.ai_method import AiMethod
|
|
6
|
+
from ai.core.ai_setting import AiSettings
|
|
7
|
+
from ai.llm.local.llamacpp import LlamaCpp
|
|
8
|
+
from ai.llm.remote.service.ollamaliz import Ollamaliz
|
|
9
|
+
from model.liz_image import LizImage
|
|
10
|
+
from model.operation import Operation
|
|
11
|
+
from util.pylizdir import PylizDir
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class LlavaController:
    """Runs a LLaVA vision model over an image and maps its JSON answer onto a LizImage.

    The concrete backend (remote ollama server or a local llama.cpp build) is
    selected by ``settings.method``.
    """

    def __init__(self, settings: AiSettings):
        self.settings = settings

    def __run_from_ollama(self, image_path: str) -> Operation[str]:
        """Query a remote ollama server, sending the image as base64."""
        ollama = Ollamaliz(self.settings.remote_url)
        model_name = self.settings.source.ollama_name
        with open(image_path, "rb") as image_file:
            image_base_64 = base64.b64encode(image_file.read()).decode('utf-8')
        llava_result = ollama.llava_query(self.settings.prompt.value, image_base_64, model_name)
        if not llava_result.is_op_ok():
            return Operation(status=False, error=llava_result.error)
        return Operation(status=True, payload=llava_result.payload.response)

    def __run_from_local_llamacpp(self, image_path: str) -> Operation[str]:
        """Install (if needed) and run LLaVA through a local llama.cpp checkout."""
        PylizDir.create()
        path_install: str = os.path.join(PylizDir.get_ai_folder(), "llama.cpp")
        path_models: str = PylizDir.get_models_folder()
        path_logs: str = os.path.join(PylizDir.get_logs_path(), "llama.cpp")
        obj = LlamaCpp(path_install, path_models, path_logs)
        obj.install_llava(self.settings.power, lambda x: None, lambda x: None)
        llava_result = obj.run_llava(self.settings.power, image_path, self.settings.prompt.value)
        if not llava_result.is_op_ok():
            return Operation(status=False, error=llava_result.error)
        return Operation(status=True, payload=llava_result.payload)

    def __get_image_from_json(self, output: str, image_path: str) -> Operation[LizImage]:
        """Parse the model's JSON output and apply the enabled fields to a LizImage.

        Fix over the original: the per-field updates used conditional
        *expressions* as statements (``f(x) if flag else None``); they are now
        plain ``if`` statements — equivalent behavior, idiomatic form.
        """
        info_json = json.loads(output)
        output_image = LizImage(image_path)
        scan = self.settings.scan_settings
        if scan.ai_rename:
            output_image.set_ai_filename(info_json.get("filename"))
        if scan.ai_comment:
            output_image.set_ai_description(info_json.get("description"))
        if scan.ai_tags:
            output_image.set_ai_tags(info_json.get("tags"))
        if scan.ai_ocr:
            output_image.set_ai_text(info_json.get("text"))
        output_image.set_ai_scanned(True)
        return Operation(status=True, payload=output_image)

    def run_and_get_liz_media(self, image_path: str) -> Operation[LizImage]:
        """Run the configured LLaVA backend and return the scanned LizImage.

        Raises:
            NotImplementedError: when ``settings.method`` has no backend here.
        """
        # Both branches shared identical ok/error handling; it is hoisted
        # below the backend dispatch instead of duplicated per branch.
        if self.settings.method == AiMethod.LLAVA_OLLAMA_JSON:
            op = self.__run_from_ollama(image_path)
        elif self.settings.method == AiMethod.LLAVA_LLAMACPP_JSON:
            op = self.__run_from_local_llamacpp(image_path)
        else:
            raise NotImplementedError("Current Aimethod is not implemented for this function.")
        if not op.is_op_ok():
            return Operation(status=False, error=op.error)
        return self.__get_image_from_json(op.payload, image_path)
|
|
File without changes
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
from enum import Enum
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class AiMethod(Enum):
    """How a vision query is executed: which backend serves LLaVA and in what format.

    Fix over the original: three commented-out legacy members were removed
    (dead code); the two live members are unchanged.
    """

    LLAVA_OLLAMA_JSON = "LLAVA with ollama server"
    LLAVA_LLAMACPP_JSON = "LLAVA with local power"
|
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
from ai.core.ai_method import AiMethod
|
|
2
|
+
from ai.core.ai_power import AiPower
|
|
3
|
+
from ai.core.ai_source import AiSource
|
|
4
|
+
from ai.core.ai_source_type import AiSourceType
|
|
5
|
+
from ai.core.hg_file import HgFile
|
|
6
|
+
from model.file_type import FileType
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class AiModels:
    """Catalog of known model files plus per-power model selection helpers."""

    # LLaVA 1.5 7B files hosted on Huggingface: the multimodal projector and
    # the q4_k-quantized weights. The bundle groups both, as llama.cpp needs them together.
    llava_15_7b_mmproj_f16 = HgFile("mmproj-model-f16.gguf", "https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/mmproj-model-f16.gguf", FileType.HG_MMPROJ)
    llava_15_7b_ggml_model_q4 = HgFile("ggml-model-q4_k.gguf", "https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/ggml-model-q4_k.gguf", FileType.HG_GGML)
    llava_15_7b_bundle = [llava_15_7b_mmproj_f16, llava_15_7b_ggml_model_q4]
    llava_15_7b_name = "llava157b"

    def __init__(self):
        pass

    class Llava:
        """Selects a LLaVA AiSource for a given power level and source type."""

        @staticmethod
        def get_llava(power: AiPower, source: AiSourceType) -> AiSource:
            """Dispatch on power level; raises when the power is unknown."""
            if power == AiPower.LOW:
                return AiModels.Llava.get_llava_power_low(source)
            elif power == AiPower.MEDIUM:
                return AiModels.Llava.get_llava_power_medium(source)
            elif power == AiPower.HIGH:
                return AiModels.Llava.get_llava_power_high(source)
            raise Exception("No model found for the given power and method.")

        @staticmethod
        def _for_source(source: AiSourceType, ollama_name: str) -> AiSource:
            """Shared selection logic for all power levels.

            Only the ollama model tag varies by power; the local llama.cpp
            bundle is currently the same (1.5 7B) for every power level, as
            in the original three near-identical methods this replaces.
            """
            if source == AiSourceType.OLLAMA_SERVER:
                return AiSource(ollama_name=ollama_name)
            if source == AiSourceType.LOCAL_LLAMACPP:
                return AiSource(local_name=AiModels.llava_15_7b_name, hg_files=AiModels.llava_15_7b_bundle)
            raise Exception("No model found for the given power and method.")

        @staticmethod
        def get_llava_power_low(source: AiSourceType) -> AiSource:
            """LLaVA choice for low-power machines (7b on ollama)."""
            return AiModels.Llava._for_source(source, "llava:7b")

        @staticmethod
        def get_llava_power_medium(source: AiSourceType) -> AiSource:
            """LLaVA choice for medium-power machines (13b on ollama)."""
            return AiModels.Llava._for_source(source, "llava:13b")

        @staticmethod
        def get_llava_power_high(source: AiSourceType) -> AiSource:
            """LLaVA choice for high-power machines (13b on ollama)."""
            return AiModels.Llava._for_source(source, "llava:13b")
|
|
57
|
+
|
|
58
|
+
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
from enum import Enum
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class AiPower(Enum):
    """Rough compute budget used to size the models that get selected."""

    LOW = "Low"
    MEDIUM = "Medium"
    HIGH = "High"

    @staticmethod
    def get_llava_from_power(ai_power: str) -> str:
        """Map a power *value* string to an ollama LLaVA model tag.

        Unknown values fall back to the smallest model, matching the
        original else-branch.
        """
        tag_by_power = {
            AiPower.HIGH.value: "llava:13b",
            AiPower.MEDIUM.value: "llava:13b",
        }
        return tag_by_power.get(ai_power, "llava:7b")
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
from enum import Enum
|
|
2
|
+
|
|
3
|
+
# Prompt asking LLaVA for a structured JSON description of an image.
# This text is sent to the model verbatim (runtime data) and is kept byte-identical.
prompt_llava_1 = """
Analyze the image thoroughly and provide a detailed description of every visible element. Return a json including the following information:
- "description": a detailed description of the image (minimum 15-20 words), considering colors, objects, actions, and any other relevant details.
- "tags": a list of tags that describe the image. Include specific objects, actions, locations, and any discernible themes. (minimum 5 maximum 10 tags)
- "text": a list of all the text found in the image (if any).
- "filename": phrase that summarizes the image content (maximum 30 characters).
"""

# Free-form detailed-description prompt (step 1 of a multi-step flow).
# NOTE(review): "If the are texts" looks like a typo for "If there are texts";
# fix deliberately if desired — changing it changes what the model receives.
prompt_llava_detailed_STEP1 = """
Analyze the image thoroughly and provide a detailed description of every visible element.
If there are people, try to recognize them. If there are objects, try to identify them.
If the are texts, try to read them.
"""
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class AiPrompt(Enum):
    """Named prompts selectable in AiSettings; each value is the literal prompt text."""
    LLAVA_JSON = prompt_llava_1  # structured JSON output (description/tags/text/filename)
    LLAVA_DETAILED = prompt_llava_detailed_STEP1  # free-form detailed description
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
|
|
2
|
+
class AiScanSettings:
    """Feature switches for an AI media scan.

    Each flag enables one AI-derived output on the scanned file; everything
    is disabled by default.
    """

    def __init__(
            self,
            ai_tags: bool = False,
            ai_file_metadata: bool = False,
            ai_comment: bool = False,
            ai_rename: bool = False,
            ai_ocr: bool = False,
    ):
        # Store every option verbatim on the instance.
        self.ai_tags = ai_tags
        self.ai_file_metadata = ai_file_metadata
        self.ai_comment = ai_comment
        self.ai_rename = ai_rename
        self.ai_ocr = ai_ocr

    def set_all_true(self):
        """Enable every scan option at once."""
        self.ai_tags = self.ai_file_metadata = True
        self.ai_comment = self.ai_rename = True
        self.ai_ocr = True
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
from ai.core.ai_method import AiMethod
|
|
2
|
+
from ai.core.ai_model_list import AiModelList
|
|
3
|
+
from ai.core.ai_models import AiModels
|
|
4
|
+
from ai.core.ai_power import AiPower
|
|
5
|
+
from ai.core.ai_prompts import AiPrompt
|
|
6
|
+
from ai.core.ai_scan_settings import AiScanSettings
|
|
7
|
+
from ai.core.ai_source import AiSource
|
|
8
|
+
from ai.core.ai_source_type import AiSourceType
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class AiSettings:
    """Aggregates the configuration of an AI run and derives method and source from it."""

    def __init__(
            self,
            model: AiModelList,
            source_type: AiSourceType,
            power: AiPower,
            prompt: AiPrompt,
            remote_url: str | None = None,
            scan_settings: AiScanSettings | None = None
    ):
        # Derived fields, filled in by setup().
        self.source: AiSource | None = None
        self.method = None

        # User-provided configuration, stored verbatim.
        self.model = model
        self.source_type = source_type
        self.remote_url = remote_url
        self.power = power
        self.prompt = prompt
        self.scan_settings = scan_settings

        # Validate first, then derive method and source.
        self.check()
        self.setup()

    def __setup_llava(self):
        """Derive the AiMethod and AiSource for a LLaVA run from source type + prompt."""
        method_by_combo = {
            (AiSourceType.OLLAMA_SERVER, AiPrompt.LLAVA_JSON): AiMethod.LLAVA_OLLAMA_JSON,
            (AiSourceType.LOCAL_LLAMACPP, AiPrompt.LLAVA_JSON): AiMethod.LLAVA_LLAMACPP_JSON,
        }
        combo = (self.source_type, self.prompt)
        if combo not in method_by_combo:
            raise NotImplementedError("This combination of source and prompt is not implemented.")
        self.method = method_by_combo[combo]
        self.source = AiModels.Llava.get_llava(self.power, self.source_type)

    def setup(self):
        """Dispatch derivation per model family."""
        if self.model == AiModelList.LLAVA:
            self.__setup_llava()

    def check(self):
        """Fail fast on configurations that cannot work."""
        if self.source_type == AiSourceType.OLLAMA_SERVER and self.remote_url is None:
            raise ValueError("Remote URL is required for Ollama Server.")
|
|
50
|
+
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
from typing import List
|
|
2
|
+
|
|
3
|
+
from ai.core.hg_file import HgFile
|
|
4
|
+
from model.file_type import FileType
|
|
5
|
+
from network import netutils
|
|
6
|
+
from util import unitutils
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class AiSource:
    """Where a model comes from: an ollama tag, or a named local bundle of HG files."""

    def __init__(
            self,
            ollama_name: str | None = None,
            local_name: str | None = None,
            url: str | None = None,
            hg_files: List[HgFile] | None = None,
    ):
        self.url = url
        self.hg_files = hg_files
        self.ollama_name = ollama_name
        self.local_name = local_name

    def get_ggml_file(self) -> HgFile:
        """Return the GGML weights file of the bundle; raise if there is none."""
        found = next((f for f in self.hg_files if f.file_type == FileType.HG_GGML), None)
        if found is None:
            raise Exception("No ggml file found in the source.")
        return found

    def get_mmproj_file(self) -> HgFile:
        """Return the multimodal projector file of the bundle; raise if there is none."""
        found = next((f for f in self.hg_files if f.file_type == FileType.HG_MMPROJ), None)
        if found is None:
            raise Exception("No mmproj file found in the source.")
        return found

    def get_files_size_mb(self) -> float:
        """Total remote size of all bundle files, in megabytes (0.0 for an empty bundle)."""
        sizes = (unitutils.convert_byte_to_mb(f.get_file_size_byte()) for f in self.hg_files)
        return sum(sizes, 0.0)
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
from enum import Enum
|
|
2
|
+
|
|
3
|
+
from model.file_type import FileType
|
|
4
|
+
from network import netutils
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class HgFile:
    """A single downloadable model file hosted on Huggingface."""

    def __init__(
            self,
            file_name: str,
            url: str,
            file_type: FileType
    ):
        # Local name, remote location, and role of the file (weights vs projector).
        self.url = url
        self.file_name = file_name
        self.file_type = file_type

    def get_file_size_byte(self) -> int:
        """Ask the remote server for the file size, without downloading the file."""
        return netutils.get_file_size_byte(self.url)
|
|
20
|
+
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,156 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import subprocess
|
|
3
|
+
from typing import Callable
|
|
4
|
+
|
|
5
|
+
from git import Repo
|
|
6
|
+
|
|
7
|
+
from ai.core.ai_method import AiMethod
|
|
8
|
+
from ai.core.ai_models import AiModels
|
|
9
|
+
from ai.core.ai_power import AiPower
|
|
10
|
+
from ai.core.ai_source import AiSource
|
|
11
|
+
from ai.core.ai_source_type import AiSourceType
|
|
12
|
+
from model.operation import Operation
|
|
13
|
+
from util import pathutils, osutils, fileutils, datautils
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
# noinspection PyMethodMayBeStatic
class LlamaCpp:
    """Clones, builds and runs the llama.cpp project to execute LLaVA locally.

    All work happens inside three caller-supplied folders: the git checkout
    (path_install), downloaded model files (path_models) and log output
    (path_logs).
    """

    GITHUB_URL = "https://github.com/ggerganov/llama.cpp.git"

    def __init__(
            self,
            # path_install: str = os.path.join(PylizDir.get_ai_folder(), "llama.cpp"),
            # path_models: str = PylizDir.get_models_folder(),
            # path_logs: str = os.path.join(PylizDir.get_logs_path(), "llama.cpp")
            path_install: str,
            path_models: str,
            path_logs: str
    ):
        # Init paths
        self.path_install = path_install
        self.path_models = path_models
        self.path_logs = path_logs
        self.log_build_folder = os.path.join(self.path_logs, "build")
        # Ensure the build-log folder exists before any build runs.
        pathutils.check_path(self.log_build_folder, True)

    def __clone_repo(self, on_log: Callable[[str], None] = lambda x: None):
        """Clone llama.cpp into path_install; a pre-existing folder is treated as installed."""
        on_log("Cloning LlamaCpp...")
        # check if the folder already exists
        if os.path.exists(self.path_install):
            on_log("LlamaCpp already installed.")
            return
        else:
            on_log("LlamaCpp not installed. Proceeding...")
            pathutils.check_path(self.path_install, True)
        # checking folder
        pathutils.check_path_dir(self.path_install)
        # Cloning github repo
        Repo.clone_from(LlamaCpp.GITHUB_URL, self.path_install)
        on_log("Clone successful.")

    def __check_requirements(self, on_log: Callable[[str], None] = lambda x: None):
        """Verify `make` exists and the OS is Unix-like; raise otherwise."""
        on_log("Checking requirements...")
        make_ok = osutils.is_command_available("make")
        is_os_unix = osutils.is_os_unix()
        if not make_ok:
            raise Exception("Make command not available. Please install make.")
        if not is_os_unix:
            raise Exception("This component (LlamaCPP) is only available on Unix systems.")

    def __build_sources(self, on_log: Callable[[str], None] = lambda x: None):
        """Run `make` in the checkout; returns the CompletedProcess with captured text output.

        check=True makes a failed build raise CalledProcessError.
        """
        on_log("Building sources...")
        risultato = subprocess.run(["make"], check=True, cwd=self.path_install, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        on_log("Build successful.")
        return risultato

    def __crete_build_log(self, risultato, ):
        # NOTE(review): name looks like a typo for __create_build_log (private, so safe to rename later).
        """Write a build's captured stdout/stderr to a timestamped log file."""
        log_build_name = datautils.gen_timestamp_log_name("llamacpp-", ".txt")
        log_build_path = os.path.join(self.log_build_folder, log_build_name)
        with open(log_build_path, "w") as f:
            f.write(risultato.stdout)
            f.write("***********************************\n")
            f.write(risultato.stderr)

    def __download_llava_local_models(
            self,
            source: AiSource,
            folder: str,
            on_log: Callable[[str], None] = lambda x: None,
            on_progress: Callable[[int], None] = lambda x: None
    ):
        """Download every Huggingface file in the source bundle, skipping files already present."""
        on_log("LLava require " + str(len(source.hg_files)) + " files to download.")
        for hg_file in source.hg_files:
            current_file = os.path.join(folder, hg_file.file_name)
            already_exist = os.path.exists(current_file)
            if already_exist:
                on_log("Model " + hg_file.file_name + " already installed.")
                continue
            on_log("Downloading model " + hg_file.file_name + " from Huggingface...")
            op = fileutils.download_file(hg_file.url, current_file, on_progress)
            if op.status is False:
                raise Exception("Error downloading model: " + op.error)

    def clone_and_build(self, on_log: Callable[[str], None] = lambda x: None):
        """Clone (if needed), check requirements, build, and persist the build log."""
        self.__clone_repo(on_log)
        self.__check_requirements(on_log)
        build = self.__build_sources(on_log)
        self.__crete_build_log(build)

    def install_llava(
            self,
            power: AiPower,
            on_log: Callable[[str], None] = lambda x: None,
            on_progress: Callable[[int], None] = lambda x: None
    ):
        """Build llama.cpp and download the LLaVA model files for the given power level.

        Raises on missing requirements, insufficient disk space, or a failed download.
        """
        self.clone_and_build(on_log)
        on_log("Installing LLava...")
        # creating and checking files/folders
        source = AiModels.Llava.get_llava(power, AiSourceType.LOCAL_LLAMACPP)
        folder = os.path.join(self.path_models, source.local_name)
        pathutils.check_path(folder, True)
        pathutils.check_path_dir(folder)
        # Checking available space
        models_size = source.get_files_size_mb()
        has_space = osutils.has_disk_free_space(folder, models_size)
        if not has_space:
            raise Exception("Not enough free space to install LLava.")
        # Downloading files
        self.__download_llava_local_models(source, folder, on_log, on_progress)
        on_log("LLava model installed.")

    def run_llava(
            self,
            power: AiPower,
            image_path: str,
            prompt: str,
    ) -> Operation[str]:
        """Run the llama.cpp LLaVA CLI on an image and return its raw output.

        stdout of the CLI is redirected to a timestamped log file which is then
        read back as the Operation payload; a non-zero exit code surfaces the
        captured stderr. All errors are converted into a failed Operation.
        """
        try:
            # Creating variables and checking requirements
            source = AiModels.Llava.get_llava(power, AiSourceType.LOCAL_LLAMACPP)
            folder = os.path.join(self.path_models, source.local_name)
            if not os.path.exists(folder):
                raise Exception("LLava model not installed.")
            # Run the model
            path_model_file = os.path.join(self.path_models, source.local_name, source.get_ggml_file().file_name)
            path_mmproj_file = os.path.join(self.path_models, source.local_name, source.get_mmproj_file().file_name)
            command = ["./llama-llava-cli", "-m", path_model_file, "--mmproj", path_mmproj_file, "--image", image_path, "-p", prompt ]
            # saving and extracting the result
            log_file = os.path.join(self.path_logs, datautils.gen_timestamp_log_name("llava-result", ".txt"))
            with open(log_file, 'w') as file:
                result = subprocess.run(command, cwd=self.path_install, stdout=file, stderr=subprocess.PIPE)
            if result.returncode != 0:
                raise Exception("Error running LLava: " + result.stderr.decode())
            with open(log_file, 'r') as file:
                return Operation(status=True, payload=file.read())
        except Exception as e:
            return Operation(status=False, error=str(e))
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
from huggingface_hub import hf_hub_download
|
|
2
|
+
from llama_cpp import Llama
|
|
3
|
+
|
|
4
|
+
from util.pylizdir import PylizDir
|
|
5
|
+
|
|
6
|
+
# https://github.com/abetlen/llama-cpp-python
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class LlamaCppLib:
    """Runs models through the llama-cpp-python bindings (see repo link above)."""

    @staticmethod
    def run_llama3(prompt: str):
        """Fetch a Llama-3 GGUF from Huggingface (cached in the models folder) and print its completion."""
        model_name = "Orenguteng/Llama-3-8B-Lexi-Uncensored-GGUF"
        model_file = "Lexi-Llama-3-8B-Uncensored_Q4_K_M.gguf"
        model_path = hf_hub_download(
            model_name,
            filename=model_file,
            local_dir=PylizDir.get_models_folder(),
        )
        # n_gpu_layers=1 offloads a single layer to the GPU.
        llm = Llama(model_path, n_gpu_layers=1)
        print(llm(prompt))
|
|
22
|
+
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
import moviepy.editor as mp
|
|
2
|
+
import whisper
|
|
3
|
+
|
|
4
|
+
from network.ssl import ignore_context_ssl
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def transcribe(
        video_path: str,
        audio_path: str,
        model_path: str,
        transcription_path: str | None = None
):
    """Extract the audio track of a video and transcribe it with Whisper.

    Args:
        video_path: source video file.
        audio_path: where the extracted audio track is written.
        model_path: Whisper model name/path passed to ``whisper.load_model``.
        transcription_path: optional text file the transcript is also written to.

    Returns:
        The Whisper result dict (the transcript text is under ``result["text"]``).
    """
    ignore_context_ssl()

    # Extract the audio from the video. Fix over the original: the clip is
    # explicitly closed so its underlying file handles/subprocesses are released.
    video = mp.VideoFileClip(video_path)
    try:
        video.audio.write_audiofile(audio_path)
    finally:
        video.close()

    # Load the Whisper model
    model = whisper.load_model(model_path)

    # Transcribe the audio
    result = model.transcribe(audio_path)
    if transcription_path is not None:
        # Save the transcription; explicit UTF-8 avoids platform-dependent encodings.
        with open(transcription_path, 'w', encoding='utf-8') as f:
            f.write(result["text"])
    return result
|
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from typing import Callable
|
|
3
|
+
|
|
4
|
+
import ollama
|
|
5
|
+
from ollama import Client
|
|
6
|
+
|
|
7
|
+
from network.netres import NetResponse
|
|
8
|
+
from network.netutils import exec_get, exec_post
|
|
9
|
+
|
|
10
|
+
# Default port a locally running ollama server listens on.
OLLAMA_PORT = "11434"
# Convenience base URL for a local server.
OLLAMA_HTTP_LOCALHOST_URL = "http://localhost:" + OLLAMA_PORT
|
|
12
|
+
|
|
13
|
+
# https://github.com/ollama/ollama-python
|
|
14
|
+
# https://github.com/ollama/ollama/blob/main/docs/api.md
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class OllamaApiLegacy:
    """Raw HTTP calls against the ollama REST API (no client library)."""

    def __init__(self):
        pass

    @staticmethod
    def check_ollama_status(url) -> NetResponse:
        """GET the server root to verify it is reachable."""
        return exec_get(url)

    @staticmethod
    def get_installed_models(url) -> NetResponse:
        """List the models installed on the server via /api/tags."""
        return exec_get(url + "/api/tags")

    @staticmethod
    def send_query(
            url: str,
            prompt: str,
            model_name: str,
    ) -> NetResponse:
        """Run a plain, non-streaming text generation via /api/generate."""
        body = {
            "model": model_name,
            "prompt": prompt,
            "stream": False
        }
        return exec_post(url + "/api/generate", body, False)

    @staticmethod
    def send_llava_query(
            url: str,
            prompt: str,
            image_base_64: str,
            model_name: str,
    ) -> NetResponse:
        """Run a non-streaming LLaVA image query via /api/generate, requesting JSON output."""
        body = {
            "model": model_name,
            "prompt": prompt,
            "images": [image_base_64],
            "format": "json",
            "stream": False
        }
        return exec_post(url + "/api/generate", body, False)
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
class Ollamapi:
    """Thin wrapper around the official ollama Python client."""

    def __init__(self, url: str):
        self.url = url
        # Fix over the original: it also assigned the client to the module
        # attribute `ollama.client`, a global side effect nothing here reads;
        # that mutation was removed.
        self.client = Client(host=url)

    def get_models_list(self):
        """Return the models installed on the server, serialized as a JSON string."""
        mappings = self.client.list()
        json_str = json.dumps(mappings)
        return json_str

    def download_model(self, name: str, en_stream: bool, callback: Callable[[str], None] | None = None):
        """Pull a model by name, optionally streaming status strings to `callback`.

        Bug fix: the original passed the *literal string* 'name' to the pull
        call (`model='name'`), so the requested model was never downloaded;
        it now forwards the `name` parameter.
        """
        stream = self.client.pull(
            model=name,
            stream=en_stream,
        )
        if en_stream:
            for data in stream:
                if callback:
                    callback(data["status"])
|
|
File without changes
|