pygpt-net 2.4.41__py3-none-any.whl → 2.4.43__py3-none-any.whl
This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- CHANGELOG.md +13 -0
- README.md +142 -70
- pygpt_net/CHANGELOG.txt +13 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/app.py +3 -1
- pygpt_net/controller/audio/__init__.py +2 -2
- pygpt_net/controller/camera.py +1 -10
- pygpt_net/controller/chat/attachment.py +36 -23
- pygpt_net/controller/chat/audio.py +2 -2
- pygpt_net/controller/config/placeholder.py +15 -1
- pygpt_net/controller/ui/mode.py +16 -21
- pygpt_net/core/attachments/__init__.py +1 -1
- pygpt_net/core/attachments/context.py +10 -8
- pygpt_net/core/audio/__init__.py +4 -1
- pygpt_net/core/audio/whisper.py +37 -0
- pygpt_net/core/bridge/worker.py +2 -2
- pygpt_net/core/db/__init__.py +2 -1
- pygpt_net/core/debug/events.py +22 -10
- pygpt_net/core/debug/tabs.py +6 -3
- pygpt_net/core/history.py +3 -2
- pygpt_net/core/idx/__init__.py +16 -4
- pygpt_net/core/idx/chat.py +15 -5
- pygpt_net/core/idx/indexing.py +24 -8
- pygpt_net/core/installer.py +2 -4
- pygpt_net/core/models.py +62 -17
- pygpt_net/core/modes.py +11 -13
- pygpt_net/core/notepad.py +4 -4
- pygpt_net/core/plugins.py +27 -16
- pygpt_net/core/presets.py +20 -9
- pygpt_net/core/profile.py +11 -3
- pygpt_net/core/render/web/parser.py +3 -1
- pygpt_net/core/settings.py +5 -5
- pygpt_net/core/tabs/tab.py +10 -2
- pygpt_net/core/tokens.py +8 -6
- pygpt_net/core/web/__init__.py +105 -0
- pygpt_net/core/{web.py → web/helpers.py} +93 -67
- pygpt_net/data/config/config.json +3 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/config/modes.json +3 -3
- pygpt_net/data/locale/locale.en.ini +1 -0
- pygpt_net/data/locale/plugin.cmd_web.en.ini +2 -0
- pygpt_net/data/locale/plugin.mailer.en.ini +21 -0
- pygpt_net/item/ctx.py +66 -3
- pygpt_net/migrations/Version20241215110000.py +25 -0
- pygpt_net/migrations/__init__.py +3 -1
- pygpt_net/plugin/agent/__init__.py +7 -2
- pygpt_net/plugin/audio_output/__init__.py +6 -1
- pygpt_net/plugin/base/plugin.py +58 -26
- pygpt_net/plugin/base/worker.py +20 -17
- pygpt_net/plugin/cmd_history/config.py +2 -2
- pygpt_net/plugin/cmd_web/__init__.py +3 -4
- pygpt_net/plugin/cmd_web/config.py +71 -3
- pygpt_net/plugin/cmd_web/websearch.py +20 -12
- pygpt_net/plugin/cmd_web/worker.py +67 -4
- pygpt_net/plugin/idx_llama_index/config.py +3 -3
- pygpt_net/plugin/mailer/__init__.py +123 -0
- pygpt_net/plugin/mailer/config.py +149 -0
- pygpt_net/plugin/mailer/runner.py +285 -0
- pygpt_net/plugin/mailer/worker.py +123 -0
- pygpt_net/provider/agents/base.py +5 -2
- pygpt_net/provider/agents/openai.py +4 -2
- pygpt_net/provider/agents/openai_assistant.py +4 -2
- pygpt_net/provider/agents/planner.py +4 -2
- pygpt_net/provider/agents/react.py +4 -2
- pygpt_net/provider/audio_output/openai_tts.py +5 -11
- pygpt_net/provider/core/assistant/base.py +5 -3
- pygpt_net/provider/core/assistant/json_file.py +8 -5
- pygpt_net/provider/core/assistant_file/base.py +4 -3
- pygpt_net/provider/core/assistant_file/db_sqlite/__init__.py +4 -3
- pygpt_net/provider/core/assistant_file/db_sqlite/storage.py +3 -2
- pygpt_net/provider/core/assistant_store/base.py +6 -4
- pygpt_net/provider/core/assistant_store/db_sqlite/__init__.py +5 -4
- pygpt_net/provider/core/assistant_store/db_sqlite/storage.py +5 -3
- pygpt_net/provider/core/attachment/base.py +5 -3
- pygpt_net/provider/core/attachment/json_file.py +4 -3
- pygpt_net/provider/core/calendar/base.py +5 -3
- pygpt_net/provider/core/calendar/db_sqlite/__init__.py +6 -5
- pygpt_net/provider/core/calendar/db_sqlite/storage.py +5 -4
- pygpt_net/provider/core/config/base.py +8 -6
- pygpt_net/provider/core/config/json_file.py +9 -7
- pygpt_net/provider/core/ctx/base.py +27 -25
- pygpt_net/provider/core/ctx/db_sqlite/__init__.py +51 -35
- pygpt_net/provider/core/ctx/db_sqlite/storage.py +92 -38
- pygpt_net/provider/core/ctx/db_sqlite/utils.py +37 -11
- pygpt_net/provider/core/index/base.py +129 -23
- pygpt_net/provider/core/index/db_sqlite/__init__.py +130 -23
- pygpt_net/provider/core/index/db_sqlite/storage.py +130 -23
- pygpt_net/provider/core/index/db_sqlite/utils.py +4 -2
- pygpt_net/provider/core/mode/base.py +5 -3
- pygpt_net/provider/core/mode/json_file.py +7 -6
- pygpt_net/provider/core/model/base.py +6 -4
- pygpt_net/provider/core/model/json_file.py +9 -7
- pygpt_net/provider/core/notepad/base.py +5 -3
- pygpt_net/provider/core/notepad/db_sqlite/__init__.py +5 -4
- pygpt_net/provider/core/notepad/db_sqlite/storage.py +4 -3
- pygpt_net/provider/core/plugin_preset/base.py +4 -2
- pygpt_net/provider/core/plugin_preset/json_file.py +5 -3
- pygpt_net/provider/core/preset/base.py +6 -4
- pygpt_net/provider/core/preset/json_file.py +9 -9
- pygpt_net/provider/core/prompt/base.py +6 -3
- pygpt_net/provider/core/prompt/json_file.py +11 -6
- pygpt_net/provider/gpt/assistants.py +15 -6
- pygpt_net/provider/gpt/audio.py +5 -5
- pygpt_net/provider/gpt/chat.py +7 -5
- pygpt_net/provider/gpt/completion.py +8 -4
- pygpt_net/provider/gpt/image.py +3 -3
- pygpt_net/provider/gpt/store.py +46 -12
- pygpt_net/provider/gpt/vision.py +16 -11
- pygpt_net/provider/llms/anthropic.py +7 -2
- pygpt_net/provider/llms/azure_openai.py +26 -5
- pygpt_net/provider/llms/base.py +47 -9
- pygpt_net/provider/llms/google.py +7 -2
- pygpt_net/provider/llms/hugging_face.py +13 -3
- pygpt_net/provider/llms/hugging_face_api.py +18 -4
- pygpt_net/provider/llms/local.py +7 -2
- pygpt_net/provider/llms/ollama.py +30 -6
- pygpt_net/provider/llms/openai.py +32 -6
- pygpt_net/provider/vector_stores/__init__.py +45 -14
- pygpt_net/provider/vector_stores/base.py +35 -8
- pygpt_net/provider/vector_stores/chroma.py +13 -3
- pygpt_net/provider/vector_stores/ctx_attachment.py +32 -13
- pygpt_net/provider/vector_stores/elasticsearch.py +12 -3
- pygpt_net/provider/vector_stores/pinecode.py +12 -3
- pygpt_net/provider/vector_stores/redis.py +12 -3
- pygpt_net/provider/vector_stores/simple.py +12 -3
- pygpt_net/provider/vector_stores/temp.py +16 -4
- pygpt_net/provider/web/base.py +10 -3
- pygpt_net/provider/web/google_custom_search.py +9 -3
- pygpt_net/provider/web/microsoft_bing.py +9 -3
- pygpt_net/tools/__init__.py +13 -5
- pygpt_net/tools/audio_transcriber/__init__.py +4 -3
- pygpt_net/tools/base.py +15 -8
- pygpt_net/tools/code_interpreter/__init__.py +4 -3
- pygpt_net/tools/html_canvas/__init__.py +4 -3
- pygpt_net/tools/image_viewer/__init__.py +10 -4
- pygpt_net/tools/indexer/__init__.py +8 -7
- pygpt_net/tools/media_player/__init__.py +4 -3
- pygpt_net/tools/text_editor/__init__.py +36 -10
- pygpt_net/ui/layout/chat/output.py +2 -2
- pygpt_net/ui/layout/ctx/ctx_list.py +1 -1
- pygpt_net/ui/menu/audio.py +12 -1
- {pygpt_net-2.4.41.dist-info → pygpt_net-2.4.43.dist-info}/METADATA +143 -71
- {pygpt_net-2.4.41.dist-info → pygpt_net-2.4.43.dist-info}/RECORD +146 -138
- {pygpt_net-2.4.41.dist-info → pygpt_net-2.4.43.dist-info}/LICENSE +0 -0
- {pygpt_net-2.4.41.dist-info → pygpt_net-2.4.43.dist-info}/WHEEL +0 -0
- {pygpt_net-2.4.41.dist-info → pygpt_net-2.4.43.dist-info}/entry_points.txt +0 -0
pygpt_net/provider/gpt/image.py
CHANGED
@@ -6,12 +6,12 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.14
+# Updated Date: 2024.12.14 22:00:00 #
 # ================================================== #

 import datetime
 import os
-from typing import Optional
+from typing import Optional, Dict, Any

 import requests

@@ -36,7 +36,7 @@ class Image:
     def generate(
         self,
         context: BridgeContext,
-        extra: Optional[
+        extra: Optional[Dict[str, Any]] = None,
         sync: bool = True
     ):
         """
pygpt_net/provider/gpt/store.py
CHANGED
@@ -6,11 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.14
+# Updated Date: 2024.12.14 22:00:00 #
 # ================================================== #

 import os
-from typing import Optional
+from typing import Optional, List

 from pygpt_net.item.assistant import AssistantStoreItem

@@ -32,7 +32,11 @@ class Store:
         """
         return self.window.core.gpt.get_client()

-    def log(
+    def log(
+        self,
+        msg: str,
+        callback: Optional[callable] = None
+    ):
         """
         Log message

@@ -133,7 +137,7 @@ class Store:
         )
         return items

-    def get_files_ids(self) ->
+    def get_files_ids(self) -> List[str]:
         """
         Get all files IDs

@@ -149,7 +153,10 @@ class Store:
             items.append(id)
         return items

-    def remove_files(
+    def remove_files(
+        self,
+        callback: Optional[callable] = None
+    ) -> int:
         """
         Remove all files

@@ -168,7 +175,11 @@ class Store:
         self.log(msg, callback)
         return num

-    def remove_store_files(
+    def remove_store_files(
+        self,
+        store_id: str,
+        callback: Optional[callable] = None
+    ) -> int:
         """
         Remove all files from store

@@ -240,7 +251,11 @@ class Store:
         )
         return items

-    def create_store(
+    def create_store(
+        self,
+        name: str,
+        expire_days: int = 0
+    ):
         """
         Create vector store

@@ -262,7 +277,12 @@ class Store:
         if vector_store is not None:
             return vector_store

-    def update_store(
+    def update_store(
+        self,
+        id: str,
+        name: str,
+        expire_days: int = 0
+    ):
         """
         Update vector store

@@ -427,7 +447,10 @@ class Store:
             num += 1
         return num

-    def remove_all(
+    def remove_all(
+        self,
+        callback: Optional[callable] = None
+    ) -> int:
         """
         Remove all vector stores

@@ -446,7 +469,11 @@ class Store:
         self.log(msg, callback)
         return num

-    def add_file(
+    def add_file(
+        self,
+        store_id: str,
+        file_id: str
+    ):
         """
         Add file to vector store

@@ -480,7 +507,11 @@ class Store:
         if deleted_file is not None:
             return deleted_file.id

-    def delete_store_file(
+    def delete_store_file(
+        self,
+        store_id: str,
+        file_id: str
+    ):
         """
         Delete file from vector store

@@ -496,7 +527,10 @@ class Store:
         if vector_store_file is not None:
             return vector_store_file

-    def import_stores_files(
+    def import_stores_files(
+        self,
+        callback: Optional[callable] = None
+    ) -> int:
         """
         Import all vector stores files

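Most of the `Store` changes reformat signatures onto multiple lines and add `Optional[callable]` callbacks plus explicit return types such as `List[str]` and `int`. A rough sketch of the optional-callback logging pattern these methods share (simplified, not the actual `Store` code; the file IDs are placeholders):

```python
from typing import Callable, List, Optional

def log(msg: str, callback: Optional[Callable[[str], None]] = None) -> None:
    # Route the message to a UI callback when provided, otherwise print it.
    if callback is not None:
        callback(msg)
    else:
        print(msg)

def remove_files(callback: Optional[Callable[[str], None]] = None) -> int:
    removed: List[str] = []
    for file_id in ["file-abc", "file-def"]:  # placeholder IDs
        removed.append(file_id)
        log(f"Removed file: {file_id}", callback)
    return len(removed)

print(remove_files())  # logs to stdout and returns 2
```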
pygpt_net/provider/gpt/vision.py
CHANGED
@@ -6,18 +6,19 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.14
+# Updated Date: 2024.12.14 22:00:00 #
 # ================================================== #

 import base64
 import os
 import re
-from typing import Optional, Union
+from typing import Optional, Union, Dict, Any, List

 from pygpt_net.core.types import (
     MODE_VISION,
 )
 from pygpt_net.core.bridge.context import BridgeContext
+from pygpt_net.item.attachment import AttachmentItem
 from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.model import ModelItem

@@ -34,7 +35,11 @@ class Vision:
         self.urls = []
         self.input_tokens = 0

-    def send(
+    def send(
+        self,
+        context: BridgeContext,
+        extra: Optional[Dict[str, Any]] = None
+    ):
         """
         Call OpenAI API for chat with vision

@@ -78,9 +83,9 @@ class Vision:
         prompt: str,
         system_prompt: str,
         model: ModelItem,
-        history: Optional[
-        attachments: Optional[
-    ) ->
+        history: Optional[List[CtxItem]] = None,
+        attachments: Optional[Dict[str, AttachmentItem]] = None,
+    ) -> List[dict]:
         """
         Build chat messages list

@@ -162,8 +167,8 @@ class Vision:
     def build_content(
         self,
         content: Union[str, list],
-        attachments: Optional[
-    ) ->
+        attachments: Optional[Dict[str, AttachmentItem]] = None,
+    ) -> List[dict]:
         """
         Build vision content

@@ -219,7 +224,7 @@ class Vision:

         return content

-    def extract_urls(self, text: str) ->
+    def extract_urls(self, text: str) -> List[str]:
         """
         Extract img urls from text

@@ -261,7 +266,7 @@ class Vision:
         """Reset input tokens counter"""
         self.input_tokens = 0

-    def get_attachments(self) ->
+    def get_attachments(self) -> Dict[str, str]:
         """
         Get attachments

@@ -269,7 +274,7 @@ class Vision:
         """
         return self.attachments

-    def get_urls(self) ->
+    def get_urls(self) -> List[str]:
         """
         Get urls

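`build_content()` is now typed to accept an attachments mapping and return a `List[dict]` of message parts. A simplified sketch of what building such vision content can look like, assuming attachments are reduced to local image paths (the real method works with `AttachmentItem` objects):

```python
import base64
from typing import Dict, List, Optional, Union

def build_content(
    content: Union[str, list],
    attachments: Optional[Dict[str, str]] = None,  # id -> image path (simplified assumption)
) -> List[dict]:
    # Start from plain text, or reuse an already-built list of parts.
    parts: List[dict] = [{"type": "text", "text": content}] if isinstance(content, str) else list(content)
    for path in (attachments or {}).values():
        with open(path, "rb") as f:
            b64 = base64.b64encode(f.read()).decode("utf-8")
        parts.append({"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{b64}"}})
    return parts
```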
pygpt_net/provider/llms/anthropic.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.
+# Updated Date: 2024.12.14 22:00:00 #
 # ================================================== #

 from llama_index.llms.anthropic import Anthropic
@@ -32,7 +32,12 @@ class AnthropicLLM(BaseLLM):
         self.id = "anthropic"
         self.type = [MODE_LLAMA_INDEX]

-    def llama(
+    def llama(
+        self,
+        window,
+        model: ModelItem,
+        stream: bool = False
+    ) -> LlamaBaseLLM:
         """
         Return LLM provider instance for llama

pygpt_net/provider/llms/azure_openai.py
CHANGED
@@ -6,9 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.
+# Updated Date: 2024.12.14 22:00:00 #
 # ================================================== #

+from typing import Optional, List, Dict
+
 from langchain_openai import AzureOpenAI
 from langchain_openai import AzureChatOpenAI

@@ -39,7 +41,12 @@ class AzureOpenAILLM(BaseLLM):
         self.id = "azure_openai"
         self.type = [MODE_LANGCHAIN, MODE_LLAMA_INDEX, "embeddings"]

-    def completion(
+    def completion(
+        self,
+        window,
+        model: ModelItem,
+        stream: bool = False
+    ):
         """
         Return LLM provider instance for completion

@@ -51,7 +58,12 @@ class AzureOpenAILLM(BaseLLM):
         args = self.parse_args(model.langchain)
         return AzureOpenAI(**args)

-    def chat(
+    def chat(
+        self,
+        window,
+        model: ModelItem,
+        stream: bool = False
+    ):
         """
         Return LLM provider instance for chat

@@ -63,7 +75,12 @@ class AzureOpenAILLM(BaseLLM):
         args = self.parse_args(model.langchain)
         return AzureChatOpenAI(**args)

-    def llama(
+    def llama(
+        self,
+        window,
+        model: ModelItem,
+        stream: bool = False
+    ) -> LlamaBaseLLM:
         """
         Return LLM provider instance for llama

@@ -75,7 +92,11 @@ class AzureOpenAILLM(BaseLLM):
         args = self.parse_args(model.llama_index)
         return LlamaAzureOpenAI(**args)

-    def get_embeddings_model(
+    def get_embeddings_model(
+        self,
+        window,
+        config: Optional[List[Dict]] = None
+    ) -> BaseEmbedding:
         """
         Return provider instance for embeddings

pygpt_net/provider/llms/base.py
CHANGED
@@ -6,10 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.
+# Updated Date: 2024.12.14 22:00:00 #
 # ================================================== #

 import os
+from typing import Optional, List, Dict

 from llama_index.core.base.embeddings.base import BaseEmbedding
 from llama_index.core.llms.llm import BaseLLM as LlamaBaseLLM
@@ -30,7 +31,13 @@ class BaseLLM:
         self.type = []  # langchain, llama_index, embeddings
         self.description = ""

-    def init(
+    def init(
+        self,
+        window,
+        model: ModelItem,
+        mode: str,
+        sub_mode: str = None
+    ):
         """
         Initialize provider

@@ -53,7 +60,11 @@ class BaseLLM:
         except Exception as e:
             pass

-    def init_embeddings(
+    def init_embeddings(
+        self,
+        window,
+        env: Optional[List[Dict]] = None
+    ):
         """
         Initialize embeddings provider

@@ -69,7 +80,10 @@ class BaseLLM:
         except Exception as e:
             pass

-    def parse_args(
+    def parse_args(
+        self,
+        options: dict
+    ) -> dict:
         """
         Parse extra args

@@ -82,7 +96,12 @@ class BaseLLM:
         args = parse_args(options['args'])
         return args

-    def completion(
+    def completion(
+        self,
+        window,
+        model: ModelItem,
+        stream: bool = False
+    ) -> any:
         """
         Return LLM provider instance for completion in langchain mode

@@ -93,7 +112,12 @@ class BaseLLM:
         """
         pass

-    def chat(
+    def chat(
+        self,
+        window,
+        model: ModelItem,
+        stream: bool = False
+    ) -> any:
         """
         Return LLM provider instance for chat in langchain mode

@@ -104,7 +128,12 @@ class BaseLLM:
         """
         pass

-    def llama(
+    def llama(
+        self,
+        window,
+        model: ModelItem,
+        stream: bool = False
+    ) -> LlamaBaseLLM:
         """
         Return LLM provider instance for llama index query and chat

@@ -115,7 +144,12 @@ class BaseLLM:
         """
         pass

-    def llama_multimodal(
+    def llama_multimodal(
+        self,
+        window,
+        model: ModelItem,
+        stream: bool = False
+    ) -> LlamaMultiModalLLM:
         """
         Return multimodal LLM provider instance for llama

@@ -126,7 +160,11 @@ class BaseLLM:
         """
         pass

-    def get_embeddings_model(
+    def get_embeddings_model(
+        self,
+        window,
+        config: Optional[List[Dict]] = None
+    ) -> BaseEmbedding:
         """
         Return provider instance for embeddings

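`BaseLLM` now spells out the hooks a provider can implement (`completion`, `chat`, `llama`, `llama_multimodal`, `get_embeddings_model`) with explicit parameter and return types. A minimal sketch of a custom provider shaped after this interface; the endpoint and model values are placeholders, and this is not a registered pygpt-net provider:

```python
from typing import Dict, List, Optional

from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.llms.llm import BaseLLM as LlamaBaseLLM
from llama_index.llms.openai_like import OpenAILike  # also used by the package's "local_ai" provider


class MyCustomLLM:
    """Provider sketch mirroring the pygpt_net.provider.llms.base.BaseLLM interface."""

    def __init__(self):
        self.id = "my_custom"          # hypothetical provider id
        self.type = ["llama_index"]    # modes this provider supports

    def llama(self, window, model, stream: bool = False) -> LlamaBaseLLM:
        # The real base class builds kwargs via parse_args(model.llama_index).
        args = {"model": "my-model", "api_base": "http://localhost:8000/v1", "api_key": "none"}
        return OpenAILike(**args)

    def get_embeddings_model(self, window, config: Optional[List[Dict]] = None) -> BaseEmbedding:
        raise NotImplementedError("add an embeddings backend if the provider supports one")
```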
pygpt_net/provider/llms/google.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.
+# Updated Date: 2024.12.14 22:00:00 #
 # ================================================== #

 from llama_index.llms.gemini import Gemini
@@ -32,7 +32,12 @@ class GoogleLLM(BaseLLM):
         self.id = "google"
         self.type = [MODE_LLAMA_INDEX]

-    def llama(
+    def llama(
+        self,
+        window,
+        model: ModelItem,
+        stream: bool = False
+    ) -> LlamaBaseLLM:
         """
         Return LLM provider instance for llama

pygpt_net/provider/llms/hugging_face.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.
+# Updated Date: 2024.12.14 22:00:00 #
 # ================================================== #

 from langchain_community.llms import HuggingFaceHub
@@ -24,7 +24,12 @@ class HuggingFaceLLM(BaseLLM):
         self.id = "huggingface"
         self.type = [MODE_LANGCHAIN]

-    def completion(
+    def completion(
+        self,
+        window,
+        model: ModelItem,
+        stream: bool = False
+    ):
         """
         Return LLM provider instance for completion

@@ -36,7 +41,12 @@ class HuggingFaceLLM(BaseLLM):
         args = self.parse_args(model.langchain)
         return HuggingFaceHub(**args)

-    def chat(
+    def chat(
+        self,
+        window,
+        model: ModelItem,
+        stream: bool = False
+    ):
         """
         Return LLM provider instance for chat

pygpt_net/provider/llms/hugging_face_api.py
CHANGED
@@ -6,10 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.
+# Updated Date: 2024.12.14 22:00:00 #
 # ================================================== #

 import os
+from typing import Optional, List, Dict

 from pygpt_net.core.types import (
     MODE_LLAMA_INDEX,
@@ -28,7 +29,12 @@ class HuggingFaceApiLLM(BaseLLM):
         self.id = "huggingface_api"
         self.type = [MODE_LLAMA_INDEX, "embeddings"]

-    def llama(
+    def llama(
+        self,
+        window,
+        model: ModelItem,
+        stream: bool = False
+    ) -> LlamaBaseLLM:
         """
         Return LLM provider instance for llama

@@ -40,7 +46,11 @@ class HuggingFaceApiLLM(BaseLLM):
         args = self.parse_args(model.llama_index)
         return HuggingFaceInferenceAPI(**args)

-    def get_embeddings_model(
+    def get_embeddings_model(
+        self,
+        window,
+        config: Optional[List[Dict]] = None
+    ) -> BaseEmbedding:
         """
         Return provider instance for embeddings

@@ -55,7 +65,11 @@ class HuggingFaceApiLLM(BaseLLM):
         })
         return HuggingFaceAPIEmbedding(**args)

-    def init_embeddings(
+    def init_embeddings(
+        self,
+        window,
+        env: Optional[List[Dict]] = None
+    ):
         """
         Initialize embeddings provider

pygpt_net/provider/llms/local.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.
+# Updated Date: 2024.12.14 22:00:00 #
 # ================================================== #

 from llama_index.llms.openai_like import OpenAILike
@@ -25,7 +25,12 @@ class LocalLLM(BaseLLM):
         self.id = "local_ai"
         self.type = [MODE_LLAMA_INDEX]

-    def llama(
+    def llama(
+        self,
+        window,
+        model: ModelItem,
+        stream: bool = False
+    ) -> LlamaBaseLLM:
         """
         Return LLM provider instance for llama

pygpt_net/provider/llms/ollama.py
CHANGED
@@ -6,10 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.
+# Updated Date: 2024.12.14 22:00:00 #
 # ================================================== #

 import os
+from typing import Optional, List, Dict

 from langchain_community.chat_models import ChatOllama

@@ -33,7 +34,12 @@ class OllamaLLM(BaseLLM):
         self.id = "ollama"
         self.type = [MODE_LANGCHAIN, MODE_LLAMA_INDEX, "embeddings"]

-    def completion(
+    def completion(
+        self,
+        window,
+        model: ModelItem,
+        stream: bool = False
+    ):
         """
         Return LLM provider instance for completion

@@ -44,7 +50,12 @@ class OllamaLLM(BaseLLM):
         """
         return None

-    def chat(
+    def chat(
+        self,
+        window,
+        model: ModelItem,
+        stream: bool = False
+    ):
         """
         Return LLM provider instance for chat

@@ -56,7 +67,12 @@ class OllamaLLM(BaseLLM):
         args = self.parse_args(model.langchain)
         return ChatOllama(**args)

-    def llama(
+    def llama(
+        self,
+        window,
+        model: ModelItem,
+        stream: bool = False
+    ) -> LlamaBaseLLM:
         """
         Return LLM provider instance for llama

@@ -69,7 +85,11 @@ class OllamaLLM(BaseLLM):
         args = self.parse_args(model.llama_index)
         return Ollama(**args)

-    def get_embeddings_model(
+    def get_embeddings_model(
+        self,
+        window,
+        config: Optional[List[Dict]] = None
+    ) -> BaseEmbedding:
         """
         Return provider instance for embeddings

@@ -84,7 +104,11 @@ class OllamaLLM(BaseLLM):
         })
         return OllamaEmbedding(**args)

-    def init_embeddings(
+    def init_embeddings(
+        self,
+        window,
+        env: Optional[List[Dict]] = None
+    ):
         """
         Initialize embeddings provider

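Across these LLM providers, `get_embeddings_model()` and `init_embeddings()` now take `Optional[List[Dict]]` option lists. A rough sketch of handling such lists, assuming each entry is a `{"name": ..., "value": ...}` mapping (an assumption about the option shape; the variable names and values below are purely illustrative):

```python
import os
from typing import Dict, List, Optional

def apply_env(env: Optional[List[Dict]] = None) -> None:
    # Export each configured variable before the embeddings client is created.
    for item in env or []:
        os.environ[str(item["name"])] = str(item["value"])

def build_kwargs(config: Optional[List[Dict]] = None) -> Dict:
    # Flatten the option list into keyword arguments, e.g. for OllamaEmbedding(**kwargs).
    return {str(item["name"]): item["value"] for item in config or []}

apply_env([{"name": "MY_PROVIDER_API_KEY", "value": "sk-..."}])   # hypothetical variable
print(build_kwargs([{"name": "model_name", "value": "llama3"}]))  # {'model_name': 'llama3'}
```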