pygpt-net 2.6.24__py3-none-any.whl → 2.6.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
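Most of the provider changes below share one pattern: each LLM provider gains (or extends) a get_embeddings_model() hook that parses the configured keyword arguments, falls back to the provider's API key stored in the application config, and maps a generic "model" argument onto the "model_name" keyword expected by the LlamaIndex embedding classes. A condensed, simplified sketch of that pattern, using the xAI provider's config key as an example (the real methods live on the provider classes shown in the diff below):

def get_embeddings_model(self, window, config=None):
    # collect keyword arguments defined in the embeddings config, if any
    args = self.parse_args({"args": config}, window) if config is not None else {}
    # fall back to the API key stored in the app config (the key name differs per provider)
    if not args.get("api_key"):
        args["api_key"] = window.core.config.get("api_key_xai", "")
    # LlamaIndex embedding classes expect "model_name" rather than "model"
    if "model" in args and "model_name" not in args:
        args["model_name"] = args.pop("model")
    return XAIEmbedding(**args)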
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.08 19:00:00 #
+ # Updated Date: 2025.08.26 19:00:00 #
  # ================================================== #

  from typing import Optional, List, Dict
@@ -28,7 +28,7 @@ class HuggingFaceRouterLLM(BaseLLM):
  super(HuggingFaceRouterLLM, self).__init__(*args, **kwargs)
  self.id = "huggingface_router"
  self.name = "HuggingFace Router"
- self.type = [MODE_CHAT, MODE_LLAMA_INDEX]
+ self.type = [MODE_CHAT, MODE_LLAMA_INDEX, "embeddings"]

  def completion(
  self,
@@ -78,6 +78,10 @@ class HuggingFaceRouterLLM(BaseLLM):
  """
  from llama_index.llms.openai_like import OpenAILike
  args = self.parse_args(model.llama_index, window)
+ if "model" not in args:
+ args["model"] = model.id
+ if "api_key" not in args or args["api_key"] == "":
+ args["api_key"] = window.core.config.get("api_key_hugging_face", "")
  return OpenAILike(**args)

  def llama_multimodal(
@@ -108,7 +112,17 @@ class HuggingFaceRouterLLM(BaseLLM):
  :param config: config keyword arguments list
  :return: Embedding provider instance
  """
- pass
+ from llama_index.embeddings.huggingface_api import HuggingFaceInferenceAPIEmbedding
+ args = {}
+ if config is not None:
+ args = self.parse_args({
+ "args": config,
+ }, window)
+ if "api_key" not in args or args["api_key"] == "":
+ args["api_key"] = window.core.config.get("api_key_hugging_face", "")
+ if "model" in args and "model_name" not in args:
+ args["model_name"] = args.pop("model")
+ return HuggingFaceInferenceAPIEmbedding(**args)

  def get_models(
  self,
File without changes
@@ -0,0 +1,71 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package #
+ # Website: https://pygpt.net #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
+ # MIT License #
+ # Created By : Marcin Szczygliński #
+ # Updated Date: 2025.08.26 19:00:00 #
+ # ================================================== #
+
+ import asyncio
+ from typing import Any, List, Optional
+
+ import xai_sdk
+ from llama_index.core.embeddings import BaseEmbedding
+
+
+ class XAIEmbedding(BaseEmbedding):
+ """
+ LlamaIndex xAI Embedding SDK wrapper.
+ """
+
+ def __init__(
+ self,
+ model_name: str,
+ api_key: Optional[str] = None,
+ api_host: str = "api.x.ai",
+ **kwargs: Any,
+ ) -> None:
+ super().__init__(model_name=model_name, **kwargs)
+ self._api_key = api_key
+ self._api_host = api_host
+ self._client = xai_sdk.Client(api_key=api_key, api_host=api_host)
+
+ def _run_async(self, coro):
+ try:
+ loop = asyncio.get_running_loop()
+ except RuntimeError:
+ loop = None
+
+ if loop and loop.is_running():
+ return asyncio.run_coroutine_threadsafe(coro, loop).result()
+ else:
+ return asyncio.run(coro)
+
+ async def _aembed_many(self, texts: List[str]) -> List[List[float]]:
+ embeddings: List[List[float]] = []
+ async for (values, _shape) in self._client.embedder.embed(
+ texts=texts, model_name=self.model_name
+ ):
+ embeddings.append(list(values))
+ return embeddings
+
+ def _get_query_embedding(self, query: str) -> List[float]:
+ return self._run_async(self._aembed_many([query]))[0]
+
+ async def _aget_query_embedding(self, query: str) -> List[float]:
+ return (await self._aembed_many([query]))[0]
+
+ def _get_text_embedding(self, text: str) -> List[float]:
+ return self._run_async(self._aembed_many([text]))[0]
+
+ async def _aget_text_embedding(self, text: str) -> List[float]:
+ return (await self._aembed_many([text]))[0]
+
+ def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
+ return self._run_async(self._aembed_many(texts))
+
+ async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
+ return await self._aembed_many(texts)
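A rough usage sketch of the new wrapper in isolation (the model name and environment variable below are placeholders for illustration, not values taken from the package):

import os

from pygpt_net.provider.llms.llama_index.x_ai.embedding import XAIEmbedding

# placeholder model name and API key source, for illustration only
embed_model = XAIEmbedding(
    model_name="example-embedding-model",
    api_key=os.environ.get("XAI_API_KEY", ""),
)

# BaseEmbedding exposes synchronous entry points that route through the async xai_sdk call
query_vector = embed_model.get_query_embedding("hello world")
text_vector = embed_model.get_text_embedding("a document chunk")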
@@ -8,7 +8,9 @@
  # Created By : Marcin Szczygliński #
  # Updated Date: 2025.08.06 01:00:00 #
  # ================================================== #
+ from typing import Optional, Dict, List

+ from llama_index.core.base.embeddings.base import BaseEmbedding
  from llama_index.core.llms.llm import BaseLLM as LlamaBaseLLM

  from pygpt_net.core.types import (
@@ -23,7 +25,29 @@ class LocalLLM(BaseLLM):
  super(LocalLLM, self).__init__(*args, **kwargs)
  self.id = "local_ai"
  self.name = "Local model (OpenAI API compatible)"
- self.type = [MODE_LLAMA_INDEX]
+ self.type = [MODE_LLAMA_INDEX, "embeddings"]
+
+ def get_embeddings_model(
+ self,
+ window,
+ config: Optional[List[Dict]] = None
+ ) -> BaseEmbedding:
+ """
+ Return provider instance for embeddings
+
+ :param window: window instance
+ :param config: config keyword arguments list
+ :return: Embedding provider instance
+ """
+ from llama_index.embeddings.openai import OpenAIEmbedding
+ args = {}
+ if config is not None:
+ args = self.parse_args({
+ "args": config,
+ }, window)
+ if "model" in args and "model_name" not in args:
+ args["model_name"] = args.pop("model")
+ return OpenAIEmbedding(**args)

  def llama(
  self,
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.06 01:00:00 #
+ # Updated Date: 2025.08.26 19:00:00 #
  # ================================================== #

  import os
@@ -47,6 +47,8 @@ class MistralAILLM(BaseLLM):
  args = self.parse_args(model.llama_index, window)
  if "model" not in args:
  args["model"] = model.id
+ if "api_key" not in args or args["api_key"] == "":
+ args["api_key"] = window.core.config.get("api_key_mistral", "")
  return MistralAI(**args)

  def get_embeddings_model(
@@ -67,6 +69,10 @@ class MistralAILLM(BaseLLM):
  args = self.parse_args({
  "args": config,
  }, window)
+ if "api_key" not in args or args["api_key"] == "":
+ args["api_key"] = window.core.config.get("api_key_mistral", "")
+ if "model" in args and "model_name" not in args:
+ args["model_name"] = args.pop("model")
  return MistralAIEmbedding(**args)

  def init_embeddings(
@@ -87,3 +93,25 @@ class MistralAILLM(BaseLLM):
  if ('OPENAI_API_KEY' not in os.environ
  and (window.core.config.get('api_key') is None or window.core.config.get('api_key') == "")):
  os.environ['OPENAI_API_KEY'] = "_"
+
+ def get_models(
+ self,
+ window,
+ ) -> List[Dict]:
+ """
+ Return list of models for the provider
+
+ :param window: window instance
+ :return: list of models
+ """
+ items = []
+ client = self.get_client(window)
+ models_list = client.models.list()
+ if models_list.data:
+ for item in models_list.data:
+ id = item.id
+ items.append({
+ "id": id,
+ "name": id,
+ })
+ return items
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.06 01:00:00 #
+ # Updated Date: 2025.08.26 19:00:00 #
  # ================================================== #

  import os
@@ -118,6 +118,8 @@ class OllamaLLM(BaseLLM):
  if 'OLLAMA_API_BASE' in os.environ:
  if "base_url" not in args:
  args["base_url"] = os.environ['OLLAMA_API_BASE']
+ if "model" in args and "model_name" not in args:
+ args["model_name"] = args.pop("model")
  return OllamaEmbedding(**args)

  def init_embeddings(
@@ -6,10 +6,9 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.06 01:00:00 #
+ # Updated Date: 2025.08.26 19:00:00 #
  # ================================================== #

- import json
  from typing import Optional, List, Dict

  # from langchain_openai import OpenAI
@@ -93,6 +92,8 @@ class OpenAILLM(BaseLLM):
  from .llama_index.openai import OpenAI as LlamaOpenAI
  from .llama_index.openai import OpenAIResponses as LlamaOpenAIResponses
  args = self.parse_args(model.llama_index, window)
+ if "api_key" not in args:
+ args["api_key"] = window.core.config.get("api_key", "")
  if "model" not in args:
  args["model"] = model.id

@@ -148,6 +149,10 @@
  args = self.parse_args({
  "args": config,
  }, window)
+ if "api_key" not in args:
+ args["api_key"] = window.core.config.get("api_key", "")
+ if "model" in args and "model_name" not in args:
+ args["model_name"] = args.pop("model")
  return OpenAIEmbedding(**args)

  def get_models(
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.06 01:00:00 #
+ # Updated Date: 2025.08.26 19:00:00 #
  # ================================================== #

  from typing import Optional, List, Dict
@@ -28,7 +28,7 @@ class xAILLM(BaseLLM):
  super(xAILLM, self).__init__(*args, **kwargs)
  self.id = "x_ai"
  self.name = "xAI"
- self.type = [MODE_CHAT, MODE_LLAMA_INDEX]
+ self.type = [MODE_CHAT, MODE_LLAMA_INDEX, "embeddings"]

  def completion(
  self,
@@ -78,6 +78,12 @@ class xAILLM(BaseLLM):
  """
  from llama_index.llms.openai_like import OpenAILike
  args = self.parse_args(model.llama_index, window)
+ if "model" not in args:
+ args["model"] = model.id
+ if "api_key" not in args or args["api_key"] == "":
+ args["api_key"] = window.core.config.get("api_key_xai", "")
+ if "api_base" not in args or args["api_base"] == "":
+ args["api_base"] = window.core.config.get("api_endpoint_xai", "https://api.x.ai/v1")
  return OpenAILike(**args)

  def llama_multimodal(
@@ -108,7 +114,17 @@ class xAILLM(BaseLLM):
  :param config: config keyword arguments list
  :return: Embedding provider instance
  """
- pass
+ from .llama_index.x_ai.embedding import XAIEmbedding
+ args = {}
+ if config is not None:
+ args = self.parse_args({
+ "args": config,
+ }, window)
+ if "api_key" not in args or args["api_key"] == "":
+ args["api_key"] = window.core.config.get("api_key_xai", "")
+ if "model" in args and "model_name" not in args:
+ args["model_name"] = args.pop("model")
+ return XAIEmbedding(**args)

  def get_models(
  self,
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: pygpt-net
- Version: 2.6.24
+ Version: 2.6.25
  Summary: Desktop AI Assistant powered by: OpenAI GPT-5, GPT-4, o1, o3, Gemini, Claude, Grok, DeepSeek, and other models supported by Llama Index, and Ollama. Chatbot, agents, completion, image generation, vision analysis, speech-to-text, plugins, internet access, file handling, command execution and more.
  License: MIT
  Keywords: ai,api,api key,app,assistant,bielik,chat,chatbot,chatgpt,claude,dall-e,deepseek,desktop,gemini,gpt,gpt-3.5,gpt-4,gpt-4-vision,gpt-4o,gpt-5,gpt-oss,gpt3.5,gpt4,grok,langchain,llama-index,llama3,mistral,o1,o3,ollama,openai,presets,py-gpt,py_gpt,pygpt,pyside,qt,text completion,tts,ui,vision,whisper
@@ -21,39 +21,42 @@ Requires-Dist: PyAudio (>=0.2.14,<0.3.0)
  Requires-Dist: PyAutoGUI (>=0.9.54,<0.10.0)
  Requires-Dist: PySide6 (==6.9.1)
  Requires-Dist: Pygments (>=2.19.2,<3.0.0)
- Requires-Dist: SQLAlchemy (>=2.0.41,<3.0.0)
+ Requires-Dist: SQLAlchemy (>=2.0.43,<3.0.0)
  Requires-Dist: SpeechRecognition (>=3.14.3,<4.0.0)
  Requires-Dist: Telethon (>=1.40.0,<2.0.0)
  Requires-Dist: anthropic (>=0.54.0,<0.55.0)
- Requires-Dist: azure-core (>=1.34.0,<2.0.0)
- Requires-Dist: beautifulsoup4 (>=4.13.4,<5.0.0)
- Requires-Dist: boto3 (>=1.38.41,<2.0.0)
+ Requires-Dist: azure-core (>=1.35.0,<2.0.0)
+ Requires-Dist: beautifulsoup4 (>=4.13.5,<5.0.0)
+ Requires-Dist: boto3 (>=1.40.17,<2.0.0)
  Requires-Dist: chromadb (>=0.5.20,<0.6.0)
  Requires-Dist: croniter (>=2.0.7,<3.0.0)
  Requires-Dist: docker (>=7.1.0,<8.0.0)
  Requires-Dist: docx2txt (>=0.8,<0.9)
  Requires-Dist: gkeepapi (>=0.15.1,<0.16.0)
- Requires-Dist: google-api-python-client (>=2.173.0,<3.0.0)
+ Requires-Dist: google-api-python-client (>=2.179.0,<3.0.0)
  Requires-Dist: google-generativeai (>=0.8.5,<0.9.0)
+ Requires-Dist: grpcio (>=1.74.0,<2.0.0)
  Requires-Dist: httpx (>=0.28.1,<0.29.0)
  Requires-Dist: httpx-socks (>=0.10.1,<0.11.0)
- Requires-Dist: ipykernel (>=6.29.5,<7.0.0)
+ Requires-Dist: huggingface-hub (>=0.33.5,<0.34.0)
+ Requires-Dist: ipykernel (>=6.30.1,<7.0.0)
  Requires-Dist: jupyter_client (>=8.6.3,<9.0.0)
  Requires-Dist: llama-index (>=0.12.44,<0.13.0)
- Requires-Dist: llama-index-agent-openai (>=0.4.8,<0.5.0)
+ Requires-Dist: llama-index-agent-openai (>=0.4.12,<0.5.0)
  Requires-Dist: llama-index-core (==0.12.44)
- Requires-Dist: llama-index-embeddings-azure-openai (>=0.3.8,<0.4.0)
+ Requires-Dist: llama-index-embeddings-azure-openai (>=0.3.9,<0.4.0)
  Requires-Dist: llama-index-embeddings-gemini (>=0.3.2,<0.4.0)
  Requires-Dist: llama-index-embeddings-google-genai (>=0.2.1,<0.3.0)
  Requires-Dist: llama-index-embeddings-huggingface-api (>=0.3.1,<0.4.0)
  Requires-Dist: llama-index-embeddings-mistralai (>=0.3.0,<0.4.0)
  Requires-Dist: llama-index-embeddings-ollama (>=0.5.0,<0.6.0)
  Requires-Dist: llama-index-embeddings-openai (>=0.3.1,<0.4.0)
- Requires-Dist: llama-index-llms-anthropic (>=0.6.12,<0.7.0)
- Requires-Dist: llama-index-llms-azure-openai (>=0.3.2,<0.4.0)
- Requires-Dist: llama-index-llms-deepseek (>=0.1.1,<0.2.0)
+ Requires-Dist: llama-index-embeddings-voyageai (>=0.3.6,<0.4.0)
+ Requires-Dist: llama-index-llms-anthropic (>=0.6.19,<0.7.0)
+ Requires-Dist: llama-index-llms-azure-openai (>=0.3.4,<0.4.0)
+ Requires-Dist: llama-index-llms-deepseek (>=0.1.2,<0.2.0)
  Requires-Dist: llama-index-llms-gemini (>=0.5.0,<0.6.0)
- Requires-Dist: llama-index-llms-google-genai (>=0.2.2,<0.3.0)
+ Requires-Dist: llama-index-llms-google-genai (>=0.2.6,<0.3.0)
  Requires-Dist: llama-index-llms-huggingface-api (>=0.3.1,<0.4.0)
  Requires-Dist: llama-index-llms-mistralai (>=0.6.1,<0.7.0)
  Requires-Dist: llama-index-llms-ollama (>=0.6.2,<0.7.0)
@@ -63,7 +66,7 @@ Requires-Dist: llama-index-llms-perplexity (>=0.3.7,<0.4.0)
  Requires-Dist: llama-index-multi-modal-llms-openai (>=0.5.1,<0.6.0)
  Requires-Dist: llama-index-readers-chatgpt-plugin (>=0.3.0,<0.4.0)
  Requires-Dist: llama-index-readers-database (>=0.3.0,<0.4.0)
- Requires-Dist: llama-index-readers-file (>=0.4.9,<0.5.0)
+ Requires-Dist: llama-index-readers-file (>=0.4.11,<0.5.0)
  Requires-Dist: llama-index-readers-github (>=0.5.0,<0.6.0)
  Requires-Dist: llama-index-readers-google (>=0.5.0,<0.6.0)
  Requires-Dist: llama-index-readers-microsoft-onedrive (>=0.3.0,<0.4.0)
@@ -80,26 +83,27 @@ Requires-Dist: onnxruntime (==1.20.1)
  Requires-Dist: openai (==1.96.1)
  Requires-Dist: openai-agents (>=0.2.3,<0.3.0)
  Requires-Dist: opencv-python (>=4.11.0.86,<5.0.0.0)
- Requires-Dist: packaging (>=24.2,<25.0)
+ Requires-Dist: packaging (>=25.0,<26.0)
  Requires-Dist: pandas (>=2.2.3,<3.0.0)
  Requires-Dist: paramiko (>=4.0.0,<5.0.0)
  Requires-Dist: pillow (>=10.4.0,<11.0.0)
- Requires-Dist: pinecone-client (>=3.2.2,<4.0.0)
+ Requires-Dist: pinecone (>=7.0.1,<8.0.0)
  Requires-Dist: psutil (>=7.0.0,<8.0.0)
  Requires-Dist: pydub (>=0.25.1,<0.26.0)
  Requires-Dist: pygame (>=2.6.1,<3.0.0)
  Requires-Dist: pynput (>=1.8.1,<2.0.0)
- Requires-Dist: pypdf (>=5.6.0,<6.0.0)
+ Requires-Dist: pypdf (>=5.9.0,<6.0.0)
  Requires-Dist: pyserial (>=3.5,<4.0)
  Requires-Dist: python-markdown-math (>=0.8,<0.9)
  Requires-Dist: qasync (>=0.27.1,<0.28.0)
  Requires-Dist: qt-material (>=2.17,<3.0)
- Requires-Dist: redis (>=5.3.0,<6.0.0)
+ Requires-Dist: redis (>=5.3.1,<6.0.0)
  Requires-Dist: show-in-file-manager (>=1.1.5,<2.0.0)
  Requires-Dist: tiktoken (>=0.9.0,<0.10.0)
  Requires-Dist: transformers (==4.48.3)
  Requires-Dist: urllib3 (>=1.26.20,<2.0.0)
  Requires-Dist: wikipedia (>=1.4.0,<2.0.0)
+ Requires-Dist: xai-sdk (>=1.1.0,<2.0.0)
  Requires-Dist: youtube-transcript-api (>=0.6.3,<0.7.0)
  Project-URL: Changelog, https://github.com/szczyglis-dev/py-gpt/blob/master/CHANGELOG.md
  Project-URL: Documentation, https://pygpt.readthedocs.io/
@@ -112,7 +116,7 @@ Description-Content-Type: text/markdown

  [![pygpt](https://snapcraft.io/pygpt/badge.svg)](https://snapcraft.io/pygpt)

- Release: **2.6.24** | build: **2025-08-26** | Python: **>=3.10, <3.14**
+ Release: **2.6.25** | build: **2025-08-26** | Python: **>=3.10, <3.14**

  > Official website: https://pygpt.net | Documentation: https://pygpt.readthedocs.io
  >
@@ -1245,6 +1249,8 @@ PyGPT has built-in support for models (as of 2025-07-26):
  All models are specified in the configuration file `models.json`, which you can customize.
  This file is located in your working directory. You can add new models provided directly by `OpenAI API` (or compatible) and those supported by `LlamaIndex` or `Ollama` to this file. Configuration for LlamaIndex is placed in `llama_index` key.

+ **Tip**: Anthropic and Deepseek API providers use VoyageAI for embeddings, so you must also configure the Voyage API key if you want to use embeddings from these providers.
+
  ## Adding a custom model

  You can add your own models. See the section `Extending PyGPT / Adding a new model` for more info.
@@ -1340,8 +1346,7 @@ Define parameters like model name and Ollama base URL in the Embeddings provider

  ### Google Gemini, Anthropic Claude, xAI Grok, etc.

- To use `Gemini`, `Grok`, or `Claude` models, select the `Chat` or `Chat with Files` mode in PyGPT and select a predefined model. `Chat` mode works via OpenAI SDK, `Chat with Files` mode works via LlamaIndex.
- If you want to use `Chat with Files` mode (LlamaIndex), then remember to configure the required parameters like API keys in the model ENV config fields:
+ If you want to use non-OpenAI models in `Chat with Files` and `Agents (LlamaIndex)` modes, then remember to configure the required parameters like API keys in the model config fields. `Chat` mode works via OpenAI SDK (compatible API), `Chat with Files` and `Agents (LlamaIndex)` modes works via LlamaIndex.

  **Google Gemini**

@@ -2484,11 +2489,13 @@ Enable/disable remote tools, like Web Search or Image generation to use in OpenA

  **Embeddings**

- - `Embeddings provider`: Embeddings provider.
+ - `Embeddings provider`: Global embeddings provider (for indexing and Chat with Files).
+
+ - `Embeddings provider (ENV)`: ENV vars for global embeddings provider (API keys, etc.).

- - `Embeddings provider (ENV)`: ENV vars to embeddings provider (API keys, etc.).
+ - `Embeddings provider (**kwargs)`: Keyword arguments for global embeddings provider (model_name, etc.).

- - `Embeddings provider (**kwargs)`: Keyword arguments for embeddings provider (model name, etc.).
+ - `Default embedding providers for attachments`: Define embedding model by provider to use in attachments.

  - `RPM limit for embeddings API calls`: Specify the limit of maximum requests per minute (RPM), 0 = no limit.

@@ -2538,13 +2545,15 @@ Enable/disable remote tools, like Web Search or Image generation to use in OpenA

  **Agents (LlamaIndex / OpenAI)**

- - `Max steps (per iteration)` - Max steps is one iteration before goal achieved
+ - `Max steps (per iteration)`: Max steps is one iteration before goal achieved

- - `Max evaluation steps in loop` - Maximum evaluation steps to achieve the final result, set 0 to infinity
+ - `Max evaluation steps in loop`: Maximum evaluation steps to achieve the final result, set 0 to infinity

- - `Append and compare previous evaluation prompt in next evaluation` - If enabled, previous improvement prompt will be checked in next eval in loop, default: False
+ - `Model for evaluation`: Model used for evaluation with score/percentage (loop). If not selected, then current active model will be used.

- - `Split response messages` - Split response messages to separated context items in OpenAI Agents mode.
+ - `Append and compare previous evaluation prompt in next evaluation`: If enabled, previous improvement prompt will be checked in next eval in loop, default: False
+
+ - `Split response messages`: Split response messages to separated context items in OpenAI Agents mode.

  **Autonomous (Legacy agents)**

@@ -3026,6 +3035,8 @@ PyGPT can be extended with:

  - custom web search engine providers

+ - custom agents
+
  **Examples (tutorial files)**

  See the `examples` directory in this repository with examples of custom launcher, plugin, vector store, LLM (LlamaIndex) provider and data loader:
@@ -3529,6 +3540,12 @@ may consume additional tokens that are not displayed in the main window.

  ## Recent changes:

+ **2.6.25 (2025-08-26)**
+
+ - Fixed the empty agent ID issue in OpenAI Agents evaluation.
+ - Added the ability to select a custom model for evaluation.
+ - Added embedding providers: Anthropic, Deepseek, MistralAI, xAI, VoyageAI.
+
  **2.6.24 (2025-08-26)**

  - Added a new option: LlamaIndex -> Embeddings -> Default embedding providers for attachments.
@@ -1,6 +1,6 @@
- pygpt_net/CHANGELOG.txt,sha256=CelRTQEoRdFZzFkEvyV1hcafcEd6kQIiy9YUWYwLQKc,101925
+ pygpt_net/CHANGELOG.txt,sha256=H2S4Ef-StXOJORUfHzAlXuDM2RfGCcqfjX1phBCXdvM,102146
  pygpt_net/LICENSE,sha256=dz9sfFgYahvu2NZbx4C1xCsVn9GVer2wXcMkFRBvqzY,1146
- pygpt_net/__init__.py,sha256=fuXSfpRaKO8SHnOaAHsKG-CzdLXT7lUXV9YYIznwW18,1373
+ pygpt_net/__init__.py,sha256=nUePp_QEAhrDVl76LALHaSxBux29fjPz6LxnAoBSKIg,1373
  pygpt_net/app.py,sha256=8Yz8r16FpNntiVn2pN-_qnoYHd-jVdugKP0P_MOiYSA,21133
  pygpt_net/config.py,sha256=LCKrqQfePVNrAvH3EY_1oZx1Go754sDoyUneJ0iGWFI,16660
  pygpt_net/container.py,sha256=NsMSHURaEC_eW8vrCNdztwqkxB7jui3yVlzUOMYvCHg,4124
@@ -59,7 +59,7 @@ pygpt_net/controller/config/field/dictionary.py,sha256=m3nSL8xAp0NRnr_rVmTZA5uTQ
  pygpt_net/controller/config/field/input.py,sha256=Dx04ivrwM1KqA41uHYNGzwq1c7O-zPnU_NI-3I45hPY,3992
  pygpt_net/controller/config/field/slider.py,sha256=dYbICd3ID-aLlc2a-bvFgWS4jceVz2UliTQKYy7Pl1Q,4560
  pygpt_net/controller/config/field/textarea.py,sha256=Ln545IHzXBeFIjnfMIpmlUr-V3wNYjw4qGiz4NYRw34,2796
- pygpt_net/controller/config/placeholder.py,sha256=Q4csMg6q1xpV5Yz8jb2eh33lqdvqE5AAUg7L9ProRRY,16030
+ pygpt_net/controller/config/placeholder.py,sha256=tbjSgX8OzT5eLsJgfxfhLxiN2PMqTqdy8_EB0ULoyb0,16138
  pygpt_net/controller/ctx/__init__.py,sha256=0wH7ziC75WscBW8cxpeGBwEz5tolo_kCxGPoz2udI_E,507
  pygpt_net/controller/ctx/common.py,sha256=1jjRfEK1S4IqnzEGg1CIF-QqSN_83NLpaVtfB610NcM,6592
  pygpt_net/controller/ctx/ctx.py,sha256=xJqgfHGXa_DmvIdGMnoiJZWENYaeGQYgtcZuUUv1CXk,39209
@@ -99,7 +99,7 @@ pygpt_net/controller/mode/__init__.py,sha256=1Kcz0xHc2IW_if9S9eQozBUvIu69eLAe7T-
  pygpt_net/controller/mode/mode.py,sha256=F3rERGN_sAgAqDITFYd1Nj56_4MiBIS9TwjjSPH1uEc,7437
  pygpt_net/controller/model/__init__.py,sha256=mQXq9u269D8TD3u_44J6DFFyHKkaZplk-tRFCssBGbE,509
  pygpt_net/controller/model/editor.py,sha256=_WDVFTrgZKM5Y8MZiWur4e5oSuRbXr-Q3PDozVtZ9fw,16384
- pygpt_net/controller/model/importer.py,sha256=gb-ETuoqaAZpr-KX7vIuBn1vB1-cBZgm7DwYJebDcFQ,22148
+ pygpt_net/controller/model/importer.py,sha256=BO7nIN83qGq-sxROwY5C9v2OhcVwKcHx3-IJTz8nZX4,22996
  pygpt_net/controller/model/model.py,sha256=E0VfgIwNn75pjnB_v3RnqHr6jV1Eeua8VgpreQlA8vI,9132
  pygpt_net/controller/notepad/__init__.py,sha256=ZbMh4D6nsGuI4AwYMdegfij5ubmUznEE_UcqSSDjSPk,511
  pygpt_net/controller/notepad/notepad.py,sha256=mQgXalIMKkYTVKGkUD1mEHkHIzhTlt3QSiSb5eIhZgo,10767
@@ -156,7 +156,7 @@ pygpt_net/core/agents/runners/llama_assistant.py,sha256=a_Abkc8u1S8vr6lUIDRrzTM9
  pygpt_net/core/agents/runners/llama_plan.py,sha256=CC3WPG9KUxd_dRjPZROOrmPQrWQ_u8C0nRx0TCzi9bE,13391
  pygpt_net/core/agents/runners/llama_steps.py,sha256=1SBLp5t4TUsxpYIUtSSnBy5Sd2AxheDlv2AXimls-Vg,7328
  pygpt_net/core/agents/runners/llama_workflow.py,sha256=rWCUge9IBxvOITceqxd6rrgzg-3RDyo28oJBorsjwyc,12717
- pygpt_net/core/agents/runners/loop.py,sha256=PLM6dAYqYuHQto5YygDZCeCo4ihDfS5C2KUP11I4lk8,6142
+ pygpt_net/core/agents/runners/loop.py,sha256=7Vu50yidu2HSOifOX6bhbyTEHBS5An4GmGUBMYdHOco,7273
  pygpt_net/core/agents/runners/openai_workflow.py,sha256=d5aWFckj34Zca5p35dYIzEECz17Z757J6ZzY94damG8,8766
  pygpt_net/core/agents/tools.py,sha256=UW5-3q-cPpmx_FlDyuF2qymbgIJRmkklNmng3IokEUM,22116
  pygpt_net/core/assistants/__init__.py,sha256=FujLn0ia5S3-7nX-Td_0S5Zqiw6Yublh58c4Di7rRgY,514
@@ -347,8 +347,8 @@ pygpt_net/css_rc.py,sha256=i13kX7irhbYCWZ5yJbcMmnkFp_UfS4PYnvRFSPF7XXo,11349
  pygpt_net/data/audio/click_off.mp3,sha256=aNiRDP1pt-Jy7ija4YKCNFBwvGWbzU460F4pZWZDS90,65201
  pygpt_net/data/audio/click_on.mp3,sha256=qfdsSnthAEHVXzeyN4LlC0OvXuyW8p7stb7VXtlvZ1k,65201
  pygpt_net/data/audio/ok.mp3,sha256=LTiV32pEBkpUGBkKkcOdOFB7Eyt_QoP2Nv6c5AaXftk,32256
- pygpt_net/data/config/config.json,sha256=A1UKmT4CGf0b2G_-bwvIXVbiF4IhYVNIKQvSFjAsqYw,25284
- pygpt_net/data/config/models.json,sha256=LHxbLINmi1WPN8c3m_VTOR5UYmHihrHlur-aYJ73Yis,110162
+ pygpt_net/data/config/config.json,sha256=YpcIHA_-OsuXiTmf_PoqW_UBadC8M7H7VAUgwxopj2w,25678
+ pygpt_net/data/config/models.json,sha256=6qBHttexFm4ehoQGa5oXrIGjafHn44XmS7R7VXncTXw,110162
  pygpt_net/data/config/modes.json,sha256=M882iiqX_R2sNQl9cqZ3k-uneEvO9wpARtHRMLx_LHw,2265
  pygpt_net/data/config/presets/agent_code_act.json,sha256=GYHqhxtKFLUCvRI3IJAJ7Qe1k8yD9wGGNwManldWzlI,754
  pygpt_net/data/config/presets/agent_openai.json,sha256=bpDJgLRey_effQkzFRoOEGd4aHUrmzeODSDdNzrf62I,730
@@ -383,7 +383,7 @@ pygpt_net/data/config/presets/current.vision.json,sha256=x1ll5B3ROSKYQA6l27PRGXU
  pygpt_net/data/config/presets/dalle_white_cat.json,sha256=esqUb43cqY8dAo7B5u99tRC0MBV5lmlrVLnJhTSkL8w,552
  pygpt_net/data/config/presets/joke_agent.json,sha256=R6n9P7KRb0s-vZWZE7kHdlOfXAx1yYrPmUw8uLyw8OE,474
  pygpt_net/data/config/presets/joke_expert.json,sha256=jjcoIYEOaEp8kLoIbecxQROiq4J3Zess5w8_HmngPOY,671
- pygpt_net/data/config/settings.json,sha256=A3HI0xqGzzcpd1q4WBL2yOQmd6VAClqNJBFUOi6QVbc,67059
+ pygpt_net/data/config/settings.json,sha256=PzWl89Gi3mbmEtZtsG3koVlQIPw_4gwun9HTCGbsmhc,68019
  pygpt_net/data/config/settings_section.json,sha256=OLWgjs3hHFzk50iwzVyUpcFW7dfochOnbZS0vDoMlDU,1158
  pygpt_net/data/css/fix_windows.css,sha256=Mks14Vg25ncbMqZJfAMStrhvZmgHF6kU75ohTWRZeI8,664
  pygpt_net/data/css/fix_windows.dark.css,sha256=7hGbT_qI5tphYC_WlFpJRDAcmjBb0AQ2Yc-y-_Zzf2M,161
@@ -1608,7 +1608,7 @@ pygpt_net/data/js/katex/katex.min.css,sha256=lVaKnUaQNG4pI71WHffQZVALLQF4LMZEk4n
  pygpt_net/data/js/katex/katex.min.js,sha256=KLASOtKS2x8pUxWVzCDmlWJ4jhuLb0vtrgakbD6gDDo,276757
  pygpt_net/data/languages.csv,sha256=fvtER6vnTXFHQslCh-e0xCfZDQ-ijgW4GYpOJG4U7LY,8289
  pygpt_net/data/locale/locale.de.ini,sha256=-9uJDmILG7HXvU-L4HbYGmKEwHLqbH0CKVfuZnkguz4,102617
- pygpt_net/data/locale/locale.en.ini,sha256=fYULHBTXfq81D41JDSYl8igDpgfSnxUFprpKisCKbKI,93270
+ pygpt_net/data/locale/locale.en.ini,sha256=pXzxi_iuUSZmK8UUsrXPMQIaHEmluwGglWCA2FC6t3c,93624
  pygpt_net/data/locale/locale.es.ini,sha256=OuXZrLbmPDDYWXgzREf74F4t7KYvGHRJZtb_EbreyCQ,103268
  pygpt_net/data/locale/locale.fr.ini,sha256=6Qu9cL_MM6mQ6dWXt3_-zKh4W_mmAVA4woDylOZXavk,106069
  pygpt_net/data/locale/locale.it.ini,sha256=6FhyvqOs_jtlawtTBoDp5D2q9yOU8j3W0MnvruvkXvo,101076
@@ -2038,7 +2038,7 @@ pygpt_net/provider/core/calendar/db_sqlite/storage.py,sha256=QDclQCQdr4QyRIqjgGX
  pygpt_net/provider/core/config/__init__.py,sha256=jQQgG9u_ZLsZWXustoc1uvC-abUvj4RBKPAM30-f2Kc,488
  pygpt_net/provider/core/config/base.py,sha256=cbvzbMNqL2XgC-36gGubnU37t94AX7LEw0lecb2Nm80,1365
  pygpt_net/provider/core/config/json_file.py,sha256=GCcpCRQnBiSLWwlGbG9T3ZgiHkTfp5Jsg2KYkZcakBw,6789
- pygpt_net/provider/core/config/patch.py,sha256=vcRb36T1VVKW6JP_CNb5srvFqj7wR5gvkavUBidv8OQ,121650
+ pygpt_net/provider/core/config/patch.py,sha256=7eIeCvxm6Bn3tck6blq22LD5W8xQ5f9brQigX4EvqFA,123347
  pygpt_net/provider/core/ctx/__init__.py,sha256=jQQgG9u_ZLsZWXustoc1uvC-abUvj4RBKPAM30-f2Kc,488
  pygpt_net/provider/core/ctx/base.py,sha256=Tfb4MDNe9BXXPU3lbzpdYwJF9S1oa2-mzgu5XT4It9g,3003
  pygpt_net/provider/core/ctx/db_sqlite/__init__.py,sha256=0dP8VhI4bnFsQQKxAkaleKFlyaMycDD_cnE7gBCa57Y,512
@@ -2112,27 +2112,29 @@ pygpt_net/provider/gpt/worker/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5N
  pygpt_net/provider/gpt/worker/assistants.py,sha256=AzoY9wAaK57STRw2-6LIuhJ1RRDbQLRcjIZJtxr3lmQ,21510
  pygpt_net/provider/gpt/worker/importer.py,sha256=4O4J_bEeiw7iaRv4HqE1AHfiywhc0td5SztA2uIfQiU,16308
  pygpt_net/provider/llms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- pygpt_net/provider/llms/anthropic.py,sha256=UtX-5NieNtm9idy-bQuDnKWYgqED5yC-Vn8aLNIr9j8,2434
- pygpt_net/provider/llms/azure_openai.py,sha256=aUE0ztKPWopoIDDzB20Ez1I11WdzU2eLWvq6WS28fYI,3522
+ pygpt_net/provider/llms/anthropic.py,sha256=j31yrHFFMOP_robMOarMz0oOPXpyyO2h3DxhMSi4e54,3609
+ pygpt_net/provider/llms/azure_openai.py,sha256=QxgK3KeqEEa38B-ezro6AJyd4D4bR9d8E3fW0a6Mc0Q,3812
  pygpt_net/provider/llms/base.py,sha256=YIkISmtDIABv4YMho-fvu5Jwn1zPwT0Gqvi3zFI0RYA,6347
- pygpt_net/provider/llms/deepseek_api.py,sha256=9inBaiHUFcN4FmEpcwmQ6IzFr5izQizl29_tzBolnRA,2023
- pygpt_net/provider/llms/google.py,sha256=2fegINTcG8r7IikgWNefXHHdibHDTukL3h6BY1UZOBU,3056
+ pygpt_net/provider/llms/deepseek_api.py,sha256=Jljj6Ce123q3eCrIizfFPQIsf47OzRaBK4jIbNZdLzg,3267
+ pygpt_net/provider/llms/google.py,sha256=3Td8JsvJ6fclSY_zn4YaEWilPcBCFqeIBzWcZ7Gg1nI,3432
  pygpt_net/provider/llms/hugging_face.py,sha256=qWyGVqosDw9WVsKbZc5IG7j4jjfVPeCKr6gPAn8Tyus,1800
- pygpt_net/provider/llms/hugging_face_api.py,sha256=IOEbX2sErfeort7U8w1fB_IRj1iHrTHDt1_3tjwAjJQ,3032
- pygpt_net/provider/llms/hugging_face_router.py,sha256=gVqlwvmRrXxP6mXIUIDA82Qq8NrZ-Sp1uU3Nh9bOWYE,3730
+ pygpt_net/provider/llms/hugging_face_api.py,sha256=oY5dE-9rFv8Cd6Hv8ZriHvL46ZrT3KyvJFuLokNaNRY,3402
+ pygpt_net/provider/llms/hugging_face_router.py,sha256=db_P5DktU0SNObKqumaFfzEcyR1Wn7f9FwoLVlN4u2U,4477
  pygpt_net/provider/llms/llama_index/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pygpt_net/provider/llms/llama_index/openai/__init__.py,sha256=nkQoENFdv-dmjf50Ic-iZuVhZisCfFwwDSpK4h6iLVM,183
  pygpt_net/provider/llms/llama_index/openai/base.py,sha256=Z6NAmZ3OBvGLDNmYdJfB3Vp95BdHSTQriPVsCTYPeoU,42259
  pygpt_net/provider/llms/llama_index/openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pygpt_net/provider/llms/llama_index/openai/responses.py,sha256=dOzrPV0u1CQRloU2cws7GDI4OJwrnivo9uoqKACLV8Y,36159
  pygpt_net/provider/llms/llama_index/openai/utils.py,sha256=IdvbjJ2y5zWDkntoPgBZ2pGbcrYIbGbg1smoju2XkUI,29243
- pygpt_net/provider/llms/local.py,sha256=617jP82B1mM8QrBdWLoTLKXmKzAZzMUI_CmQXDABlYA,1467
- pygpt_net/provider/llms/mistral.py,sha256=YAcAUU8EXMEFSqhGapCSgaqrvUunyeEO6lXI0JH7g0o,2921
- pygpt_net/provider/llms/ollama.py,sha256=TnQtvpJg2teptXUu35BpE22XroEZRAfJ5tT0lAIOAmA,4326
+ pygpt_net/provider/llms/llama_index/x_ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ pygpt_net/provider/llms/llama_index/x_ai/embedding.py,sha256=QrGgpkD0F4Jm5cMJgN6oYai7UK_bJ0YoGr7Uvy5GtRU,2458
+ pygpt_net/provider/llms/local.py,sha256=7vU0xWlMrFLzLMc6Os_xD-oAMxLEitvEHQcSdvCEY50,2290
+ pygpt_net/provider/llms/mistral.py,sha256=e8pcWyNT8HjA3KLZL1vO7z4FlBxer-QCVpaGtKgQ4UY,3858
+ pygpt_net/provider/llms/ollama.py,sha256=vVqA22eH-APgyfHCaHSvJlAgxLSvspvZSaOCeaKWQCw,4434
  pygpt_net/provider/llms/ollama_custom.py,sha256=WVbLiEEwnz5loKiLy7EYmpuWz0Tp5Vhd1vOUB2051kI,24167
- pygpt_net/provider/llms/openai.py,sha256=7f7oAhb5f56A6xl4P7R0CXUedMTb9CjWjhGTRhAsBhw,5119
+ pygpt_net/provider/llms/openai.py,sha256=oaPCEffOQI3TGe_l15Ta3Mt_MKshxsONRCSc59fINaE,5419
  pygpt_net/provider/llms/perplexity.py,sha256=DO5RZaUEDmRhps0Hoa1OX05no5n4uxT4JjwOGWPshPY,3899
- pygpt_net/provider/llms/x_ai.py,sha256=ZC2zOaENGEgMZc5ozN-kik8yl16XNcgZAU2eaTpP52M,3673
+ pygpt_net/provider/llms/x_ai.py,sha256=TkOdSce3OndH4lSoYgBB7FkljM0eqbSX3OT5UtT0Cqc,4509
  pygpt_net/provider/loaders/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pygpt_net/provider/loaders/base.py,sha256=3-qzzGAF2jxhriNHjE3Y2GtDXxs1_2_BIloaVJS4qzQ,3101
  pygpt_net/provider/loaders/file_csv.py,sha256=br4zlMFXMVkhq1n71tqCNk2CS1wBxvClpn9vivq3l2g,1266
@@ -2445,8 +2447,8 @@ pygpt_net/ui/widget/textarea/web.py,sha256=cqs5i67bD19_BNgcYL7NXlwYBei4UYSL_IYPZ
  pygpt_net/ui/widget/vision/__init__.py,sha256=8HT4tQFqQogEEpGYTv2RplKBthlsFKcl5egnv4lzzEw,488
  pygpt_net/ui/widget/vision/camera.py,sha256=v1qEncaZr5pXocO5Cpk_lsgfCMvfFigdJmzsYfzvCl0,1877
  pygpt_net/utils.py,sha256=GBAXOpp_Wjfu7Al7TnTV62-R-JPMiP9GuPXLJ0HmeJU,8906
- pygpt_net-2.6.24.dist-info/LICENSE,sha256=rbPqNB_xxANH8hKayJyIcTwD4bj4Y2G-Mcm85r1OImM,1126
- pygpt_net-2.6.24.dist-info/METADATA,sha256=5WOXzrV4OoizgSRsLW0CdDT6xtKnWEa4Y9SUKG6zyKs,157048
- pygpt_net-2.6.24.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- pygpt_net-2.6.24.dist-info/entry_points.txt,sha256=qvpII6UHIt8XfokmQWnCYQrTgty8FeJ9hJvOuUFCN-8,43
- pygpt_net-2.6.24.dist-info/RECORD,,
+ pygpt_net-2.6.25.dist-info/LICENSE,sha256=rbPqNB_xxANH8hKayJyIcTwD4bj4Y2G-Mcm85r1OImM,1126
+ pygpt_net-2.6.25.dist-info/METADATA,sha256=4FP0zTdSkVksnIePP4ra6wSV5C7bYGowxWagKQ6eCEc,157900
+ pygpt_net-2.6.25.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ pygpt_net-2.6.25.dist-info/entry_points.txt,sha256=qvpII6UHIt8XfokmQWnCYQrTgty8FeJ9hJvOuUFCN-8,43
+ pygpt_net-2.6.25.dist-info/RECORD,,