pygpt-net 2.6.23__py3-none-any.whl → 2.6.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51) hide show
  1. pygpt_net/CHANGELOG.txt +14 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/response.py +6 -5
  4. pygpt_net/controller/config/placeholder.py +3 -1
  5. pygpt_net/controller/model/importer.py +28 -5
  6. pygpt_net/core/agents/runners/loop.py +36 -3
  7. pygpt_net/core/attachments/context.py +4 -4
  8. pygpt_net/core/idx/chat.py +1 -1
  9. pygpt_net/core/idx/indexing.py +3 -3
  10. pygpt_net/core/idx/llm.py +61 -2
  11. pygpt_net/data/config/config.json +41 -4
  12. pygpt_net/data/config/models.json +3 -3
  13. pygpt_net/data/config/settings.json +56 -1
  14. pygpt_net/data/locale/locale.de.ini +46 -0
  15. pygpt_net/data/locale/locale.en.ini +53 -1
  16. pygpt_net/data/locale/locale.es.ini +46 -0
  17. pygpt_net/data/locale/locale.fr.ini +46 -0
  18. pygpt_net/data/locale/locale.it.ini +46 -0
  19. pygpt_net/data/locale/locale.pl.ini +47 -1
  20. pygpt_net/data/locale/locale.uk.ini +46 -0
  21. pygpt_net/data/locale/locale.zh.ini +46 -0
  22. pygpt_net/provider/agents/llama_index/codeact_workflow.py +8 -7
  23. pygpt_net/provider/agents/llama_index/planner_workflow.py +11 -10
  24. pygpt_net/provider/agents/llama_index/supervisor_workflow.py +9 -8
  25. pygpt_net/provider/agents/openai/agent_b2b.py +30 -17
  26. pygpt_net/provider/agents/openai/agent_planner.py +29 -29
  27. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +21 -23
  28. pygpt_net/provider/agents/openai/agent_with_feedback.py +21 -23
  29. pygpt_net/provider/agents/openai/bot_researcher.py +25 -30
  30. pygpt_net/provider/agents/openai/evolve.py +37 -39
  31. pygpt_net/provider/agents/openai/supervisor.py +16 -18
  32. pygpt_net/provider/core/config/patch.py +45 -1
  33. pygpt_net/provider/llms/anthropic.py +38 -7
  34. pygpt_net/provider/llms/azure_openai.py +9 -4
  35. pygpt_net/provider/llms/deepseek_api.py +36 -3
  36. pygpt_net/provider/llms/google.py +9 -3
  37. pygpt_net/provider/llms/hugging_face_api.py +9 -3
  38. pygpt_net/provider/llms/hugging_face_router.py +17 -3
  39. pygpt_net/provider/llms/llama_index/x_ai/__init__.py +0 -0
  40. pygpt_net/provider/llms/llama_index/x_ai/embedding.py +71 -0
  41. pygpt_net/provider/llms/local.py +25 -1
  42. pygpt_net/provider/llms/mistral.py +29 -1
  43. pygpt_net/provider/llms/ollama.py +3 -1
  44. pygpt_net/provider/llms/openai.py +7 -2
  45. pygpt_net/provider/llms/x_ai.py +19 -3
  46. pygpt_net/ui/widget/textarea/input.py +3 -3
  47. {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.25.dist-info}/METADATA +54 -28
  48. {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.25.dist-info}/RECORD +51 -49
  49. {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.25.dist-info}/LICENSE +0 -0
  50. {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.25.dist-info}/WHEEL +0 -0
  51. {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.25.dist-info}/entry_points.txt +0 -0
@@ -6,9 +6,12 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.06 01:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
- from typing import List, Dict
11
+
12
+ from typing import List, Dict, Optional
13
+
14
+ from llama_index.core.base.embeddings.base import BaseEmbedding
12
15
 
13
16
  from pygpt_net.core.types import (
14
17
  MODE_LLAMA_INDEX,
@@ -24,7 +27,7 @@ class DeepseekApiLLM(BaseLLM):
24
27
  super(DeepseekApiLLM, self).__init__(*args, **kwargs)
25
28
  self.id = "deepseek_api"
26
29
  self.name = "Deepseek API"
27
- self.type = [MODE_LLAMA_INDEX]
30
+ self.type = [MODE_LLAMA_INDEX, "embeddings"]
28
31
 
29
32
  def llama(
30
33
  self,
@@ -42,8 +45,38 @@ class DeepseekApiLLM(BaseLLM):
42
45
  """
43
46
  from llama_index.llms.deepseek import DeepSeek
44
47
  args = self.parse_args(model.llama_index, window)
48
+ if "model" not in args:
49
+ args["model"] = model.id
50
+ if "api_key" not in args or args["api_key"] == "":
51
+ args["api_key"] = window.core.config.get("api_key_deepseek", "")
45
52
  return DeepSeek(**args)
46
53
 
54
+ def get_embeddings_model(
55
+ self,
56
+ window,
57
+ config: Optional[List[Dict]] = None
58
+ ) -> BaseEmbedding:
59
+ """
60
+ Return provider instance for embeddings
61
+
62
+ :param window: window instance
63
+ :param config: config keyword arguments list
64
+ :return: Embedding provider instance
65
+ """
66
+ from llama_index.embeddings.voyageai import VoyageEmbedding
67
+ args = {}
68
+ if config is not None:
69
+ args = self.parse_args({
70
+ "args": config,
71
+ }, window)
72
+ if "api_key" in args:
73
+ args["voyage_api_key"] = args.pop("api_key")
74
+ if "voyage_api_key" not in args or args["voyage_api_key"] == "":
75
+ args["voyage_api_key"] = window.core.config.get("api_key_voyage", "")
76
+ if "model" in args and "model_name" not in args:
77
+ args["model_name"] = args.pop("model")
78
+ return VoyageEmbedding(**args)
79
+
47
80
  def get_models(
48
81
  self,
49
82
  window,
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.06 01:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Optional, List, Dict
@@ -53,6 +53,8 @@ class GoogleLLM(BaseLLM):
53
53
  args = self.parse_args(model.llama_index, window)
54
54
  if "model" not in args:
55
55
  args["model"] = model.id
56
+ if "api_key" not in args or args["api_key"] == "":
57
+ args["api_key"] = window.core.config.get("api_key_google", "")
56
58
  return GoogleGenAI(**args)
57
59
 
58
60
  def get_embeddings_model(
@@ -67,13 +69,17 @@ class GoogleLLM(BaseLLM):
67
69
  :param config: config keyword arguments list
68
70
  :return: Embedding provider instance
69
71
  """
70
- from llama_index.embeddings.gemini import GeminiEmbedding
72
+ from llama_index.embeddings.google_genai import GoogleGenAIEmbedding
71
73
  args = {}
72
74
  if config is not None:
73
75
  args = self.parse_args({
74
76
  "args": config,
75
77
  }, window)
76
- return GeminiEmbedding(**args)
78
+ if "api_key" not in args or args["api_key"] == "":
79
+ args["api_key"] = window.core.config.get("api_key_google", "")
80
+ if "model" in args and "model_name" not in args:
81
+ args["model_name"] = args.pop("model")
82
+ return GoogleGenAIEmbedding(**args)
77
83
 
78
84
  def get_models(
79
85
  self,
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.06 01:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import os
@@ -47,6 +47,8 @@ class HuggingFaceApiLLM(BaseLLM):
47
47
  args = self.parse_args(model.llama_index, window)
48
48
  if "model" not in args:
49
49
  args["model"] = model.id
50
+ if "api_key" not in args or args["api_key"] == "":
51
+ args["api_key"] = window.core.config.get("api_key_hugging_face", "")
50
52
  return HuggingFaceInferenceAPI(**args)
51
53
 
52
54
  def get_embeddings_model(
@@ -61,13 +63,17 @@ class HuggingFaceApiLLM(BaseLLM):
61
63
  :param config: config keyword arguments list
62
64
  :return: Embedding provider instance
63
65
  """
64
- from llama_index.embeddings.huggingface_api import HuggingFaceInferenceAPIEmbedding as HuggingFaceAPIEmbedding
66
+ from llama_index.embeddings.huggingface_api import HuggingFaceInferenceAPIEmbedding
65
67
  args = {}
66
68
  if config is not None:
67
69
  args = self.parse_args({
68
70
  "args": config,
69
71
  }, window)
70
- return HuggingFaceAPIEmbedding(**args)
72
+ if "api_key" not in args or args["api_key"] == "":
73
+ args["api_key"] = window.core.config.get("api_key_hugging_face", "")
74
+ if "model" in args and "model_name" not in args:
75
+ args["model_name"] = args.pop("model")
76
+ return HuggingFaceInferenceAPIEmbedding(**args)
71
77
 
72
78
  def init_embeddings(
73
79
  self,
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.08 19:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Optional, List, Dict
@@ -28,7 +28,7 @@ class HuggingFaceRouterLLM(BaseLLM):
28
28
  super(HuggingFaceRouterLLM, self).__init__(*args, **kwargs)
29
29
  self.id = "huggingface_router"
30
30
  self.name = "HuggingFace Router"
31
- self.type = [MODE_CHAT, MODE_LLAMA_INDEX]
31
+ self.type = [MODE_CHAT, MODE_LLAMA_INDEX, "embeddings"]
32
32
 
33
33
  def completion(
34
34
  self,
@@ -78,6 +78,10 @@ class HuggingFaceRouterLLM(BaseLLM):
78
78
  """
79
79
  from llama_index.llms.openai_like import OpenAILike
80
80
  args = self.parse_args(model.llama_index, window)
81
+ if "model" not in args:
82
+ args["model"] = model.id
83
+ if "api_key" not in args or args["api_key"] == "":
84
+ args["api_key"] = window.core.config.get("api_key_hugging_face", "")
81
85
  return OpenAILike(**args)
82
86
 
83
87
  def llama_multimodal(
@@ -108,7 +112,17 @@ class HuggingFaceRouterLLM(BaseLLM):
108
112
  :param config: config keyword arguments list
109
113
  :return: Embedding provider instance
110
114
  """
111
- pass
115
+ from llama_index.embeddings.huggingface_api import HuggingFaceInferenceAPIEmbedding
116
+ args = {}
117
+ if config is not None:
118
+ args = self.parse_args({
119
+ "args": config,
120
+ }, window)
121
+ if "api_key" not in args or args["api_key"] == "":
122
+ args["api_key"] = window.core.config.get("api_key_hugging_face", "")
123
+ if "model" in args and "model_name" not in args:
124
+ args["model_name"] = args.pop("model")
125
+ return HuggingFaceInferenceAPIEmbedding(**args)
112
126
 
113
127
  def get_models(
114
128
  self,
File without changes
@@ -0,0 +1,71 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
+ # ================================================== #
11
+
12
+ import asyncio
13
+ from typing import Any, List, Optional
14
+
15
+ import xai_sdk
16
+ from llama_index.core.embeddings import BaseEmbedding
17
+
18
+
19
class XAIEmbedding(BaseEmbedding):
    """
    LlamaIndex embedding wrapper around the official xAI SDK.

    Streams embedding vectors from ``client.embedder.embed`` and exposes
    them through LlamaIndex's synchronous and asynchronous
    ``BaseEmbedding`` interface.
    """

    def __init__(
        self,
        model_name: str,
        api_key: Optional[str] = None,
        api_host: str = "api.x.ai",
        **kwargs: Any,
    ) -> None:
        """
        :param model_name: xAI embedding model identifier
        :param api_key: xAI API key passed straight to the SDK client
        :param api_host: xAI API host
        """
        super().__init__(model_name=model_name, **kwargs)
        # NOTE(review): underscore attributes bypass pydantic field
        # validation on BaseEmbedding; assumed supported by the installed
        # llama-index version — confirm if upgrading.
        self._api_key = api_key
        self._api_host = api_host
        self._client = xai_sdk.Client(api_key=api_key, api_host=api_host)

    def _run_async(self, coro):
        """
        Drive *coro* to completion from synchronous code.

        Fix: the previous implementation called
        ``asyncio.run_coroutine_threadsafe(coro, loop).result()`` when a
        loop was already running in the CURRENT thread, which deadlocks —
        the loop cannot make progress while this same thread blocks on
        ``.result()``. Instead, when a loop is running here, execute the
        coroutine on a private loop in a short-lived worker thread.
        """
        from concurrent.futures import ThreadPoolExecutor
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            # No loop running in this thread: safe to own one directly.
            return asyncio.run(coro)
        with ThreadPoolExecutor(max_workers=1) as pool:
            return pool.submit(asyncio.run, coro).result()

    async def _aembed_many(self, texts: List[str]) -> List[List[float]]:
        """
        Embed a batch of texts.

        The SDK's ``embedder.embed`` is an async generator yielding
        ``(values, shape)`` tuples — one per input text, in order.
        """
        embeddings: List[List[float]] = []
        async for (values, _shape) in self._client.embedder.embed(
            texts=texts, model_name=self.model_name
        ):
            embeddings.append(list(values))
        return embeddings

    def _get_query_embedding(self, query: str) -> List[float]:
        """Synchronous single-query embedding."""
        return self._run_async(self._aembed_many([query]))[0]

    async def _aget_query_embedding(self, query: str) -> List[float]:
        """Asynchronous single-query embedding."""
        return (await self._aembed_many([query]))[0]

    def _get_text_embedding(self, text: str) -> List[float]:
        """Synchronous single-text embedding."""
        return self._run_async(self._aembed_many([text]))[0]

    async def _aget_text_embedding(self, text: str) -> List[float]:
        """Asynchronous single-text embedding."""
        return (await self._aembed_many([text]))[0]

    def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Synchronous batch embedding."""
        return self._run_async(self._aembed_many(texts))

    async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Asynchronous batch embedding."""
        return await self._aembed_many(texts)
@@ -8,7 +8,9 @@
8
8
  # Created By : Marcin Szczygliński #
9
9
  # Updated Date: 2025.08.06 01:00:00 #
10
10
  # ================================================== #
11
+ from typing import Optional, Dict, List
11
12
 
13
+ from llama_index.core.base.embeddings.base import BaseEmbedding
12
14
  from llama_index.core.llms.llm import BaseLLM as LlamaBaseLLM
13
15
 
14
16
  from pygpt_net.core.types import (
@@ -23,7 +25,29 @@ class LocalLLM(BaseLLM):
23
25
  super(LocalLLM, self).__init__(*args, **kwargs)
24
26
  self.id = "local_ai"
25
27
  self.name = "Local model (OpenAI API compatible)"
26
- self.type = [MODE_LLAMA_INDEX]
28
+ self.type = [MODE_LLAMA_INDEX, "embeddings"]
29
+
30
+ def get_embeddings_model(
31
+ self,
32
+ window,
33
+ config: Optional[List[Dict]] = None
34
+ ) -> BaseEmbedding:
35
+ """
36
+ Return provider instance for embeddings
37
+
38
+ :param window: window instance
39
+ :param config: config keyword arguments list
40
+ :return: Embedding provider instance
41
+ """
42
+ from llama_index.embeddings.openai import OpenAIEmbedding
43
+ args = {}
44
+ if config is not None:
45
+ args = self.parse_args({
46
+ "args": config,
47
+ }, window)
48
+ if "model" in args and "model_name" not in args:
49
+ args["model_name"] = args.pop("model")
50
+ return OpenAIEmbedding(**args)
27
51
 
28
52
  def llama(
29
53
  self,
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.06 01:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import os
@@ -47,6 +47,8 @@ class MistralAILLM(BaseLLM):
47
47
  args = self.parse_args(model.llama_index, window)
48
48
  if "model" not in args:
49
49
  args["model"] = model.id
50
+ if "api_key" not in args or args["api_key"] == "":
51
+ args["api_key"] = window.core.config.get("api_key_mistral", "")
50
52
  return MistralAI(**args)
51
53
 
52
54
  def get_embeddings_model(
@@ -67,6 +69,10 @@ class MistralAILLM(BaseLLM):
67
69
  args = self.parse_args({
68
70
  "args": config,
69
71
  }, window)
72
+ if "api_key" not in args or args["api_key"] == "":
73
+ args["api_key"] = window.core.config.get("api_key_mistral", "")
74
+ if "model" in args and "model_name" not in args:
75
+ args["model_name"] = args.pop("model")
70
76
  return MistralAIEmbedding(**args)
71
77
 
72
78
  def init_embeddings(
@@ -87,3 +93,25 @@ class MistralAILLM(BaseLLM):
87
93
  if ('OPENAI_API_KEY' not in os.environ
88
94
  and (window.core.config.get('api_key') is None or window.core.config.get('api_key') == "")):
89
95
  os.environ['OPENAI_API_KEY'] = "_"
96
+
97
+ def get_models(
98
+ self,
99
+ window,
100
+ ) -> List[Dict]:
101
+ """
102
+ Return list of models for the provider
103
+
104
+ :param window: window instance
105
+ :return: list of models
106
+ """
107
+ items = []
108
+ client = self.get_client(window)
109
+ models_list = client.models.list()
110
+ if models_list.data:
111
+ for item in models_list.data:
112
+ id = item.id
113
+ items.append({
114
+ "id": id,
115
+ "name": id,
116
+ })
117
+ return items
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.06 01:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import os
@@ -118,6 +118,8 @@ class OllamaLLM(BaseLLM):
118
118
  if 'OLLAMA_API_BASE' in os.environ:
119
119
  if "base_url" not in args:
120
120
  args["base_url"] = os.environ['OLLAMA_API_BASE']
121
+ if "model" in args and "model_name" not in args:
122
+ args["model_name"] = args.pop("model")
121
123
  return OllamaEmbedding(**args)
122
124
 
123
125
  def init_embeddings(
@@ -6,10 +6,9 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.06 01:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
- import json
13
12
  from typing import Optional, List, Dict
14
13
 
15
14
  # from langchain_openai import OpenAI
@@ -93,6 +92,8 @@ class OpenAILLM(BaseLLM):
93
92
  from .llama_index.openai import OpenAI as LlamaOpenAI
94
93
  from .llama_index.openai import OpenAIResponses as LlamaOpenAIResponses
95
94
  args = self.parse_args(model.llama_index, window)
95
+ if "api_key" not in args:
96
+ args["api_key"] = window.core.config.get("api_key", "")
96
97
  if "model" not in args:
97
98
  args["model"] = model.id
98
99
 
@@ -148,6 +149,10 @@ class OpenAILLM(BaseLLM):
148
149
  args = self.parse_args({
149
150
  "args": config,
150
151
  }, window)
152
+ if "api_key" not in args:
153
+ args["api_key"] = window.core.config.get("api_key", "")
154
+ if "model" in args and "model_name" not in args:
155
+ args["model_name"] = args.pop("model")
151
156
  return OpenAIEmbedding(**args)
152
157
 
153
158
  def get_models(
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.06 01:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Optional, List, Dict
@@ -28,7 +28,7 @@ class xAILLM(BaseLLM):
28
28
  super(xAILLM, self).__init__(*args, **kwargs)
29
29
  self.id = "x_ai"
30
30
  self.name = "xAI"
31
- self.type = [MODE_CHAT, MODE_LLAMA_INDEX]
31
+ self.type = [MODE_CHAT, MODE_LLAMA_INDEX, "embeddings"]
32
32
 
33
33
  def completion(
34
34
  self,
@@ -78,6 +78,12 @@ class xAILLM(BaseLLM):
78
78
  """
79
79
  from llama_index.llms.openai_like import OpenAILike
80
80
  args = self.parse_args(model.llama_index, window)
81
+ if "model" not in args:
82
+ args["model"] = model.id
83
+ if "api_key" not in args or args["api_key"] == "":
84
+ args["api_key"] = window.core.config.get("api_key_xai", "")
85
+ if "api_base" not in args or args["api_base"] == "":
86
+ args["api_base"] = window.core.config.get("api_endpoint_xai", "https://api.x.ai/v1")
81
87
  return OpenAILike(**args)
82
88
 
83
89
  def llama_multimodal(
@@ -108,7 +114,17 @@ class xAILLM(BaseLLM):
108
114
  :param config: config keyword arguments list
109
115
  :return: Embedding provider instance
110
116
  """
111
- pass
117
+ from .llama_index.x_ai.embedding import XAIEmbedding
118
+ args = {}
119
+ if config is not None:
120
+ args = self.parse_args({
121
+ "args": config,
122
+ }, window)
123
+ if "api_key" not in args or args["api_key"] == "":
124
+ args["api_key"] = window.core.config.get("api_key_xai", "")
125
+ if "model" in args and "model_name" not in args:
126
+ args["model_name"] = args.pop("model")
127
+ return XAIEmbedding(**args)
112
128
 
113
129
  def get_models(
114
130
  self,
@@ -35,7 +35,7 @@ class ChatInput(QTextEdit):
35
35
  self.value = self.window.core.config.data['font_size.input']
36
36
  self.max_font_size = 42
37
37
  self.min_font_size = 8
38
- self._text_top_padding = 10
38
+ self._text_top_padding = 12
39
39
  self.textChanged.connect(self.window.controller.ui.update_tokens)
40
40
  self.setProperty('class', 'layout-input')
41
41
 
@@ -195,12 +195,12 @@ class ChatInput(QTextEdit):
195
195
  return
196
196
  super().wheelEvent(event)
197
197
 
198
- # --- Added: attachment button (top-left) ---------------------------------
198
+ # -------------------- Attachment button (top-left) --------------------
199
199
 
200
200
  def _init_attachment_button(self):
201
201
  """Create and place the '+' attachment button pinned in the top-left corner."""
202
202
  self._attach_margin = 6 # inner padding around the button
203
- self._attach_offset_y = -6 # shift the button 2px up
203
+ self._attach_offset_y = -4 # shift the button 2px up
204
204
 
205
205
  self._attach_btn = QPushButton(self)
206
206
  self._attach_btn.setObjectName("chatInputAttachBtn")