pygpt-net 2.5.6__py3-none-any.whl → 2.5.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. CHANGELOG.md +14 -0
  2. README.md +25 -3
  3. pygpt_net/CHANGELOG.txt +14 -0
  4. pygpt_net/__init__.py +3 -3
  5. pygpt_net/controller/chat/stream.py +6 -1
  6. pygpt_net/controller/chat/text.py +2 -2
  7. pygpt_net/controller/chat/vision.py +2 -0
  8. pygpt_net/controller/config/placeholder.py +3 -2
  9. pygpt_net/controller/lang/custom.py +3 -1
  10. pygpt_net/controller/notepad/__init__.py +2 -2
  11. pygpt_net/controller/presets/editor.py +8 -1
  12. pygpt_net/controller/theme/menu.py +5 -1
  13. pygpt_net/controller/ui/__init__.py +17 -7
  14. pygpt_net/core/agents/legacy.py +2 -0
  15. pygpt_net/core/bridge/__init__.py +10 -2
  16. pygpt_net/core/ctx/__init__.py +4 -1
  17. pygpt_net/core/debug/presets.py +3 -1
  18. pygpt_net/core/events/control.py +2 -1
  19. pygpt_net/core/experts/__init__.py +3 -1
  20. pygpt_net/core/models/__init__.py +6 -1
  21. pygpt_net/core/modes/__init__.py +3 -1
  22. pygpt_net/core/presets/__init__.py +5 -1
  23. pygpt_net/core/render/web/helpers.py +8 -1
  24. pygpt_net/core/tokens/__init__.py +5 -3
  25. pygpt_net/core/types/mode.py +3 -2
  26. pygpt_net/data/config/config.json +6 -4
  27. pygpt_net/data/config/models.json +424 -3
  28. pygpt_net/data/config/modes.json +9 -3
  29. pygpt_net/data/config/presets/current.research.json +35 -0
  30. pygpt_net/data/config/settings.json +19 -0
  31. pygpt_net/data/css/web-blocks.css +8 -0
  32. pygpt_net/data/css/web-chatgpt.css +8 -0
  33. pygpt_net/data/css/web-chatgpt_wide.css +8 -0
  34. pygpt_net/data/locale/locale.en.ini +6 -0
  35. pygpt_net/item/preset.py +5 -1
  36. pygpt_net/plugin/openai_dalle/__init__.py +3 -1
  37. pygpt_net/plugin/openai_vision/__init__.py +3 -1
  38. pygpt_net/provider/core/config/patch.py +18 -1
  39. pygpt_net/provider/core/model/patch.py +7 -1
  40. pygpt_net/provider/core/preset/json_file.py +4 -0
  41. pygpt_net/provider/gpt/__init__.py +30 -6
  42. pygpt_net/provider/gpt/chat.py +4 -7
  43. pygpt_net/ui/dialog/preset.py +3 -1
  44. pygpt_net/ui/layout/ctx/__init__.py +1 -5
  45. pygpt_net/ui/layout/ctx/ctx_list.py +6 -1
  46. {pygpt_net-2.5.6.dist-info → pygpt_net-2.5.8.dist-info}/METADATA +26 -4
  47. {pygpt_net-2.5.6.dist-info → pygpt_net-2.5.8.dist-info}/RECORD +50 -49
  48. {pygpt_net-2.5.6.dist-info → pygpt_net-2.5.8.dist-info}/LICENSE +0 -0
  49. {pygpt_net-2.5.6.dist-info → pygpt_net-2.5.8.dist-info}/WHEEL +0 -0
  50. {pygpt_net-2.5.6.dist-info → pygpt_net-2.5.8.dist-info}/entry_points.txt +0 -0
CHANGELOG.md CHANGED
@@ -1,5 +1,19 @@
  # CHANGELOG
 
+ ## 2.5.8 (2025-03-02)
+
+ - Added a new mode: Research (Perplexity) powered by: https://perplexity.ai - beta.
+ - Added Perplexity models: sonar, sonar-pro, sonar-deep-research, sonar-reasoning, sonar-reasoning-pro, r1-1776.
+ - Added a new OpenAI model: gpt-4.5-preview.
+
+ ## 2.5.7 (2025-02-26)
+
+ - Stream mode has been enabled in o1 models.
+ - CSS styling for <think> tags (reasoning models) has been added.
+ - The search input has been moved to the top.
+ - The ChatGPT-based style is now set as default.
+ - Fix: Display of max tokens in models with a context window greater than 128k.
+
  ## 2.5.6 (2025-02-03)
 
  - Fix: disabled index initialization if embedding provider is OpenAI and no API KEY is provided.
README.md CHANGED
@@ -2,7 +2,7 @@
 
  [![pygpt](https://snapcraft.io/pygpt/badge.svg)](https://snapcraft.io/pygpt)
 
- Release: **2.5.6** | build: **2025.02.03** | Python: **>=3.10, <3.13**
+ Release: **2.5.8** | build: **2025.03.02** | Python: **>=3.10, <3.13**
 
  > Official website: https://pygpt.net | Documentation: https://pygpt.readthedocs.io
  >
@@ -38,7 +38,7 @@ You can download compiled 64-bit versions for Windows and Linux here: https://py
 
  - Desktop AI Assistant for `Linux`, `Windows` and `Mac`, written in Python.
  - Works similarly to `ChatGPT`, but locally (on a desktop computer).
- - 11 modes of operation: Chat, Vision, Completion, Assistant, Image generation, LangChain, Chat with Files, Chat with Audio, Experts, Autonomous Mode and Agents.
+ - 12 modes of operation: Chat, Vision, Completion, Assistant, Image generation, LangChain, Chat with Files, Chat with Audio, Research (Perplexity), Experts, Autonomous Mode and Agents.
  - Supports multiple models: `o1`, `GPT-4o`, `GPT-4`, `GPT-3.5`, and any model accessible through `LangChain`, `LlamaIndex` and `Ollama` such as `Llama 3`, `Mistral`, `Google Gemini`, `Anthropic Claude`, `DeepSeek V3/R1`, `Bielik`, etc.
  - Chat with your own Files: integrated `LlamaIndex` support: chat with data such as: `txt`, `pdf`, `csv`, `html`, `md`, `docx`, `json`, `epub`, `xlsx`, `xml`, webpages, `Google`, `GitHub`, video/audio, images and other data types, or use conversation history as additional context provided to the model.
  - Built-in vector databases support and automated files and data embedding.
@@ -398,7 +398,15 @@ More info: https://platform.openai.com/docs/guides/audio/quickstart
 
  Currently, in beta. Tool and function calls are not enabled in this mode.
 
- **INFO:** The execution of commands and tools in this mode is temporarily unavailable.
+ ## Research (Perplexity)
+
+ 2025-03-02: currently in beta.
+
+ Mode operates using the Perplexity API: https://perplexity.ai.
+
+ It allows for deep web searching and utilizes Sonar models, available in `Perplexity AI`.
+
+ It requires a Perplexity API key, which can be generated at: https://perplexity.ai.
 
  ## Completion
 
@@ -3964,6 +3972,20 @@ may consume additional tokens that are not displayed in the main window.
 
  ## Recent changes:
 
+ **2.5.8 (2025-03-02)**
+
+ - Added a new mode: Research (Perplexity) powered by: https://perplexity.ai - beta.
+ - Added Perplexity models: sonar, sonar-pro, sonar-deep-research, sonar-reasoning, sonar-reasoning-pro, r1-1776.
+ - Added a new OpenAI model: gpt-4.5-preview.
+
+ **2.5.7 (2025-02-26)**
+
+ - Stream mode has been enabled in o1 models.
+ - CSS styling for <think> tags (reasoning models) has been added.
+ - The search input has been moved to the top.
+ - The ChatGPT-based style is now set as default.
+ - Fix: Display of max tokens in models with a context window greater than 128k.
+
  **2.5.6 (2025-02-03)**
 
  - Fix: disabled index initialization if embedding provider is OpenAI and no API KEY is provided.
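For orientation, the new Research (Perplexity) mode documented in the README excerpt above talks to Perplexity's Sonar models over the Perplexity API. A minimal sketch of such a call, assuming Perplexity's OpenAI-compatible chat-completions endpoint; the base URL, key placeholder and `citations` attribute below are illustrative assumptions, not code from this package:

    # Minimal sketch (not part of pygpt-net): query a Perplexity "sonar" model
    # through an OpenAI-compatible client.
    from openai import OpenAI

    client = OpenAI(
        api_key="YOUR_PERPLEXITY_API_KEY",     # generated at https://perplexity.ai
        base_url="https://api.perplexity.ai",  # assumed OpenAI-compatible endpoint
    )

    resp = client.chat.completions.create(
        model="sonar",
        messages=[{"role": "user", "content": "Summarize recent findings on topic X."}],
    )
    print(resp.choices[0].message.content)
    print(getattr(resp, "citations", None))    # web citations, if the API returns them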
pygpt_net/CHANGELOG.txt CHANGED
@@ -1,3 +1,17 @@
+ 2.5.8 (2025-03-02)
+
+ - Added a new mode: Research (Perplexity) powered by: https://perplexity.ai - beta.
+ - Added Perplexity models: sonar, sonar-pro, sonar-deep-research, sonar-reasoning, sonar-reasoning-pro, r1-1776.
+ - Added a new OpenAI model: gpt-4.5-preview.
+
+ 2.5.7 (2025-02-26)
+
+ - Stream mode has been enabled in o1 models.
+ - CSS styling for <think> tags (reasoning models) has been added.
+ - The search input has been moved to the top.
+ - The ChatGPT-based style is now set as default.
+ - Fix: Display of max tokens in models with a context window greater than 128k.
+
  2.5.6 (2025-02-03)
 
  - Fix: disabled index initialization if embedding provider is OpenAI and no API KEY is provided.
pygpt_net/__init__.py CHANGED
@@ -6,15 +6,15 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.02.03 02:00:00 #
+ # Updated Date: 2025.03.02 19:00:00 #
  # ================================================== #
 
  __author__ = "Marcin Szczygliński"
  __copyright__ = "Copyright 2025, Marcin Szczygliński"
  __credits__ = ["Marcin Szczygliński"]
  __license__ = "MIT"
- __version__ = "2.5.6"
- __build__ = "2025.02.03"
+ __version__ = "2.5.8"
+ __build__ = "2025-03-02"
  __maintainer__ = "Marcin Szczygliński"
  __github__ = "https://github.com/szczyglis-dev/py-gpt"
  __report__ = "https://github.com/szczyglis-dev/py-gpt/issues"
pygpt_net/controller/chat/stream.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.01.31 22:00:00 #
+ # Updated Date: 2025.03.02 19:00:00 #
  # ================================================== #
 
  from typing import Any
@@ -81,7 +81,12 @@ class Stream:
 
  # OpenAI chat completion
  if chunk_type == "api_chat":
+ citations = None
  if chunk.choices[0].delta and chunk.choices[0].delta.content is not None:
+ if citations is None:
+ if chunk and hasattr(chunk, 'citations') and chunk.citations is not None:
+ citations = chunk.citations
+ ctx.urls = citations
  response = chunk.choices[0].delta.content
  if chunk.choices[0].delta and chunk.choices[0].delta.tool_calls:
  tool_chunks = chunk.choices[0].delta.tool_calls
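The Stream hunk above is where Perplexity web citations get attached to the context item. Stripped of the application plumbing, the pattern it implements looks roughly like the following sketch; the chunk shape is assumed from an OpenAI-compatible streaming client:

    # Sketch: accumulate streamed content and pick up provider citations once, if present.
    def collect_stream(chunks):
        citations = None
        parts = []
        for chunk in chunks:
            delta = chunk.choices[0].delta
            if delta and delta.content is not None:
                if citations is None and getattr(chunk, "citations", None) is not None:
                    citations = chunk.citations
                parts.append(delta.content)
        return "".join(parts), citations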
pygpt_net/controller/chat/text.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.12.16 20:00:00 #
+ # Updated Date: 2025.02.26 23:00:00 #
  # ================================================== #
 
  from typing import Optional
@@ -86,7 +86,7 @@ class Text:
  idx_mode = self.window.core.config.get('llama.idx.mode')
 
  # o1 models: disable stream mode
- if model.startswith("o1") or mode in [MODE_AGENT_LLAMA, MODE_AUDIO]:
+ if mode in [MODE_AGENT_LLAMA, MODE_AUDIO]:
  stream_mode = False
  if mode in [MODE_LLAMA_INDEX] and idx_mode == "retrieval":
  stream_mode = False
pygpt_net/controller/chat/vision.py CHANGED
@@ -17,6 +17,7 @@ from pygpt_net.core.types import (
  MODE_LANGCHAIN,
  MODE_LLAMA_INDEX,
  MODE_VISION,
+ MODE_RESEARCH,
  )
 
  class Vision:
@@ -36,6 +37,7 @@ class Vision:
  MODE_LLAMA_INDEX,
  MODE_AGENT,
  MODE_AGENT_LLAMA,
+ MODE_RESEARCH,
  ]
 
  def setup(self):
pygpt_net/controller/config/placeholder.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.02.01 11:00:00 #
+ # Updated Date: 2025.02.26 23:00:00 #
  # ================================================== #
 
  from typing import Dict, Any, List
@@ -325,7 +325,8 @@ class Placeholder:
  styles.sort()
  data = []
  for id in styles:
- data.append({id: id})
+ name = id
+ data.append({id: name})
  return data
 
  def get_keys(self) -> List[Dict[str, str]]:
pygpt_net/controller/lang/custom.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.01.17 02:00:00 #
+ # Updated Date: 2025.03.02 19:00:00 #
  # ================================================== #
 
  from PySide6.QtCore import Qt
@@ -22,6 +22,7 @@ from pygpt_net.core.types import (
  MODE_LANGCHAIN,
  MODE_LLAMA_INDEX,
  MODE_VISION,
+ MODE_RESEARCH,
  )
  from pygpt_net.utils import trans
 
@@ -61,6 +62,7 @@ class Custom:
  self.window.ui.config['preset'][MODE_AGENT_LLAMA].box.setText(trans("preset.agent_llama"))
  self.window.ui.config['preset'][MODE_EXPERT].box.setText(trans("preset.expert"))
  self.window.ui.config['preset'][MODE_AUDIO].box.setText(trans("preset.audio"))
+ self.window.ui.config['preset'][MODE_RESEARCH].box.setText(trans("preset.research"))
 
  self.window.ui.config['global']['img_raw'].setText(trans("img.raw"))
 
pygpt_net/controller/notepad/__init__.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.12.14 08:00:00 #
+ # Updated Date: 2025.02.26 23:00:00 #
  # ================================================== #
 
  from typing import Optional, Tuple
@@ -283,6 +283,6 @@ class Notepad:
  if tab.type == Tab.TAB_NOTEPAD:
  idx = tab.data_id
  if idx in self.window.ui.notepad:
+ self.window.ui.notepad[idx].scroll_to_bottom()
  if not self.window.ui.notepad[idx].opened:
- self.window.ui.notepad[idx].scroll_to_bottom()
  self.window.ui.notepad[idx].opened = True
pygpt_net/controller/presets/editor.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.12.14 08:00:00 #
+ # Updated Date: 2025.03.02 19:00:00 #
  # ================================================== #
 
  import datetime
@@ -25,6 +25,7 @@ from pygpt_net.core.types import (
  MODE_LLAMA_INDEX,
  MODE_VISION,
  MODE_IMAGE,
+ MODE_RESEARCH,
  )
  from pygpt_net.item.preset import PresetItem
  from pygpt_net.utils import trans
@@ -93,6 +94,10 @@ class Editor:
  "type": "bool",
  "label": "preset.audio",
  },
+ MODE_RESEARCH: {
+ "type": "bool",
+ "label": "preset.research",
+ },
  # "assistant": {
  # "type": "bool",
  # "label": "preset.assistant",
@@ -302,6 +307,8 @@
  data.agent_llama = True
  elif mode == MODE_AUDIO:
  data.audio = True
+ elif mode == MODE_RESEARCH:
+ data.research = True
 
  options = {}
  data_dict = data.to_dict()
pygpt_net/controller/theme/menu.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.12.07 21:00:00 #
+ # Updated Date: 2025.02.26 23:00:00 #
  # ================================================== #
 
  from PySide6.QtGui import QAction
@@ -36,6 +36,10 @@ class Menu:
  for style in styles:
  style_id = style.lower()
  title = style.replace('_', ' ').title()
+ if title == "Chatgpt":
+ title = "ChatGPT"
+ elif title == "Chatgpt Wide":
+ title = "ChatGPT (wide)"
  self.window.ui.menu['theme_style'][style_id] = QAction(title, self.window, checkable=True)
  self.window.ui.menu['theme_style'][style_id].triggered.connect(
  lambda checked=None, style=style_id: self.window.controller.theme.toggle_style(style))
pygpt_net/controller/ui/__init__.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.12.14 00:00:00 #
+ # Updated Date: 2025.02.26 23:00:00 #
  # ================================================== #
 
  from typing import Optional
@@ -95,6 +95,20 @@ class UI:
  self.window.controller.assistant.refresh()
  self.window.controller.idx.refresh()
 
+ def format_tokens(self, num: int) -> str:
+ """
+ Format tokens
+ :param num: number of tokens
+ :return: formatted string
+ """
+ num = int(num)
+ if num >= 1_000_000:
+ return f"{num // 1_000_000}M"
+ elif num >= 1_000:
+ return f"{num // 1_000}k"
+ else:
+ return str(num)
+
  def update_tokens(self):
  """Update tokens counter in real-time"""
  prompt = str(self.window.ui.nodes['input'].toPlainText().strip())
@@ -112,12 +126,8 @@
  )
  self.window.ui.nodes['prompt.context'].setText(ctx_string)
 
- # input tokens
- parsed_sum = str(int(sum_tokens))
- parsed_sum = parsed_sum.replace("000000", "M").replace("000", "k")
-
- parsed_max_current = str(int(max_current))
- parsed_max_current = parsed_max_current.replace("000000", "M").replace("000", "k")
+ parsed_sum = self.format_tokens(sum_tokens)
+ parsed_max_current = self.format_tokens(max_current)
 
  input_string = "{} + {} + {} + {} + {} = {} / {}".format(
  input_tokens,
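This refactor is the 2.5.7 fix for the display of max tokens on models with context windows above 128k: the old digit-group string replacement mangles round values such as 200000, while integer division does not. A standalone comparison, for illustration only (not package code):

    # Old string-replacement formatting vs. the new integer-division formatting.
    def old_format(num: int) -> str:
        return str(int(num)).replace("000000", "M").replace("000", "k")

    def new_format(num: int) -> str:
        num = int(num)
        if num >= 1_000_000:
            return f"{num // 1_000_000}M"
        elif num >= 1_000:
            return f"{num // 1_000}k"
        return str(num)

    print(old_format(128000), new_format(128000))    # 128k 128k
    print(old_format(200000), new_format(200000))    # 2k00 200k  <- old output is garbled
    print(old_format(1000000), new_format(1000000))  # 1M 1M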
pygpt_net/core/agents/legacy.py CHANGED
@@ -18,6 +18,7 @@ from pygpt_net.core.types import (
  MODE_LLAMA_INDEX,
  MODE_VISION,
  MODE_AUDIO,
+ MODE_RESEARCH,
  )
 
  class Legacy:
@@ -35,6 +36,7 @@ class Legacy:
  MODE_LANGCHAIN,
  MODE_LLAMA_INDEX,
  MODE_AUDIO,
+ MODE_RESEARCH,
  ]
 
  def get_allowed_modes(self) -> List[str]:
pygpt_net/core/bridge/__init__.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.12.14 08:00:00 #
+ # Updated Date: 2025.03.02 19:00:00 #
  # ================================================== #
 
  import time
@@ -21,6 +21,7 @@ from pygpt_net.core.types import (
  MODE_LANGCHAIN,
  MODE_LLAMA_INDEX,
  MODE_VISION,
+ MODE_RESEARCH,
  )
 
  from .context import BridgeContext
@@ -195,7 +196,7 @@
 
  if context.model is not None:
  # check if model is supported by chat API, if not then try to use llama-index or langchain call
- if not context.model.is_supported(MODE_CHAT):
+ if not context.model.is_supported(MODE_CHAT) and not context.model.is_supported(MODE_RESEARCH):
 
  # tmp switch to: llama-index
  if context.model.is_supported(MODE_LLAMA_INDEX):
@@ -231,6 +232,13 @@
  self.window.core.debug.error(e)
  return ""
 
+ # if model is research model, then switch to research / Perplexity endpoint
+ if context.mode is None or context.mode == MODE_CHAT:
+ if context.model is not None:
+ if not context.model.is_supported(MODE_CHAT):
+ if context.model.is_supported(MODE_RESEARCH):
+ context.mode = MODE_RESEARCH
+
  # default: OpenAI API call
  return self.window.core.gpt.quick_call(
  context=context,
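Read together, the two Bridge hunks mean that quick calls no longer fall through to the llama-index/langchain path for research-only models; the mode is switched before the OpenAI-style call. Condensed into a standalone sketch (a paraphrase of the logic above, not the application code):

    # Sketch: pick the effective mode for a quick call.
    def resolve_quick_call_mode(mode, model):
        if mode in (None, "chat") and model is not None:
            if not model.is_supported("chat") and model.is_supported("research"):
                return "research"  # route to the Perplexity endpoint
        return mode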
pygpt_net/core/ctx/__init__.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.12.14 08:00:00 #
+ # Updated Date: 2025.03.02 19:00:00 #
  # ================================================== #
 
  import copy
@@ -28,6 +28,7 @@ from pygpt_net.core.types import (
  MODE_LANGCHAIN,
  MODE_LLAMA_INDEX,
  MODE_VISION,
+ MODE_RESEARCH,
  )
  from pygpt_net.item.ctx import CtxItem, CtxMeta, CtxGroup
  from pygpt_net.provider.core.ctx.base import BaseProvider
@@ -81,6 +82,7 @@ class Ctx:
  MODE_AGENT,
  MODE_EXPERT,
  MODE_AUDIO,
+ MODE_RESEARCH,
  ]
  self.allowed_modes = {
  MODE_CHAT: self.all_modes,
@@ -94,6 +96,7 @@
  MODE_EXPERT: self.all_modes,
  MODE_AUDIO: self.all_modes,
  MODE_AGENT_LLAMA: [MODE_AGENT_LLAMA],
+ MODE_RESEARCH: self.all_modes,
  }
  self.current_sys_prompt = ""
  self.groups_loaded = False
pygpt_net/core/debug/presets.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.11.26 19:00:00 #
+ # Updated Date: 2025.03.02 19:00:00 #
  # ================================================== #
 
  import os
@@ -23,6 +23,7 @@ from pygpt_net.core.types import (
  MODE_LANGCHAIN,
  MODE_LLAMA_INDEX,
  MODE_VISION,
+ MODE_RESEARCH,
  )
 
  class PresetsDebug:
@@ -66,6 +67,7 @@
  MODE_AGENT_LLAMA: preset.agent_llama,
  MODE_EXPERT: preset.expert,
  MODE_AUDIO: preset.audio,
+ MODE_RESEARCH: preset.research,
  'temperature': preset.temperature,
  'version': preset.version,
  }
pygpt_net/core/events/control.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.12.14 08:00:00 #
+ # Updated Date: 2025.03.02 19:00:00 #
  # ================================================== #
 
  from typing import Optional
@@ -48,6 +48,7 @@ class ControlEvent(BaseEvent):
  INPUT_SEND = "input.send"
  INPUT_APPEND = "input.append"
  MODE_CHAT = "mode.chat"
+ MODE_RESEARCH = "mode.research"
  MODE_LLAMA_INDEX = "mode.llama_index"
  MODE_NEXT = "mode.next"
  MODE_PREV = "mode.prev"
pygpt_net/core/experts/__init__.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.12.14 08:00:00 #
+ # Updated Date: 2025.03.02 19:00:00 #
  # ================================================== #
 
  from typing import Dict, List
@@ -20,6 +20,7 @@ from pygpt_net.core.types import (
  MODE_LLAMA_INDEX,
  MODE_VISION,
  MODE_AUDIO,
+ MODE_RESEARCH,
  )
  from pygpt_net.core.bridge.context import BridgeContext
  from pygpt_net.core.events import Event, KernelEvent, RenderEvent
@@ -42,6 +43,7 @@ class Experts:
  MODE_LANGCHAIN,
  MODE_LLAMA_INDEX,
  MODE_AUDIO,
+ MODE_RESEARCH,
  ]
  self.allowed_cmds = ["expert_call"]
 
pygpt_net/core/models/__init__.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.02.01 11:00:00 #
+ # Updated Date: 2025.03.02 19:00:00 #
  # ================================================== #
 
  import copy
@@ -18,6 +18,7 @@ from pygpt_net.core.types import (
  MODE_CHAT,
  MODE_LANGCHAIN,
  MODE_LLAMA_INDEX,
+ MODE_RESEARCH,
  )
  from pygpt_net.item.model import ModelItem
  from pygpt_net.provider.core.model.json_file import JsonFileProvider
@@ -385,6 +386,10 @@ class Models:
  self.window.core.debug.info(
  "WARNING: Switching to chat mode (model not supported in: {})".format(mode))
  mode = MODE_CHAT
+ elif model.is_supported(MODE_RESEARCH):
+ self.window.core.debug.info(
+ "WARNING: Switching to research mode (model not supported in: {})".format(mode))
+ mode = MODE_RESEARCH
  elif model.is_supported(MODE_LLAMA_INDEX):
  self.window.core.debug.info(
  "WARNING: Switching to llama_index mode (model not supported in: {})".format(mode))
pygpt_net/core/modes/__init__.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.12.14 22:00:00 #
+ # Updated Date: 2025.03.02 19:00:00 #
  # ================================================== #
 
  from typing import Dict, List
@@ -24,6 +24,7 @@ from pygpt_net.core.types import (
  MODE_LANGCHAIN,
  MODE_LLAMA_INDEX,
  MODE_VISION,
+ MODE_RESEARCH,
  )
 
 
@@ -50,6 +51,7 @@ class Modes:
  MODE_LANGCHAIN,
  MODE_LLAMA_INDEX,
  MODE_VISION,
+ MODE_RESEARCH,
  ]
  self.items = {}
 
pygpt_net/core/presets/__init__.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.12.14 22:00:00 #
+ # Updated Date: 2025.03.02 19:00:00 #
  # ================================================== #
 
  import copy
@@ -26,6 +26,7 @@ from pygpt_net.core.types import (
  MODE_LANGCHAIN,
  MODE_LLAMA_INDEX,
  MODE_VISION,
+ MODE_RESEARCH,
  )
  from pygpt_net.item.preset import PresetItem
  from pygpt_net.provider.core.preset.json_file import JsonFileProvider
@@ -233,6 +234,8 @@ class Presets:
  return MODE_EXPERT
  if preset.audio:
  return MODE_AUDIO
+ if preset.research:
+ return MODE_RESEARCH
  return None
 
  def has(self, mode: str, id: str) -> bool:
@@ -303,6 +306,7 @@
  or (mode == MODE_AGENT and self.items[id].agent) \
  or (mode == MODE_AGENT_LLAMA and self.items[id].agent_llama) \
  or (mode == MODE_EXPERT and self.items[id].expert) \
+ or (mode == MODE_RESEARCH and self.items[id].research) \
  or (mode == MODE_AUDIO and self.items[id].audio):
  presets[id] = self.items[id]
  return presets
pygpt_net/core/render/web/helpers.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.11.11 23:00:00 #
+ # Updated Date: 2025.02.26 23:00:00 #
  # ================================================== #
 
  import re
@@ -48,6 +48,13 @@ class Helpers:
  text = text.replace("#~###~", "~###~") # fix for #~###~ in text (previous versions)
  text = text.replace("# ~###~", "~###~") # fix for # ~###~ in text (previous versions)
 
+ #text = text.replace("<think>", "{{{{think}}}}")
+ #text = text.replace("</think>", "{{{{/think}}}}")
+ #text = text.replace("<", "&lt;")
+ #text = text.replace(">", "&gt;")
+ #text = text.replace("{{{{think}}}}", "<think>")
+ #text = text.replace("{{{{/think}}}}", "</think>")
+
  # replace cmd tags
  text = self.replace_code_tags(text)
 
pygpt_net/core/tokens/__init__.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.01.31 22:00:00 #
+ # Updated Date: 2025.03.02 19:00:00 #
  # ================================================== #
 
  from typing import Tuple, List
@@ -22,6 +22,7 @@ from pygpt_net.core.types import (
  MODE_ASSISTANT,
  MODE_AUDIO,
  MODE_CHAT,
+ MODE_RESEARCH,
  MODE_COMPLETION,
  MODE_EXPERT,
  MODE_LANGCHAIN,
@@ -40,6 +41,7 @@ CHAT_MODES = [
  MODE_AGENT_LLAMA,
  MODE_EXPERT,
  MODE_AUDIO,
+ MODE_RESEARCH,
  ]
 
  class Tokens:
@@ -375,7 +377,7 @@
 
  # check model max allowed ctx tokens
  max_current = max_total_tokens
- model_ctx = self.window.core.models.get_num_ctx(model_id)
+ model_ctx = self.window.core.models.get_num_ctx(model)
  if max_current > model_ctx:
  max_current = model_ctx
 
@@ -414,7 +416,7 @@
  model_id = self.window.core.models.get_id(model)
  mode = self.window.core.config.get('mode')
  tokens = 0
- if mode in [MODE_CHAT, MODE_VISION, MODE_AUDIO]:
+ if mode in [MODE_CHAT, MODE_VISION, MODE_AUDIO, MODE_RESEARCH]:
  tokens += self.from_prompt(system_prompt, "", model_id) # system prompt
  tokens += self.from_text("system", model_id)
  tokens += self.from_prompt(input_prompt, "", model_id) # input prompt
pygpt_net/core/types/mode.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.11.26 19:00:00 #
+ # Updated Date: 2025.03.02 19:00:00 #
  # ================================================== #
 
  MODE_AUDIO = "audio"
@@ -19,4 +19,5 @@ MODE_LANGCHAIN = "langchain"
  MODE_LLAMA_INDEX = "llama_index"
  MODE_AGENT = "agent"
  MODE_AGENT_LLAMA = "agent_llama"
- MODE_EXPERT = "expert"
+ MODE_EXPERT = "expert"
+ MODE_RESEARCH = "research"