pygpt-net 2.4.45__py3-none-any.whl → 2.4.46__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. CHANGELOG.md +6 -0
  2. README.md +28 -8
  3. pygpt_net/CHANGELOG.txt +6 -0
  4. pygpt_net/__init__.py +2 -2
  5. pygpt_net/controller/chat/text.py +5 -2
  6. pygpt_net/controller/idx/__init__.py +2 -1
  7. pygpt_net/controller/settings/__init__.py +2 -2
  8. pygpt_net/core/idx/chat.py +71 -11
  9. pygpt_net/data/config/config.json +7 -2
  10. pygpt_net/data/config/models.json +311 -14
  11. pygpt_net/data/config/modes.json +2 -2
  12. pygpt_net/data/config/settings.json +104 -11
  13. pygpt_net/data/config/settings_section.json +3 -0
  14. pygpt_net/data/locale/locale.de.ini +17 -1
  15. pygpt_net/data/locale/locale.en.ini +19 -2
  16. pygpt_net/data/locale/locale.es.ini +17 -1
  17. pygpt_net/data/locale/locale.fr.ini +17 -1
  18. pygpt_net/data/locale/locale.it.ini +17 -1
  19. pygpt_net/data/locale/locale.pl.ini +17 -1
  20. pygpt_net/data/locale/locale.uk.ini +17 -1
  21. pygpt_net/data/locale/locale.zh.ini +17 -1
  22. pygpt_net/data/locale/plugin.cmd_web.de.ini +1 -1
  23. pygpt_net/data/locale/plugin.cmd_web.en.ini +5 -5
  24. pygpt_net/data/locale/plugin.cmd_web.es.ini +1 -1
  25. pygpt_net/data/locale/plugin.cmd_web.fr.ini +1 -1
  26. pygpt_net/data/locale/plugin.cmd_web.it.ini +1 -1
  27. pygpt_net/data/locale/plugin.cmd_web.pl.ini +1 -1
  28. pygpt_net/data/locale/plugin.cmd_web.uk.ini +1 -1
  29. pygpt_net/data/locale/plugin.cmd_web.zh.ini +1 -1
  30. pygpt_net/data/locale/plugin.mailer.en.ini +9 -9
  31. pygpt_net/provider/core/config/patch.py +16 -1
  32. pygpt_net/provider/core/model/patch.py +156 -1
  33. pygpt_net/ui/dialog/settings.py +22 -7
  34. {pygpt_net-2.4.45.dist-info → pygpt_net-2.4.46.dist-info}/METADATA +29 -9
  35. {pygpt_net-2.4.45.dist-info → pygpt_net-2.4.46.dist-info}/RECORD +38 -38
  36. {pygpt_net-2.4.45.dist-info → pygpt_net-2.4.46.dist-info}/LICENSE +0 -0
  37. {pygpt_net-2.4.45.dist-info → pygpt_net-2.4.46.dist-info}/WHEEL +0 -0
  38. {pygpt_net-2.4.45.dist-info → pygpt_net-2.4.46.dist-info}/entry_points.txt +0 -0
CHANGELOG.md CHANGED
@@ -1,5 +1,11 @@
  # CHANGELOG
 
+ ## 2.4.46 (2024-12-16)
+
+ - Added a new tab in Settings: "API Keys", where the API keys configuration for Google and Anthropic models has been relocated.
+ - Introduced a new mode in "Chat with Files": "Retrieve Only", which allows for retrieving raw documents from the index.
+ - Fixed a bug related to tool calls in the Gemini provider when using Chat with Files mode.
+
  ## 2.4.45 (2024-12-16)
 
  - Enhanced web data loaders UI.
README.md CHANGED
@@ -2,7 +2,7 @@
 
  [![pygpt](https://snapcraft.io/pygpt/badge.svg)](https://snapcraft.io/pygpt)
 
- Release: **2.4.45** | build: **2024.12.16** | Python: **>=3.10, <3.12**
+ Release: **2.4.46** | build: **2024.12.16** | Python: **>=3.10, <3.12**
 
  > Official website: https://pygpt.net | Documentation: https://pygpt.readthedocs.io
  >
@@ -329,12 +329,12 @@ During the initial launch, you must configure your API key within the applicatio
  To do so, navigate to the menu:
 
  ``` ini
- Config -> Settings...
+ Config -> Settings -> API Keys
  ```
 
  and then paste the API key into the `OpenAI API KEY` field.
 
- ![v2_settings](https://github.com/user-attachments/assets/003b0f86-8225-4478-8525-fb9324ac5c88)
+ ![v2_api_keys](https://github.com/user-attachments/assets/8564add8-364b-471d-80d5-7e99ae77e129)
 
  The API key can be obtained by registering on the OpenAI website:
 
@@ -354,6 +354,8 @@ Your API keys will be available here:
 
  This mode in **PyGPT** mirrors `ChatGPT`, allowing you to chat with models such as `o1`, `GPT-4`, `GPT-4o` and `GPT-3.5`. It works by using the `ChatCompletion` OpenAI API.
 
+ **Tip: This mode directly uses the OpenAI API. If you want to use models other than GPT (such as Gemini, Claude, or Llama3), use `Chat with Files` mode.**
+
  The main part of the interface is a chat window where you see your conversations. Below it is a message box for typing. On the right side, you can set up or change the model and system prompt. You can also save these settings as presets to easily switch between models or tasks.
 
  Above where you type your messages, the interface shows you the number of tokens your message will use up as you type it – this helps to keep track of usage. There is also a feature to attach and upload files in this area. Go to the `Files and Attachments` section for more information on how to use attachments.
@@ -2723,7 +2725,17 @@ Config -> Settings...
 
  **General**
 
- - `OpenAI API KEY`: The personal API key you'll need to enter into the application for it to function.
+ - `Minimize to tray on exit`: Minimize to tray icon on exit. Tray icon enabled is required for this option to work. Default: False.
+
+ - `Render engine`: chat output render engine: `WebEngine / Chromium` - for full HTML/CSS and `Legacy (markdown)` for legacy, simple markdown CSS output. Default: WebEngine / Chromium.
+
+ - `OpenGL hardware acceleration`: enables hardware acceleration in `WebEngine / Chromium` renderer. Default: False.
+
+ - `Application environment (os.environ)`: Additional environment vars to set on application start.
+
+ **API Keys**
+
+ - `OpenAI API KEY`: Required for the OpenAI API. If you wish to use custom endpoints or local APIs, then you may enter any value here.
 
  - `OpenAI ORGANIZATION KEY`: The organization's API key, which is optional for use within the application.
 
@@ -2731,13 +2743,15 @@ Config -> Settings...
 
  - `Proxy address`: Proxy address to be used for connection; supports HTTP/SOCKS.
 
- - `Minimize to tray on exit`: Minimize to tray icon on exit. Tray icon enabled is required for this option to work. Default: False.
+ - `Google API KEY`: Required for the Google API and Gemini models.
 
- - `Render engine`: chat output render engine: `WebEngine / Chromium` - for full HTML/CSS and `Legacy (markdown)` for legacy, simple markdown CSS output. Default: WebEngine / Chromium.
+ - `Anthropic API KEY`: Required for the Anthropic API and Claude models.
 
- - `OpenGL hardware acceleration`: enables hardware acceleration in `WebEngine / Chromium` renderer. Default: False.
+ - `HuggingFace API KEY`: Required for the HuggingFace API.
 
- - `Application environment (os.environ)`: Additional environment vars to set on application start.
+ - `OpenAI API version`: Azure OpenAI API version, e.g. 2023-07-01-preview
+
+ - `Azure OpenAI API endpoint`: Azure OpenAI API endpoint, https://<your-resource-name>.openai.azure.com/
 
  **Layout**
 
@@ -3938,6 +3952,12 @@ may consume additional tokens that are not displayed in the main window.
 
  ## Recent changes:
 
+ **2.4.46 (2024-12-16)**
+
+ - Added a new tab in Settings: "API Keys", where the API keys configuration for Google and Anthropic models has been relocated.
+ - Introduced a new mode in "Chat with Files": "Retrieve Only", which allows for retrieving raw documents from the index.
+ - Fixed a bug related to tool calls in the Gemini provider when using Chat with Files mode.
+
  **2.4.45 (2024-12-16)**
 
  - Enhanced web data loaders UI.
pygpt_net/CHANGELOG.txt CHANGED
@@ -1,3 +1,9 @@
+ 2.4.46 (2024-12-16)
+
+ - Added a new tab in Settings: "API Keys", where the API keys configuration for Google and Anthropic models has been relocated.
+ - Introduced a new mode in "Chat with Files": "Retrieve Only", which allows for retrieving raw documents from the index.
+ - Fixed a bug related to tool calls in the Gemini provider when using Chat with Files mode.
+
  2.4.45 (2024-12-16)
 
  - Enhanced web data loaders UI.
pygpt_net/__init__.py CHANGED
@@ -6,14 +6,14 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.12.16 02:00:00 #
+ # Updated Date: 2024.12.16 20:00:00 #
  # ================================================== #
 
  __author__ = "Marcin Szczygliński"
  __copyright__ = "Copyright 2024, Marcin Szczygliński"
  __credits__ = ["Marcin Szczygliński"]
  __license__ = "MIT"
- __version__ = "2.4.45"
+ __version__ = "2.4.46"
  __build__ = "2024.12.16"
  __maintainer__ = "Marcin Szczygliński"
  __github__ = "https://github.com/szczyglis-dev/py-gpt"
pygpt_net/controller/chat/text.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.12.14 00:00:00 #
+ # Updated Date: 2024.12.16 20:00:00 #
  # ================================================== #
 
  from typing import Optional
@@ -83,10 +83,13 @@ class Text:
  base_mode = mode # store parent mode
  functions = [] # functions to call
  tools_outputs = [] # tools outputs (assistant only)
+ idx_mode = self.window.core.config.get('llama.idx.mode')
 
  # o1 models: disable stream mode
  if model.startswith("o1") or mode in [MODE_AGENT_LLAMA, MODE_AUDIO]:
      stream_mode = False
+ if mode in [MODE_LLAMA_INDEX] and idx_mode == "retrieval":
+     stream_mode = False
 
  # create ctx item
  ctx = CtxItem()
@@ -234,7 +237,7 @@ class Text:
  file_ids=self.window.controller.files.uploaded_ids, # uploaded files IDs
  assistant_id=assistant_id,
  idx=self.window.controller.idx.current_idx, # current idx
- idx_mode=self.window.core.config.get('llama.idx.mode'), # llama index mode (chat or query)
+ idx_mode=idx_mode, # llama index mode (chat or query)
  external_functions=functions, # external functions
  tools_outputs=tools_outputs, # if not empty then will submit outputs to assistant
  max_tokens=max_tokens, # max output tokens
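The new `retrieval` value of `llama.idx.mode` also disables streaming in this controller, since the retrieve-only output is assembled in one pass. A minimal sketch of that gating; the `MODE_*` string values and the `allow_stream()` helper are illustrative stand-ins, not the app's actual constants (the app sets `stream_mode` in place):

```python
# Illustrative sketch of the stream gating added in controller/chat/text.py above.
MODE_LLAMA_INDEX = "llama_index"   # assumed value, stand-in for the app's constant
MODE_AGENT_LLAMA = "agent_llama"
MODE_AUDIO = "audio"

def allow_stream(model: str, mode: str, idx_mode: str) -> bool:
    """Return False wherever streaming is disabled."""
    if model.startswith("o1") or mode in (MODE_AGENT_LLAMA, MODE_AUDIO):
        return False  # o1 models and agent/audio modes never stream
    if mode == MODE_LLAMA_INDEX and idx_mode == "retrieval":
        return False  # new in 2.4.46: retrieve-only output is built at once
    return True

if __name__ == "__main__":
    print(allow_stream("gpt-4o", MODE_LLAMA_INDEX, "retrieval"))  # False
    print(allow_stream("gpt-4o", MODE_LLAMA_INDEX, "chat"))       # True
```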
pygpt_net/controller/idx/__init__.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.12.14 08:00:00 #
+ # Updated Date: 2024.12.16 20:00:00 #
  # ================================================== #
 
  import datetime
@@ -65,6 +65,7 @@ class Idx:
  return [
      {"chat": trans('toolbox.llama_index.mode.chat')},
      {"query": trans('toolbox.llama_index.mode.query')},
+     {"retrieval": trans('toolbox.llama_index.mode.retrieval')},
  ]
 
  def select_mode(self, mode: str):
pygpt_net/controller/settings/__init__.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.12.14 00:00:00 #
+ # Updated Date: 2024.12.16 20:00:00 #
  # ================================================== #
 
  import os
@@ -230,7 +230,7 @@ class Settings:
 
  def welcome_settings(self):
      """Open settings at first launch (if no API key yet)"""
-     self.open_section("general")
+     self.open_section("api_keys")
      self.window.ui.config['config']['api_key'].setFocus()
      self.window.ui.dialogs.close('info.start')
 
pygpt_net/core/idx/chat.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.12.14 22:00:00 #
+ # Updated Date: 2024.12.16 20:00:00 #
  # ================================================== #
 
  import json
@@ -61,6 +61,11 @@ class Chat:
          context=context,
          extra=extra,
      )
+ elif idx_mode == "retrieval": # retrieval mode
+     return self.retrieval(
+         context=context,
+         extra=extra,
+     )
 
  # if not raw, check if chat mode is available
  if MODE_CHAT in model.llama_index['mode']:
@@ -163,6 +168,50 @@ class Chat:
      return True
  return False
 
+ def retrieval(
+         self,
+         context: BridgeContext,
+         extra: Optional[Dict[str, Any]] = None
+ ) -> bool:
+     """
+     Retrieve documents from index only
+
+     :param context: Bridge context
+     :param extra: Extra arguments
+     :return: True if success
+     """
+     idx = context.idx
+     model = context.model
+     ctx = context.ctx
+     query = ctx.input # user input
+     verbose = self.window.core.config.get("log.llama", False)
+
+     self.log("Retrieval...")
+     self.log("Idx: {}, retrieve only: {}".format(
+         idx,
+         query,
+     ))
+
+     index, service_context = self.get_index(idx, model)
+     retriever = index.as_retriever()
+     nodes = retriever.retrieve(query)
+     outputs = []
+     self.log("Retrieved {} nodes...".format(len(nodes)))
+     for node in nodes:
+         outputs.append({
+             "text": node.text,
+             "score": node.score,
+         })
+     if outputs:
+         response = ""
+         for output in outputs:
+             response += "**Score: {}**\n\n{}".format(output["score"], output["text"])
+             if output != outputs[-1]:
+                 response += "\n\n-------\n\n"
+         ctx.set_output(response)
+         ctx.add_doc_meta(self.get_metadata(nodes))
+     return True
+
  def chat(
          self,
          context: BridgeContext,
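For context, a standalone sketch of the retrieve-only flow that the new `Chat.retrieval()` method wraps. It assumes recent `llama_index.core` imports; building the index with default settings embeds via OpenAI, so an `OPENAI_API_KEY` (or a locally configured `embed_model`) is required. The documents and query strings are illustrative only:

```python
# Standalone sketch of a retrieve-only pass, mirroring the calls used in the diff:
# index.as_retriever() and retriever.retrieve(query) return scored nodes without
# asking an LLM to synthesize an answer.
from llama_index.core import Document, VectorStoreIndex

documents = [
    Document(text="Retrieval mode returns raw nodes from the index."),
    Document(text="Chat and query modes synthesize an answer with an LLM."),
]
index = VectorStoreIndex.from_documents(documents)

retriever = index.as_retriever()
nodes = retriever.retrieve("What does retrieval mode return?")

# Format nodes the way Chat.retrieval() does: a score header plus the raw text.
parts = ["**Score: {}**\n\n{}".format(node.score, node.text) for node in nodes]
print("\n\n-------\n\n".join(parts))
```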
@@ -225,6 +274,7 @@ class Chat:
  )
 
  if use_index:
+     # CMD: commands are applied to system prompt here
      # index as query engine
      chat_engine = index.as_chat_engine(
          llm=llm,
@@ -238,6 +288,7 @@ class Chat:
      else:
          response = chat_engine.chat(query)
  else:
+     # CMD: commands are applied to system prompt here
      # prepare tools (native calls if enabled)
      tools = self.window.core.agents.tools.prepare(context, extra)
 
@@ -245,15 +296,25 @@ class Chat:
  history.insert(0, self.context.add_system(system_prompt))
  history.append(self.context.add_user(query))
  if stream:
-     response = llm.stream_chat_with_tools(
-         tools=tools,
-         messages=history,
-     )
+     if hasattr(llm, "stream_chat_with_tools"):
+         response = llm.stream_chat_with_tools(
+             tools=tools,
+             messages=history,
+         )
+     else:
+         response = llm.stream_chat(
+             messages=history,
+         )
  else:
-     response = llm.chat_with_tools(
-         tools=tools,
-         messages=history,
-     )
+     if hasattr(llm, "chat_with_tools"):
+         response = llm.chat_with_tools(
+             tools=tools,
+             messages=history,
+         )
+     else:
+         response = llm.chat(
+             messages=history,
+         )
 
  # handle response
  if response:
@@ -275,10 +336,9 @@ class Chat:
      if output is None:
          output = ""
      ctx.set_output(output, "")
-     print("output", output)
      ctx.add_doc_meta(self.get_metadata(response.source_nodes)) # store metadata
  else:
-     # from LLM
+     # from LLM directly
      if stream:
          # tools handled in stream output controller
          ctx.stream = response # chunk is in response.delta
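The `hasattr` checks above are the fix for the Gemini tool-call bug noted in the changelog: LLM wrappers that do not implement the `*_with_tools` variants now degrade to plain `chat()` / `stream_chat()`. A self-contained sketch of that fallback pattern, with `DummyLLM` and `call_llm()` as illustrative names only:

```python
# Duck-typed fallback: prefer the tool-aware call if the wrapper provides it,
# otherwise fall back to the plain chat call.
class DummyLLM:
    """Stands in for an LLM wrapper without tool-call support."""
    def chat(self, messages):
        return "plain chat response"

def call_llm(llm, history, tools, stream=False):
    if stream:
        if hasattr(llm, "stream_chat_with_tools"):
            return llm.stream_chat_with_tools(tools=tools, messages=history)
        return llm.stream_chat(messages=history)
    if hasattr(llm, "chat_with_tools"):
        return llm.chat_with_tools(tools=tools, messages=history)
    return llm.chat(messages=history)

print(call_llm(DummyLLM(), history=[], tools=[]))  # -> "plain chat response"
```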
pygpt_net/data/config/config.json CHANGED
@@ -1,7 +1,7 @@
  {
      "__meta__": {
-         "version": "2.4.45",
-         "app.version": "2.4.45",
+         "version": "2.4.46",
+         "app.version": "2.4.46",
          "updated_at": "2024-12-16T00:00:00"
      },
      "access.audio.event.speech": false,
@@ -58,8 +58,13 @@
  "agent.llama.verbose": false,
  "agent.mode": "chat",
  "ai_name": "",
+ "api_azure_version": "2023-07-01-preview",
+ "api_azure_endpoint": "https://<your-resource-name>.openai.azure.com/",
  "api_endpoint": "https://api.openai.com/v1",
  "api_key": "",
+ "api_key_google": "",
+ "api_key_anthropic": "",
+ "api_key_hugging_face": "",
  "api_proxy": "",
  "app.env": [],
  "assistant": "",