pygpt-net 2.5.18__py3-none-any.whl → 2.5.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62) hide show
  1. pygpt_net/CHANGELOG.txt +13 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/app.py +8 -4
  4. pygpt_net/container.py +3 -3
  5. pygpt_net/controller/chat/command.py +4 -4
  6. pygpt_net/controller/chat/input.py +3 -3
  7. pygpt_net/controller/chat/stream.py +6 -2
  8. pygpt_net/controller/config/placeholder.py +28 -14
  9. pygpt_net/controller/lang/custom.py +2 -2
  10. pygpt_net/controller/mode/__init__.py +22 -1
  11. pygpt_net/controller/model/__init__.py +2 -2
  12. pygpt_net/controller/model/editor.py +6 -63
  13. pygpt_net/controller/model/importer.py +9 -7
  14. pygpt_net/controller/presets/editor.py +8 -8
  15. pygpt_net/core/agents/legacy.py +2 -2
  16. pygpt_net/core/bridge/__init__.py +6 -3
  17. pygpt_net/core/bridge/worker.py +5 -2
  18. pygpt_net/core/command/__init__.py +10 -8
  19. pygpt_net/core/debug/presets.py +2 -2
  20. pygpt_net/core/experts/__init__.py +2 -2
  21. pygpt_net/core/idx/chat.py +7 -20
  22. pygpt_net/core/idx/llm.py +27 -28
  23. pygpt_net/core/llm/__init__.py +25 -3
  24. pygpt_net/core/models/__init__.py +83 -9
  25. pygpt_net/core/modes/__init__.py +2 -2
  26. pygpt_net/core/presets/__init__.py +3 -3
  27. pygpt_net/core/prompt/__init__.py +5 -5
  28. pygpt_net/core/tokens/__init__.py +3 -3
  29. pygpt_net/core/updater/__init__.py +5 -3
  30. pygpt_net/data/config/config.json +8 -3
  31. pygpt_net/data/config/models.json +1051 -2605
  32. pygpt_net/data/config/modes.json +4 -10
  33. pygpt_net/data/config/settings.json +94 -0
  34. pygpt_net/data/locale/locale.en.ini +17 -2
  35. pygpt_net/item/model.py +56 -33
  36. pygpt_net/plugin/base/plugin.py +6 -5
  37. pygpt_net/provider/core/config/patch.py +23 -1
  38. pygpt_net/provider/core/model/json_file.py +7 -7
  39. pygpt_net/provider/core/model/patch.py +60 -7
  40. pygpt_net/provider/core/preset/json_file.py +4 -4
  41. pygpt_net/provider/gpt/__init__.py +18 -15
  42. pygpt_net/provider/gpt/chat.py +91 -21
  43. pygpt_net/provider/gpt/responses.py +58 -21
  44. pygpt_net/provider/llms/anthropic.py +2 -1
  45. pygpt_net/provider/llms/azure_openai.py +11 -7
  46. pygpt_net/provider/llms/base.py +3 -2
  47. pygpt_net/provider/llms/deepseek_api.py +3 -1
  48. pygpt_net/provider/llms/google.py +2 -1
  49. pygpt_net/provider/llms/hugging_face.py +8 -5
  50. pygpt_net/provider/llms/hugging_face_api.py +3 -1
  51. pygpt_net/provider/llms/local.py +2 -1
  52. pygpt_net/provider/llms/ollama.py +8 -6
  53. pygpt_net/provider/llms/openai.py +11 -7
  54. pygpt_net/provider/llms/perplexity.py +109 -0
  55. pygpt_net/provider/llms/x_ai.py +108 -0
  56. pygpt_net/ui/dialog/about.py +5 -5
  57. pygpt_net/ui/dialog/preset.py +5 -5
  58. {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/METADATA +65 -178
  59. {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/RECORD +62 -60
  60. {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/LICENSE +0 -0
  61. {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/WHEEL +0 -0
  62. {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/entry_points.txt +0 -0
@@ -1,7 +1,7 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: pygpt-net
3
- Version: 2.5.18
4
- Summary: Desktop AI Assistant powered by models: OpenAI o1, GPT-4o, GPT-4, GPT-4 Vision, GPT-3.5, DALL-E 3, Llama 3, Mistral, Gemini, Claude, DeepSeek, Bielik, and other models supported by Langchain, Llama Index, and Ollama. Features include chatbot, text completion, image generation, vision analysis, speech-to-text, internet access, file handling, command execution and more.
3
+ Version: 2.5.20
4
+ Summary: Desktop AI Assistant powered by models: OpenAI o1, o3, GPT-4o, GPT-4 Vision, DALL-E 3, Llama 3, Mistral, Gemini, Claude, DeepSeek, Bielik, and other models supported by Llama Index and Ollama. Features include chatbot, text completion, image generation, vision analysis, speech-to-text, internet access, file handling, command execution and more.
5
5
  License: MIT
6
6
  Keywords: py_gpt,py-gpt,pygpt,desktop,app,o1,gpt,gpt4,gpt-4o,gpt-4v,gpt3.5,gpt-4,gpt-4-vision,gpt-3.5,llama3,mistral,gemini,deepseek,bielik,claude,tts,whisper,vision,chatgpt,dall-e,chat,chatbot,assistant,text completion,image generation,ai,api,openai,api key,langchain,llama-index,ollama,presets,ui,qt,pyside
7
7
  Author: Marcin Szczyglinski
@@ -35,13 +35,9 @@ Requires-Dist: httpx (>=0.27.2,<0.28.0)
35
35
  Requires-Dist: httpx-socks (>=0.9.2,<0.10.0)
36
36
  Requires-Dist: ipykernel (>=6.29.5,<7.0.0)
37
37
  Requires-Dist: jupyter_client (>=8.6.3,<9.0.0)
38
- Requires-Dist: langchain (>=0.2.17,<0.3.0)
39
- Requires-Dist: langchain-community (>=0.2.19,<0.3.0)
40
- Requires-Dist: langchain-experimental (>=0.0.64,<0.0.65)
41
- Requires-Dist: langchain-openai (>=0.1.25,<0.2.0)
42
- Requires-Dist: llama-index (>=0.12.22,<0.13.0)
38
+ Requires-Dist: llama-index (>=0.12.44,<0.13.0)
43
39
  Requires-Dist: llama-index-agent-openai (>=0.4.8,<0.5.0)
44
- Requires-Dist: llama-index-core (==0.12.22)
40
+ Requires-Dist: llama-index-core (==0.12.44)
45
41
  Requires-Dist: llama-index-embeddings-azure-openai (>=0.3.8,<0.4.0)
46
42
  Requires-Dist: llama-index-embeddings-gemini (>=0.3.2,<0.4.0)
47
43
  Requires-Dist: llama-index-embeddings-huggingface-api (>=0.3.1,<0.4.0)
@@ -50,12 +46,12 @@ Requires-Dist: llama-index-embeddings-openai (>=0.3.1,<0.4.0)
50
46
  Requires-Dist: llama-index-llms-anthropic (>=0.6.12,<0.7.0)
51
47
  Requires-Dist: llama-index-llms-azure-openai (>=0.3.2,<0.4.0)
52
48
  Requires-Dist: llama-index-llms-deepseek (>=0.1.1,<0.2.0)
53
- Requires-Dist: llama-index-llms-gemini (>=0.4.14,<0.5.0)
49
+ Requires-Dist: llama-index-llms-gemini (>=0.5.0,<0.6.0)
54
50
  Requires-Dist: llama-index-llms-huggingface-api (>=0.3.1,<0.4.0)
55
- Requires-Dist: llama-index-llms-ollama (>=0.5.6,<0.6.0)
56
- Requires-Dist: llama-index-llms-openai (>=0.3.28,<0.4.0)
57
- Requires-Dist: llama-index-llms-openai-like (>=0.3.4,<0.4.0)
58
- Requires-Dist: llama-index-multi-modal-llms-openai (>=0.4.3,<0.5.0)
51
+ Requires-Dist: llama-index-llms-ollama (>=0.6.2,<0.7.0)
52
+ Requires-Dist: llama-index-llms-openai (>=0.4.7,<0.5.0)
53
+ Requires-Dist: llama-index-llms-openai-like (>=0.4.0,<0.5.0)
54
+ Requires-Dist: llama-index-multi-modal-llms-openai (>=0.5.1,<0.6.0)
59
55
  Requires-Dist: llama-index-readers-chatgpt-plugin (>=0.3.0,<0.4.0)
60
56
  Requires-Dist: llama-index-readers-database (>=0.3.0,<0.4.0)
61
57
  Requires-Dist: llama-index-readers-file (>=0.4.9,<0.5.0)
@@ -100,7 +96,7 @@ Description-Content-Type: text/markdown
100
96
 
101
97
  [![pygpt](https://snapcraft.io/pygpt/badge.svg)](https://snapcraft.io/pygpt)
102
98
 
103
- Release: **2.5.18** | build: **2025-06-26** | Python: **>=3.10, <3.13**
99
+ Release: **2.5.20** | build: **2025-06-28** | Python: **>=3.10, <3.13**
104
100
 
105
101
  > Official website: https://pygpt.net | Documentation: https://pygpt.readthedocs.io
106
102
  >
@@ -112,7 +108,7 @@ Release: **2.5.18** | build: **2025-06-26** | Python: **>=3.10, <3.13**
112
108
 
113
109
  ## Overview
114
110
 
115
- **PyGPT** is **all-in-one** Desktop AI Assistant that provides direct interaction with OpenAI language models, including `o1`, `gpt-4o`, `gpt-4`, `gpt-4 Vision`, and `gpt-3.5`, through the `OpenAI API`. By utilizing `LangChain` and `LlamaIndex`, the application also supports alternative LLMs, like those available on `HuggingFace`, locally available models (like `Llama 3`,`Mistral`, `DeepSeek V3/R1` or `Bielik`), `Google Gemini` and `Anthropic Claude`.
111
+ **PyGPT** is **all-in-one** Desktop AI Assistant that provides direct interaction with OpenAI language models, including `o1`, `gpt-4o`, `gpt-4`, `gpt-4 Vision`, and `gpt-3.5`, through the `OpenAI API`. By utilizing `LlamaIndex`, the application also supports alternative LLMs, like those available on `HuggingFace`, locally available models (like `Llama 3`,`Mistral`, `DeepSeek V3/R1` or `Bielik`), `Google Gemini` and `Anthropic Claude`.
116
112
 
117
113
  This assistant offers multiple modes of operation such as chat, assistants, completions, and image-related tasks using `DALL-E 3` for generation and `gpt-4 Vision` for image analysis. **PyGPT** has filesystem capabilities for file I/O, can generate and run Python code, execute system commands, execute custom commands and manage file transfers. It also allows models to perform web searches with the `Google` and `Microsoft Bing`.
118
114
 
@@ -120,7 +116,7 @@ For audio interactions, **PyGPT** includes speech synthesis using the `Microsoft
120
116
 
121
117
  **PyGPT**'s functionality extends through plugin support, allowing for custom enhancements. Its multi-modal capabilities make it an adaptable tool for a range of AI-assisted operations, such as text-based interactions, system automation, daily assisting, vision applications, natural language processing, code generation and image creation.
122
118
 
123
- Multiple operation modes are included, such as chat, text completion, assistant, vision, LangChain, Chat with Files (via `LlamaIndex`), commands execution, external API calls and image generation, making **PyGPT** a multi-tool for many AI-driven tasks.
119
+ Multiple operation modes are included, such as chat, text completion, assistant, vision, Chat with Files (via `LlamaIndex`), commands execution, external API calls and image generation, making **PyGPT** a multi-tool for many AI-driven tasks.
124
120
 
125
121
  **Video** (mp4, version `2.4.35`, build `2024-11-28`):
126
122
 
@@ -136,8 +132,8 @@ You can download compiled 64-bit versions for Windows and Linux here: https://py
136
132
 
137
133
  - Desktop AI Assistant for `Linux`, `Windows` and `Mac`, written in Python.
138
134
  - Works similarly to `ChatGPT`, but locally (on a desktop computer).
139
- - 12 modes of operation: Chat, Vision, Completion, Assistant, Image generation, LangChain, Chat with Files, Chat with Audio, Research (Perplexity), Experts, Autonomous Mode and Agents.
140
- - Supports multiple models: `o1`, `GPT-4o`, `GPT-4`, `GPT-3.5`, and any model accessible through `LangChain`, `LlamaIndex` and `Ollama` such as `Llama 3`, `Mistral`, `Google Gemini`, `Anthropic Claude`, `DeepSeek V3/R1`, `Bielik`, etc.
135
+ - 11 modes of operation: Chat, Vision, Completion, Assistant, Image generation, Chat with Files, Chat with Audio, Research (Perplexity), Experts, Autonomous Mode and Agents.
136
+ - Supports multiple models: `o1`, `GPT-4o`, `GPT-4`, `GPT-3.5`, and any model accessible through `LlamaIndex` and `Ollama` such as `Llama 3`, `Mistral`, `Google Gemini`, `Anthropic Claude`, `DeepSeek V3/R1`, `Bielik`, etc.
141
137
  - Chat with your own Files: integrated `LlamaIndex` support: chat with data such as: `txt`, `pdf`, `csv`, `html`, `md`, `docx`, `json`, `epub`, `xlsx`, `xml`, webpages, `Google`, `GitHub`, video/audio, images and other data types, or use conversation history as additional context provided to the model.
142
138
  - Built-in vector databases support and automated files and data embedding.
143
139
  - Included support features for individuals with disabilities: customizable keyboard shortcuts, voice control, and translation of on-screen actions into audio via speech synthesis.
@@ -147,7 +143,6 @@ You can download compiled 64-bit versions for Windows and Linux here: https://py
147
143
  - Speech recognition via `OpenAI Whisper`, `Google` and `Microsoft Speech Recognition`.
148
144
  - Real-time video camera capture in Vision mode.
149
145
  - Image analysis via `GPT-4 Vision` and `GPT-4o`.
150
- - Integrated `LangChain` support (you can connect to any LLM, e.g., on `HuggingFace`).
151
146
  - Integrated calendar, day notes and search in contexts by selected date.
152
147
  - Tools and commands execution (via plugins: access to the local filesystem, Python Code Interpreter, system commands execution, and more).
153
148
  - Custom commands creation and execution.
@@ -177,7 +172,7 @@ Full Python source code is available on `GitHub`.
177
172
  **PyGPT uses the user's API key - to use the GPT models,
178
173
  you must have a registered OpenAI account and your own API key. Local models do not require any API keys.**
179
174
 
180
- You can also use built-it LangChain support to connect to other Large Language Models (LLMs),
175
+ You can also use built-in LlamaIndex support to connect to other Large Language Models (LLMs),
181
176
  such as those on HuggingFace. Additional API keys may be required.
182
177
 
183
178
  # Installation
@@ -462,7 +457,7 @@ Your API keys will be available here:
462
457
 
463
458
  **+ Inline Vision and Image generation**
464
459
 
465
- This mode in **PyGPT** mirrors `ChatGPT`, allowing you to chat with models such as `o1`, `o3`, `GPT-4`, `GPT-4o` and `GPT-3.5`. It works by using the `Responses` OpenAI API.
460
+ This mode in **PyGPT** mirrors `ChatGPT`, allowing you to chat with models such as `o1`, `o3`, `GPT-4`, `GPT-4o` and `GPT-3.5`. It works by using the `Responses` and `ChatCompletions` OpenAI APIs. You can select the API to use in: `Config -> Settings -> API Keys -> OpenAI`.
466
461
 
467
462
  **Tip: This mode directly uses the OpenAI API. Other models, such as Gemini, Claude, or Llama3, are supported in Chat mode via LlamaIndex, which the application switches to in the background when working with models other than OpenAI.**
468
463
 
@@ -622,28 +617,6 @@ The vector database in use will be displayed in the list of uploaded files, on t
622
617
 
623
618
  ![v2_assistant_stores_upload](https://github.com/szczyglis-dev/py-gpt/assets/61396542/8f13c2eb-f961-4eae-b08b-0b4937f06ca9)
624
619
 
625
- ## LangChain
626
-
627
- This mode enables you to work with models that are supported by `LangChain`. The LangChain support is integrated
628
- into the application, allowing you to interact with any LLM by simply supplying a configuration
629
- file for the specific model. You can add as many models as you like; just list them in the configuration
630
- file named `models.json`.
631
-
632
- Available LLMs providers supported by **PyGPT**, in `LangChain` and `Chat with Files (LlamaIndex)` modes:
633
-
634
- ```
635
- - OpenAI
636
- - Azure OpenAI
637
- - Google (Gemini, etc.)
638
- - HuggingFace
639
- - Anthropic
640
- - Ollama (Llama3, Mistral, etc.)
641
- ```
642
-
643
- You have the ability to add custom model wrappers for models that are not available by default in **PyGPT**.
644
- To integrate a new model, you can create your own wrapper and register it with the application.
645
- Detailed instructions for this process are provided in the section titled `Managing models / Adding models via LangChain`.
646
-
647
620
  ## Chat with Files (LlamaIndex)
648
621
 
649
622
  This mode enables chat interaction with your documents and entire context history through conversation.
@@ -973,7 +946,7 @@ To allow the model to manage files or python code execution, the `+ Tools` optio
973
946
 
974
947
  ## What is preset?
975
948
 
976
- Presets in **PyGPT** are essentially templates used to store and quickly apply different configurations. Each preset includes settings for the mode you want to use (such as chat, completion, or image generation), an initial system prompt, an assigned name for the AI, a username for the session, and the desired "temperature" for the conversation. A warmer "temperature" setting allows the AI to provide more creative responses, while a cooler setting encourages more predictable replies. These presets can be used across various modes and with models accessed via the `OpenAI API` or `LangChain`.
949
+ Presets in **PyGPT** are essentially templates used to store and quickly apply different configurations. Each preset includes settings for the mode you want to use (such as chat, completion, or image generation), an initial system prompt, an assigned name for the AI, a username for the session, and the desired "temperature" for the conversation. A warmer "temperature" setting allows the AI to provide more creative responses, while a cooler setting encourages more predictable replies. These presets can be used across various modes and with models accessed via the `OpenAI API` or `LlamaIndex`.
977
950
 
978
951
  The application lets you create as many presets as needed and easily switch among them. Additionally, you can clone an existing preset, which is useful for creating variations based on previously set configurations and experimentation.
979
952
 
@@ -1003,7 +976,7 @@ The name of the currently active profile is shown as (Profile Name) in the windo
1003
976
 
1004
977
  ## Built-in models
1005
978
 
1006
- PyGPT has built-in support for models (as of 2025-06-24):
979
+ PyGPT has built-in support for models (as of 2025-06-27):
1007
980
 
1008
981
  - `bielik-11b-v2.3-instruct:Q4_K_M`
1009
982
  - `chatgpt-4o-latest`
@@ -1049,6 +1022,10 @@ PyGPT has built-in support for models (as of 2025-06-24):
1049
1022
  - `gpt-4o-2024-11-20`
1050
1023
  - `gpt-4o-audio-preview`
1051
1024
  - `gpt-4o-mini`
1025
+ - `grok-3`
1026
+ - `grok-3-fast`
1027
+ - `grok-3-mini`
1028
+ - `grok-3-mini-fast`
1052
1029
  - `llama2-uncensored`
1053
1030
  - `llama3.1`
1054
1031
  - `llama3.1:405b`
@@ -1064,6 +1041,8 @@ PyGPT has built-in support for models (as of 2025-06-24):
1064
1041
  - `qwen:7b`
1065
1042
  - `qwen2:7b`
1066
1043
  - `qwen2.5-coder:7b`
1044
+ - `qwen3:8b`
1045
+ - `qwen3:30b-a3b`
1067
1046
  - `r1` (Perplexity)
1068
1047
  - `sonar` (Perplexity)
1069
1048
  - `sonar-deep-research` (Perplexity)
@@ -1073,7 +1052,7 @@ PyGPT has built-in support for models (as of 2025-06-24):
1073
1052
 
1074
1053
  All models are specified in the configuration file `models.json`, which you can customize.
1075
1054
  This file is located in your working directory. You can add new models provided directly by `OpenAI API`
1076
- and those supported by `LlamaIndex` or `LangChain` to this file. Configuration for LangChain wrapper is placed in `langchain` key, configuration for LlamaIndex in `llama_index` key.
1055
+ and those supported by `LlamaIndex` to this file. Configuration for LlamaIndex is placed in `llama_index` key.
1077
1056
 
1078
1057
  ## Adding a custom model
1079
1058
 
@@ -1081,13 +1060,16 @@ You can add your own models. See the section `Extending PyGPT / Adding a new mod
1081
1060
 
1082
1061
  There is built-in support for those LLM providers:
1083
1062
 
1084
- - OpenAI (openai)
1063
+ - Anthropic (anthropic)
1085
1064
  - Azure OpenAI (azure_openai)
1065
+ - Deepseek API (deepseek_api)
1086
1066
  - Google (google)
1087
1067
  - HuggingFace (huggingface)
1088
- - Anthropic (anthropic)
1068
+ - Local models (OpenAI API compatible)
1089
1069
  - Ollama (ollama)
1090
- - Deepseek API (deepseek_api)
1070
+ - OpenAI (openai)
1071
+ - Perplexity (perplexity)
1072
+ - xAI (x_ai)
1091
1073
 
1092
1074
  ## How to use local or non-GPT models
1093
1075
 
@@ -1525,7 +1507,7 @@ If enabled, plugin will stop after goal is reached." *Default:* `True`
1525
1507
 
1526
1508
  - `Reverse roles between iterations` *reverse_roles*
1527
1509
 
1528
- Only for Completion/LangChain modes.
1510
+ Only for Completion mode.
1529
1511
  If enabled, this option reverses the roles (AI <> user) with each iteration. For example,
1530
1512
  if in the previous iteration the response was generated for "Batman," the next iteration will use that
1531
1513
  response to generate an input for "Joker." *Default:* `True`
@@ -3007,6 +2989,10 @@ Config -> Settings...
3007
2989
 
3008
2990
  - `Model used for auto-summary`: Model used for context auto-summary (default: *gpt-3.5-turbo-1106*).
3009
2991
 
2992
+ **Remote tools**
2993
+
2994
+ Enable/disable remote tools, such as Web Search or Image generation, for use with the OpenAI Responses API (OpenAI models and Chat mode only).
2995
+
3010
2996
  **Models**
3011
2997
 
3012
2998
  - `Max Output Tokens`: Sets the maximum number of tokens the model can generate for a single response.
@@ -3603,7 +3589,7 @@ PyGPT can be extended with:
3603
3589
 
3604
3590
  **Examples (tutorial files)**
3605
3591
 
3606
- See the `examples` directory in this repository with examples of custom launcher, plugin, vector store, LLM (LangChain and LlamaIndex) provider and data loader:
3592
+ See the `examples` directory in this repository with examples of custom launcher, plugin, vector store, LLM (LlamaIndex) provider and data loader:
3607
3593
 
3608
3594
  - `examples/custom_launcher.py`
3609
3595
 
@@ -3659,7 +3645,7 @@ To register custom web providers:
3659
3645
 
3660
3646
  ## Adding a custom model
3661
3647
 
3662
- To add a new model using the OpenAI API, LangChain, or LlamaIndex wrapper, use the editor in `Config -> Models` or manually edit the `models.json` file by inserting the model's configuration details. If you are adding a model via LangChain or LlamaIndex, ensure to include the model's name, its supported modes (either `chat`, `completion`, or both), the LLM provider (such as `OpenAI` or `HuggingFace`), and, if you are using an external API-based model, an optional `API KEY` along with any other necessary environment settings.
3648
+ To add a new model using the OpenAI API or LlamaIndex wrapper, use the editor in `Config -> Models` or manually edit the `models.json` file by inserting the model's configuration details. If you are adding a model via LlamaIndex, ensure to include the model's name, its supported modes (either `chat`, `completion`, or both), the LLM provider (such as `OpenAI` or `HuggingFace`), and, if you are using an external API-based model, an optional `API KEY` along with any other necessary environment settings.
3663
3649
 
3664
3650
  Example of models configuration - `%WORKDIR%/models.json`:
3665
3651
 
@@ -3673,30 +3659,8 @@ Example of models configuration - `%WORKDIR%/models.json`:
3673
3659
  "langchain",
3674
3660
  "llama_index"
3675
3661
  ],
3676
- "langchain": {
3677
- "provider": "openai",
3678
- "mode": [
3679
- "chat"
3680
- ],
3681
- "args": [
3682
- {
3683
- "name": "model_name",
3684
- "value": "gpt-3.5-turbo",
3685
- "type": "str"
3686
- }
3687
- ],
3688
- "env": [
3689
- {
3690
- "name": "OPENAI_API_KEY",
3691
- "value": "{api_key}"
3692
- }
3693
- ]
3694
- },
3662
+ "provider": "openai",
3695
3663
  "llama_index": {
3696
- "provider": "openai",
3697
- "mode": [
3698
- "chat"
3699
- ],
3700
3664
  "args": [
3701
3665
  {
3702
3666
  "name": "model",
@@ -3719,14 +3683,16 @@ Example of models configuration - `%WORKDIR%/models.json`:
3719
3683
 
3720
3684
  There is built-in support for those LLM providers:
3721
3685
 
3722
- ```
3723
- - `OpenAI` (openai)
3724
- - `Azure OpenAI` (azure_openai)
3725
- - `Google` (google)
3726
- - `HuggingFace API` (huggingface_api)
3727
- - `Anthropic` (anthropic)
3728
- - `Ollama` (ollama)
3729
- ```
3686
+ - Anthropic (anthropic)
3687
+ - Azure OpenAI (azure_openai)
3688
+ - Deepseek API (deepseek_api)
3689
+ - Google (google)
3690
+ - HuggingFace (huggingface)
3691
+ - Local models (OpenAI API compatible)
3692
+ - Ollama (ollama)
3693
+ - OpenAI (openai)
3694
+ - Perplexity (perplexity)
3695
+ - xAI (x_ai)
3730
3696
 
3731
3697
  **Tip**: `{api_key}` in `models.json` is a placeholder for the main OpenAI API KEY from the settings. It will be replaced by the configured key value.
3732
3698
 
@@ -3905,7 +3871,7 @@ Events flow can be debugged by enabling the option `Config -> Settings -> Develo
3905
3871
 
3906
3872
  ## Adding a custom LLM provider
3907
3873
 
3908
- Handling LLMs with LangChain and LlamaIndex is implemented through separated wrappers. This allows for the addition of support for any provider and model available via LangChain or LlamaIndex. All built-in wrappers for the models and its providers are placed in the `pygpt_net.provider.llms`.
3874
+ Handling LLMs with LlamaIndex is implemented through separated wrappers. This allows for the addition of support for any provider and model available via LlamaIndex. All built-in wrappers for the models and its providers are placed in the `pygpt_net.provider.llms`.
3909
3875
 
3910
3876
  These wrappers are loaded into the application during startup using `launcher.add_llm()` method:
3911
3877
 
@@ -3979,7 +3945,7 @@ run(
3979
3945
 
3980
3946
  **Examples (tutorial files)**
3981
3947
 
3982
- See the `examples` directory in this repository with examples of custom launcher, plugin, vector store, LLM (LangChain and LlamaIndex) provider and data loader:
3948
+ See the `examples` directory in this repository with examples of custom launcher, plugin, vector store, LLM provider and data loader:
3983
3949
 
3984
3950
  - `examples/custom_launcher.py`
3985
3951
 
@@ -4001,7 +3967,7 @@ These example files can be used as a starting point for creating your own extens
4001
3967
 
4002
3968
  To integrate your own model or provider into **PyGPT**, you can also reference the classes located in the `pygpt_net.provider.llms`. These samples can act as an more complex example for your custom class. Ensure that your custom wrapper class includes two essential methods: `chat` and `completion`. These methods should return the respective objects required for the model to operate in `chat` and `completion` modes.
4003
3969
 
4004
- Every single LLM provider (wrapper) inherits from `BaseLLM` class and can provide 3 components: provider for LangChain, provider for LlamaIndex, and provider for Embeddings.
3970
+ Every single LLM provider (wrapper) inherits from `BaseLLM` class and can provide 2 components: provider for LlamaIndex, and provider for Embeddings.
4005
3971
 
4006
3972
 
4007
3973
  ## Adding a custom vector store provider
@@ -4124,6 +4090,19 @@ may consume additional tokens that are not displayed in the main window.
4124
4090
 
4125
4091
  ## Recent changes:
4126
4092
 
4093
+ **2.5.20 (2025-06-28)**
4094
+
4095
+ - LlamaIndex upgraded to 0.12.44.
4096
+ - Langchain removed from the list of modes and dependencies.
4097
+ - Improved tools execution.
4098
+ - Simplified model configuration.
4099
+ - Added endpoint configuration for non-OpenAI APIs.
4100
+
4101
+ **2.5.19 (2025-06-27)**
4102
+
4103
+ - Added option to enable/disable `Responses API` in `Config -> Settings -> API Keys -> OpenAI`.
4104
+ - Added support for xAI / Grok models, added grok-3 models.
4105
+
4127
4106
  **2.5.18 (2025-06-26)**
4128
4107
 
4129
4108
  - Non-GPT models are now available in standard Chat mode.
@@ -4148,98 +4127,6 @@ may consume additional tokens that are not displayed in the main window.
4148
4127
  - Fixed Ollama provider in the newest LlamaIndex.
4149
4128
  - Added the ability to set a custom base URL for Ollama -> ENV: OLLAMA_API_BASE.
4150
4129
 
4151
- **2.5.14 (2025-06-23)**
4152
-
4153
- - Fix: crash if empty shortcuts in config.
4154
- - Fix: UUID serialization.
4155
-
4156
- **2.5.13 (2025-06-22)**
4157
-
4158
- - Disabled auto-switch to vision mode in Painter.
4159
- - UI fixes.
4160
-
4161
- **2.5.12 (2025-06-22)**
4162
-
4163
- - Fixed send-mode radio buttons switch.
4164
- - Added a new models: qwen2.5-coder, OpenAI gpt-4.1-mini.
4165
-
4166
- **2.5.11 (2025-06-21)**
4167
-
4168
- - Added a new models: OpenAI o1-pro and o3-pro, Anthropic Claude 4.0 Opus and Claude 4.0 Sonnet, Alibaba Qwen and Qwen2.
4169
- - Bielik model upgraded to v2.3 / merged PR #101.
4170
- - Fixed HTML output formatting.
4171
- - Fixed empty index in chat mode.
4172
-
4173
- **2.5.10 (2025-03-06)**
4174
-
4175
- - Added a new model: Claude 3.7 Sonnet.
4176
- - Fixed the context switch issue when the column changed and the tab is not a chat tab.
4177
- - LlamaIndex upgraded to 0.12.22.
4178
- - LlamaIndex LLMs upgraded to recent versions.
4179
-
4180
- **2.5.9 (2025-03-05)**
4181
-
4182
- - Improved formatting of HTML code in the output.
4183
- - Disabled automatic indentation parsing as code blocks.
4184
- - Disabled automatic scrolling of the notepad when opening a tab.
4185
-
4186
- **2.5.8 (2025-03-02)**
4187
-
4188
- - Added a new mode: Research (Perplexity) powered by: https://perplexity.ai - beta.
4189
- - Added Perplexity models: sonar, sonar-pro, sonar-deep-research, sonar-reasoning, sonar-reasoning-pro, r1-1776.
4190
- - Added a new OpenAI model: gpt-4.5-preview.
4191
-
4192
- **2.5.7 (2025-02-26)**
4193
-
4194
- - Stream mode has been enabled in o1 models.
4195
- - CSS styling for <think> tags (reasoning models) has been added.
4196
- - The search input has been moved to the top.
4197
- - The ChatGPT-based style is now set as default.
4198
- - Fix: Display of max tokens in models with a context window greater than 128k.
4199
-
4200
- **2.5.6 (2025-02-03)**
4201
-
4202
- - Fix: disabled index initialization if embedding provider is OpenAI and no API KEY is provided.
4203
- - Fix: embedding provider initialization on empty index.
4204
-
4205
- **2.5.5 (2025-02-02)**
4206
-
4207
- - Fix: system prompt apply.
4208
- - Added calendar live update on tab change.
4209
- - Added API Key monit at launch displayed only once.
4210
-
4211
- **2.5.4 (2025-02-02)**
4212
-
4213
- - Added new models: `o3-mini` and `gpt-4o-mini-audio-preview`.
4214
- - Enabled tool calls in Chat with Audio mode.
4215
- - Added a check to verify if Ollama is running and if the model is available.
4216
-
4217
- **2.5.3 (2025-02-01)**
4218
-
4219
- - Fix: Snap permission denied bug.
4220
- - Fix: column focus on tab change.
4221
- - Datetime separators in groups moved to right side.
4222
-
4223
- **2.5.2 (2025-02-01)**
4224
-
4225
- - Fix: spinner update after inline image generation.
4226
- - Added Ollama suffix to Ollama-models in models list.
4227
-
4228
- **2.5.1 (2025-02-01)**
4229
-
4230
- - PySide6 upgraded to 6.6.2.
4231
- - Disabled Transformers startup warnings.
4232
-
4233
- **2.5.0 (2025-01-31)**
4234
-
4235
- - Added provider for DeepSeek (in Chat with Files mode, beta).
4236
- - Added new models: OpenAI o1, Llama 3.3, DeepSeek V3 and R1 (API + local, with Ollama).
4237
- - Added tool calls for OpenAI o1.
4238
- - Added native vision for OpenAI o1.
4239
- - Fix: tool calls in Ollama provider.
4240
- - Fix: error handling in stream mode.
4241
- - Fix: added check for active plugin tools before tool call.
4242
-
4243
4130
  # Credits and links
4244
4131
 
4245
4132
  **Official website:** <https://pygpt.net>