pygpt-net 2.4.44__py3-none-any.whl → 2.4.46__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to the public package registry.
- CHANGELOG.md +10 -0
- README.md +32 -8
- pygpt_net/CHANGELOG.txt +10 -0
- pygpt_net/__init__.py +2 -2
- pygpt_net/controller/chat/text.py +5 -2
- pygpt_net/controller/idx/__init__.py +2 -1
- pygpt_net/controller/lang/mapping.py +32 -9
- pygpt_net/controller/settings/__init__.py +2 -2
- pygpt_net/core/idx/chat.py +71 -11
- pygpt_net/data/config/config.json +7 -2
- pygpt_net/data/config/models.json +311 -14
- pygpt_net/data/config/modes.json +2 -2
- pygpt_net/data/config/settings.json +104 -11
- pygpt_net/data/config/settings_section.json +3 -0
- pygpt_net/data/css/style.css +1 -0
- pygpt_net/data/locale/locale.de.ini +19 -3
- pygpt_net/data/locale/locale.en.ini +20 -3
- pygpt_net/data/locale/locale.es.ini +19 -3
- pygpt_net/data/locale/locale.fr.ini +19 -3
- pygpt_net/data/locale/locale.it.ini +19 -3
- pygpt_net/data/locale/locale.pl.ini +19 -3
- pygpt_net/data/locale/locale.uk.ini +19 -3
- pygpt_net/data/locale/locale.zh.ini +19 -3
- pygpt_net/data/locale/plugin.cmd_web.de.ini +1 -1
- pygpt_net/data/locale/plugin.cmd_web.en.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.es.ini +1 -1
- pygpt_net/data/locale/plugin.cmd_web.fr.ini +1 -1
- pygpt_net/data/locale/plugin.cmd_web.it.ini +1 -1
- pygpt_net/data/locale/plugin.cmd_web.pl.ini +1 -1
- pygpt_net/data/locale/plugin.cmd_web.uk.ini +1 -1
- pygpt_net/data/locale/plugin.cmd_web.zh.ini +1 -1
- pygpt_net/data/locale/plugin.mailer.en.ini +9 -9
- pygpt_net/provider/core/config/patch.py +22 -1
- pygpt_net/provider/core/model/patch.py +156 -1
- pygpt_net/provider/loaders/hub/google/gmail.py +2 -2
- pygpt_net/provider/loaders/web_github_issues.py +13 -0
- pygpt_net/provider/loaders/web_github_repo.py +10 -0
- pygpt_net/provider/loaders/web_google_gmail.py +3 -2
- pygpt_net/provider/loaders/web_rss.py +1 -0
- pygpt_net/provider/loaders/web_sitemap.py +7 -2
- pygpt_net/tools/indexer/ui/web.py +14 -3
- pygpt_net/ui/dialog/settings.py +22 -7
- pygpt_net/ui/widget/dialog/url.py +14 -3
- {pygpt_net-2.4.44.dist-info → pygpt_net-2.4.46.dist-info}/METADATA +33 -9
- {pygpt_net-2.4.44.dist-info → pygpt_net-2.4.46.dist-info}/RECORD +48 -48
- {pygpt_net-2.4.44.dist-info → pygpt_net-2.4.46.dist-info}/LICENSE +0 -0
- {pygpt_net-2.4.44.dist-info → pygpt_net-2.4.46.dist-info}/WHEEL +0 -0
- {pygpt_net-2.4.44.dist-info → pygpt_net-2.4.46.dist-info}/entry_points.txt +0 -0
CHANGELOG.md
CHANGED
@@ -1,5 +1,15 @@
 # CHANGELOG
 
+## 2.4.46 (2024-12-16)
+
+- Added a new tab in Settings: "API Keys", where the API keys configuration for Google and Anthropic models has been relocated.
+- Introduced a new mode in "Chat with Files": "Retrieve Only", which allows for retrieving raw documents from the index.
+- Fixed a bug related to tool calls in the Gemini provider when using Chat with Files mode.
+
+## 2.4.45 (2024-12-16)
+
+- Enhanced web data loaders UI.
+
 ## 2.4.44 (2024-12-16)
 
 - Enhanced web data loaders.
README.md
CHANGED
@@ -2,7 +2,7 @@
 
 [](https://snapcraft.io/pygpt)
 
-Release: **2.4.
+Release: **2.4.46** | build: **2024.12.16** | Python: **>=3.10, <3.12**
 
 > Official website: https://pygpt.net | Documentation: https://pygpt.readthedocs.io
 >
@@ -329,12 +329,12 @@ During the initial launch, you must configure your API key within the application.
 To do so, navigate to the menu:
 
 ``` ini
-Config -> Settings
+Config -> Settings -> API Keys
 ```
 
 and then paste the API key into the `OpenAI API KEY` field.
 
-
 
 The API key can be obtained by registering on the OpenAI website:
 
@@ -354,6 +354,8 @@ Your API keys will be available here:
 
 This mode in **PyGPT** mirrors `ChatGPT`, allowing you to chat with models such as `o1`, `GPT-4`, `GPT-4o` and `GPT-3.5`. It works by using the `ChatCompletion` OpenAI API.
 
+**Tip: This mode directly uses the OpenAI API. If you want to use models other than GPT (such as Gemini, Claude, or Llama3), use `Chat with Files` mode.**
+
 The main part of the interface is a chat window where you see your conversations. Below it is a message box for typing. On the right side, you can set up or change the model and system prompt. You can also save these settings as presets to easily switch between models or tasks.
 
 Above where you type your messages, the interface shows you the number of tokens your message will use up as you type it – this helps to keep track of usage. There is also a feature to attach and upload files in this area. Go to the `Files and Attachments` section for more information on how to use attachments.
@@ -2723,7 +2725,17 @@ Config -> Settings...
 
 **General**
 
-- `
+- `Minimize to tray on exit`: Minimize to tray icon on exit. Tray icon enabled is required for this option to work. Default: False.
+
+- `Render engine`: chat output render engine: `WebEngine / Chromium` - for full HTML/CSS and `Legacy (markdown)` for legacy, simple markdown CSS output. Default: WebEngine / Chromium.
+
+- `OpenGL hardware acceleration`: enables hardware acceleration in `WebEngine / Chromium` renderer. Default: False.
+
+- `Application environment (os.environ)`: Additional environment vars to set on application start.
+
+**API Keys**
+
+- `OpenAI API KEY`: Required for the OpenAI API. If you wish to use custom endpoints or local APIs, then you may enter any value here.
 
 - `OpenAI ORGANIZATION KEY`: The organization's API key, which is optional for use within the application.
 
@@ -2731,13 +2743,15 @@ Config -> Settings...
 
 - `Proxy address`: Proxy address to be used for connection; supports HTTP/SOCKS.
 
-- `
+- `Google API KEY`: Required for the Google API and Gemini models.
 
-- `
+- `Anthropic API KEY`: Required for the Anthropic API and Claude models.
 
-- `
+- `HuggingFace API KEY`: Required for the HuggingFace API.
 
-- `
+- `OpenAI API version`: Azure OpenAI API version, e.g. 2023-07-01-preview
+
+- `Azure OpenAI API endpoint`: Azure OpenAI API endpoint, https://<your-resource-name>.openai.azure.com/
 
 **Layout**
 
@@ -3938,6 +3952,16 @@ may consume additional tokens that are not displayed in the main window.
 
 ## Recent changes:
 
+**2.4.46 (2024-12-16)**
+
+- Added a new tab in Settings: "API Keys", where the API keys configuration for Google and Anthropic models has been relocated.
+- Introduced a new mode in "Chat with Files": "Retrieve Only", which allows for retrieving raw documents from the index.
+- Fixed a bug related to tool calls in the Gemini provider when using Chat with Files mode.
+
+**2.4.45 (2024-12-16)**
+
+- Enhanced web data loaders UI.
+
 **2.4.44 (2024-12-16)**
 
 - Enhanced web data loaders.
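The README changes above document the new `API Keys` settings tab, which gathers the OpenAI, Google, Anthropic, HuggingFace, and Azure credentials in one place. As a rough illustration of how such per-provider keys are typically consumed, the hedged sketch below maps config entries (named after the new `config.json` keys) to the environment variables the provider SDKs conventionally read; the helper and variable names are assumptions for illustration, not pygpt-net's actual wiring.

```python
import os

# Hypothetical helper (not pygpt-net code): copy per-provider API keys from an
# app config dict into the environment variables the SDKs conventionally read.
CONFIG_TO_ENV = {
    "api_key_google": "GOOGLE_API_KEY",        # google-generativeai / Gemini
    "api_key_anthropic": "ANTHROPIC_API_KEY",  # anthropic SDK / Claude
    "api_key_hugging_face": "HF_TOKEN",        # huggingface_hub
}

def export_provider_keys(config: dict) -> None:
    """Export non-empty keys without overwriting values already set by the user."""
    for cfg_key, env_var in CONFIG_TO_ENV.items():
        value = config.get(cfg_key, "")
        if value and not os.environ.get(env_var):
            os.environ[env_var] = value

export_provider_keys({"api_key_google": "example-key", "api_key_anthropic": ""})
```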
pygpt_net/CHANGELOG.txt
CHANGED
@@ -1,3 +1,13 @@
+2.4.46 (2024-12-16)
+
+- Added a new tab in Settings: "API Keys", where the API keys configuration for Google and Anthropic models has been relocated.
+- Introduced a new mode in "Chat with Files": "Retrieve Only", which allows for retrieving raw documents from the index.
+- Fixed a bug related to tool calls in the Gemini provider when using Chat with Files mode.
+
+2.4.45 (2024-12-16)
+
+- Enhanced web data loaders UI.
+
 2.4.44 (2024-12-16)
 
 - Enhanced web data loaders.
pygpt_net/__init__.py
CHANGED
@@ -6,14 +6,14 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.16
+# Updated Date: 2024.12.16 20:00:00 #
 # ================================================== #
 
 __author__ = "Marcin Szczygliński"
 __copyright__ = "Copyright 2024, Marcin Szczygliński"
 __credits__ = ["Marcin Szczygliński"]
 __license__ = "MIT"
-__version__ = "2.4.
+__version__ = "2.4.46"
 __build__ = "2024.12.16"
 __maintainer__ = "Marcin Szczygliński"
 __github__ = "https://github.com/szczyglis-dev/py-gpt"
pygpt_net/controller/chat/text.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.
+# Updated Date: 2024.12.16 20:00:00 #
 # ================================================== #
 
 from typing import Optional
@@ -83,10 +83,13 @@ class Text:
         base_mode = mode  # store parent mode
         functions = []  # functions to call
         tools_outputs = []  # tools outputs (assistant only)
+        idx_mode = self.window.core.config.get('llama.idx.mode')
 
         # o1 models: disable stream mode
         if model.startswith("o1") or mode in [MODE_AGENT_LLAMA, MODE_AUDIO]:
             stream_mode = False
+        if mode in [MODE_LLAMA_INDEX] and idx_mode == "retrieval":
+            stream_mode = False
 
         # create ctx item
         ctx = CtxItem()
@@ -234,7 +237,7 @@ class Text:
            file_ids=self.window.controller.files.uploaded_ids,  # uploaded files IDs
            assistant_id=assistant_id,
            idx=self.window.controller.idx.current_idx,  # current idx
-           idx_mode=
+           idx_mode=idx_mode,  # llama index mode (chat or query)
            external_functions=functions,  # external functions
            tools_outputs=tools_outputs,  # if not empty then will submit outputs to assistant
            max_tokens=max_tokens,  # max output tokens
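The controller change above reads `llama.idx.mode` once and disables streaming both for `o1` models and for the new `retrieval` sub-mode, since a retrieve-only answer arrives as a single pre-formatted block rather than token deltas. A minimal sketch of that gating rule as a standalone function (the string values of the mode constants are assumptions for illustration, not the project's actual constants):

```python
# Standalone sketch of the stream-gating rule added above; not the controller's
# actual code. The string values of the mode constants are assumptions.
MODE_LLAMA_INDEX = "llama_index"
MODE_AGENT_LLAMA = "agent_llama"
MODE_AUDIO = "audio"

def allow_stream(model: str, mode: str, idx_mode: str) -> bool:
    """Mirror the conditions under which the controller forces stream_mode = False."""
    if model.startswith("o1") or mode in (MODE_AGENT_LLAMA, MODE_AUDIO):
        return False
    if mode == MODE_LLAMA_INDEX and idx_mode == "retrieval":
        return False  # retrieve-only answers come back as one formatted block
    return True

assert allow_stream("gpt-4o", "chat", "chat")
assert not allow_stream("o1-preview", "chat", "chat")
assert not allow_stream("gpt-4o", MODE_LLAMA_INDEX, "retrieval")
```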
pygpt_net/controller/idx/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.
+# Updated Date: 2024.12.16 20:00:00 #
 # ================================================== #
 
 import datetime
@@ -65,6 +65,7 @@ class Idx:
         return [
             {"chat": trans('toolbox.llama_index.mode.chat')},
             {"query": trans('toolbox.llama_index.mode.query')},
+            {"retrieval": trans('toolbox.llama_index.mode.retrieval')},
         ]
 
     def select_mode(self, mode: str):
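The mode list returned above is a list of single-key dicts, each mapping an internal mode value to its translated label. A hedged sketch of flattening that structure into `(value, label)` pairs, for example to populate a combo box (the helper and the labels below are hypothetical):

```python
from typing import Dict, List, Tuple

# Hypothetical helper (not pygpt-net code): flatten the list-of-single-key-dicts
# structure used above into (value, label) pairs. Labels here are placeholders.
def flatten_modes(modes: List[Dict[str, str]]) -> List[Tuple[str, str]]:
    return [(value, label) for entry in modes for value, label in entry.items()]

modes = [
    {"chat": "Chat"},
    {"query": "Query only"},
    {"retrieval": "Retrieve only"},  # added in 2.4.46
]
print(flatten_modes(modes))
```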
pygpt_net/controller/lang/mapping.py
CHANGED
@@ -34,42 +34,66 @@ class Mapping:
         # nodes labels
         for k in self.mapping['nodes']:
             if k in self.window.ui.nodes:
-                self.window.ui.nodes[k].setText(trans(self.mapping['nodes'][k]))
+                try:
+                    self.window.ui.nodes[k].setText(trans(self.mapping['nodes'][k]))
+                except:
+                    pass
 
         # menu title
         for k in self.mapping['menu.title']:
             if k in self.window.ui.menu:
-                self.window.ui.menu[k].setTitle(trans(self.mapping['menu.title'][k]))
+                try:
+                    self.window.ui.menu[k].setTitle(trans(self.mapping['menu.title'][k]))
+                except:
+                    pass
 
         # menu text
         for k in self.mapping['menu.text']:
             if k in self.window.ui.menu:
-                self.window.ui.menu[k].setText(trans(self.mapping['menu.text'][k]))
+                try:
+                    self.window.ui.menu[k].setText(trans(self.mapping['menu.text'][k]))
+                except:
+                    pass
 
         # menu tooltip
        for k in self.mapping['menu.tooltip']:
             if k in self.window.ui.menu:
-                self.window.ui.menu[k].setToolTip(trans(self.mapping['menu.tooltip'][k]))
+                try:
+                    self.window.ui.menu[k].setToolTip(trans(self.mapping['menu.tooltip'][k]))
+                except:
+                    pass
 
         # dialog title
         for k in self.mapping['dialog.title']:
             if k in self.window.ui.dialog:
-                self.window.ui.dialog[k].setWindowTitle(trans(self.mapping['dialog.title'][k]))
+                try:
+                    self.window.ui.dialog[k].setWindowTitle(trans(self.mapping['dialog.title'][k]))
+                except:
+                    pass
 
         # tooltip
         for k in self.mapping['tooltip']:
             if k in self.window.ui.nodes:
-                self.window.ui.nodes[k].setToolTip(trans(self.mapping['tooltip'][k]))
+                try:
+                    self.window.ui.nodes[k].setToolTip(trans(self.mapping['tooltip'][k]))
+                except:
+                    pass
 
         # placeholder
         for k in self.mapping['placeholder']:
             if k in self.window.ui.nodes:
-                self.window.ui.nodes[k].setPlaceholderText(trans(self.mapping['placeholder'][k]))
+                try:
+                    self.window.ui.nodes[k].setPlaceholderText(trans(self.mapping['placeholder'][k]))
+                except:
+                    pass
 
         # menu tab tools
         for k in self.window.controller.tools.get_tab_tools():
             if k in self.window.ui.menu:
-                self.window.ui.menu[k].setText(trans("output.tab." + self.window.controller.tools.get_tab_tools()[k][0]))
+                try:
+                    self.window.ui.menu[k].setText(trans("output.tab." + self.window.controller.tools.get_tab_tools()[k][0]))
+                except:
+                    pass
 
     def get_mapping(self) -> Dict[str, Dict[str, str]]:
         """
@@ -313,7 +337,6 @@
         nodes['tool.indexer.file.options.replace'] = 'tool.indexer.option.replace'
         nodes['tool.indexer.web.loader.label'] = 'tool.indexer.tab.web.loader'
         nodes['tool.indexer.web.options.label'] = 'tool.indexer.tab.web.source'
-        nodes['tool.indexer.web.config.label'] = 'tool.indexer.tab.web.cfg'
         nodes['tool.indexer.web.options.replace'] = 'tool.indexer.option.replace'
         nodes['tool.indexer.file.header.tip'] = 'tool.indexer.tab.files.tip'
         nodes['tool.indexer.web.header.tip'] = 'tool.indexer.tab.web.tip'
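The mapping.py change wraps every widget update in `try`/`except`, so a single missing or already-deleted widget no longer aborts the whole re-translation pass. A hedged sketch of the same defensive idea with a narrower exception scope and a debug log, shown only as an alternative style rather than the project's code:

```python
import logging

logger = logging.getLogger(__name__)

# Hypothetical variant of the pattern above (not pygpt-net code): apply translated
# strings to widgets, skipping any widget whose setter fails, e.g. because the
# underlying Qt object was already deleted.
def apply_translations(widgets: dict, mapping: dict, setter: str, translate) -> None:
    for key, locale_key in mapping.items():
        widget = widgets.get(key)
        if widget is None:
            continue
        try:
            getattr(widget, setter)(translate(locale_key))
        except (RuntimeError, AttributeError) as err:
            logger.debug("skipping %s: %s", key, err)
```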
pygpt_net/controller/settings/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.
+# Updated Date: 2024.12.16 20:00:00 #
 # ================================================== #
 
 import os
@@ -230,7 +230,7 @@ class Settings:
 
     def welcome_settings(self):
        """Open settings at first launch (if no API key yet)"""
-        self.open_section("
+        self.open_section("api_keys")
         self.window.ui.config['config']['api_key'].setFocus()
         self.window.ui.dialogs.close('info.start')
 
pygpt_net/core/idx/chat.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.
+# Updated Date: 2024.12.16 20:00:00 #
 # ================================================== #
 
 import json
@@ -61,6 +61,11 @@ class Chat:
                 context=context,
                 extra=extra,
             )
+        elif idx_mode == "retrieval":  # retrieval mode
+            return self.retrieval(
+                context=context,
+                extra=extra,
+            )
 
         # if not raw, check if chat mode is available
         if MODE_CHAT in model.llama_index['mode']:
@@ -163,6 +168,50 @@ class Chat:
             return True
         return False
 
+    def retrieval(
+            self,
+            context: BridgeContext,
+            extra: Optional[Dict[str, Any]] = None
+    ) -> bool:
+        """
+        Retrieve documents from index only
+
+        :param context: Bridge context
+        :param extra: Extra arguments
+        :return: True if success
+        """
+        idx = context.idx
+        model = context.model
+        ctx = context.ctx
+        query = ctx.input  # user input
+        verbose = self.window.core.config.get("log.llama", False)
+
+        self.log("Retrieval...")
+        self.log("Idx: {}, retrieve only: {}".format(
+            idx,
+            query,
+        ))
+
+        index, service_context = self.get_index(idx, model)
+        retriever = index.as_retriever()
+        nodes = retriever.retrieve(query)
+        outputs = []
+        self.log("Retrieved {} nodes...".format(len(nodes)))
+        for node in nodes:
+            outputs.append({
+                "text": node.text,
+                "score": node.score,
+            })
+        if outputs:
+            response = ""
+            for output in outputs:
+                response += "**Score: {}**\n\n{}".format(output["score"], output["text"])
+                if output != outputs[-1]:
+                    response += "\n\n-------\n\n"
+            ctx.set_output(response)
+            ctx.add_doc_meta(self.get_metadata(nodes))
+        return True
+
     def chat(
             self,
             context: BridgeContext,
@@ -225,6 +274,7 @@ class Chat:
         )
 
         if use_index:
+            # CMD: commands are applied to system prompt here
             # index as query engine
             chat_engine = index.as_chat_engine(
                 llm=llm,
@@ -238,6 +288,7 @@ class Chat:
             else:
                 response = chat_engine.chat(query)
         else:
+            # CMD: commands are applied to system prompt here
            # prepare tools (native calls if enabled)
            tools = self.window.core.agents.tools.prepare(context, extra)
 
@@ -245,15 +296,25 @@ class Chat:
            history.insert(0, self.context.add_system(system_prompt))
            history.append(self.context.add_user(query))
            if stream:
-                response = llm.stream_chat_with_tools(
-                    tools=tools,
-                    messages=history,
-                )
+                if hasattr(llm, "stream_chat_with_tools"):
+                    response = llm.stream_chat_with_tools(
+                        tools=tools,
+                        messages=history,
+                    )
+                else:
+                    response = llm.stream_chat(
+                        messages=history,
+                    )
            else:
-                response = llm.chat_with_tools(
-                    tools=tools,
-                    messages=history,
-                )
+                if hasattr(llm, "chat_with_tools"):
+                    response = llm.chat_with_tools(
+                        tools=tools,
+                        messages=history,
+                    )
+                else:
+                    response = llm.chat(
+                        messages=history,
+                    )
 
            # handle response
            if response:
@@ -275,10 +336,9 @@ class Chat:
                 if output is None:
                     output = ""
                 ctx.set_output(output, "")
-                print("output", output)
                 ctx.add_doc_meta(self.get_metadata(response.source_nodes))  # store metadata
             else:
-                # from LLM
+                # from LLM directly
                 if stream:
                     # tools handled in stream output controller
                     ctx.stream = response  # chunk is in response.delta
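The new `retrieval()` method above asks the index for its retriever, fetches the nodes matching the user query, and formats them as score/text blocks instead of passing them to an LLM. The sketch below reproduces that flow with the public LlamaIndex API on a tiny in-memory index; it is a minimal illustration, not pygpt-net's code, and building the index uses the default embedding model, which assumes `OPENAI_API_KEY` is set.

```python
# Minimal retrieval-only sketch mirroring the retrieval() method above.
# Assumes llama-index >= 0.10 and OPENAI_API_KEY for the default embeddings.
from llama_index.core import Document, VectorStoreIndex

docs = [
    Document(text="PyGPT's Chat with Files mode supports chat, query and retrieval."),
    Document(text="Retrieve Only mode returns raw document chunks with their scores."),
]
index = VectorStoreIndex.from_documents(docs)

retriever = index.as_retriever(similarity_top_k=2)  # retrieval() also calls as_retriever()
nodes = retriever.retrieve("What does Retrieve Only mode return?")

# Same formatting idea as the new method: a score header per chunk, separated by a rule.
blocks = ["**Score: {}**\n\n{}".format(node.score, node.text) for node in nodes]
print("\n\n-------\n\n".join(blocks))
```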
pygpt_net/data/config/config.json
CHANGED
@@ -1,7 +1,7 @@
 {
     "__meta__": {
-        "version": "2.4.
-        "app.version": "2.4.
+        "version": "2.4.46",
+        "app.version": "2.4.46",
         "updated_at": "2024-12-16T00:00:00"
     },
     "access.audio.event.speech": false,
@@ -58,8 +58,13 @@
     "agent.llama.verbose": false,
     "agent.mode": "chat",
     "ai_name": "",
+    "api_azure_version": "2023-07-01-preview",
+    "api_azure_endpoint": "https://<your-resource-name>.openai.azure.com/",
     "api_endpoint": "https://api.openai.com/v1",
     "api_key": "",
+    "api_key_google": "",
+    "api_key_anthropic": "",
+    "api_key_hugging_face": "",
     "api_proxy": "",
     "app.env": [],
     "assistant": "",
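`config.json` gains defaults for the Azure settings and the new per-provider key fields, and the file list above shows `pygpt_net/provider/core/config/patch.py` growing by 22 lines, which points to a migration that back-fills these keys on upgrade. A hedged sketch of such a back-fill step (the actual patch logic is not shown in this diff):

```python
# Hypothetical config back-fill step (not the actual patch.py): add the keys
# introduced in 2.4.46 when they are missing from an older user config.
NEW_DEFAULTS = {
    "api_azure_version": "2023-07-01-preview",
    "api_azure_endpoint": "https://<your-resource-name>.openai.azure.com/",
    "api_key_google": "",
    "api_key_anthropic": "",
    "api_key_hugging_face": "",
}

def patch_config(data: dict) -> bool:
    """Insert missing defaults; return True if anything was added."""
    updated = False
    for key, default in NEW_DEFAULTS.items():
        if key not in data:
            data[key] = default
            updated = True
    return updated

cfg = {"api_key": "", "api_proxy": ""}
print(patch_config(cfg), sorted(cfg))
```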