pygpt-net 2.4.36.post1__py3-none-any.whl → 2.4.38__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. CHANGELOG.md +14 -1
  2. README.md +54 -13
  3. pygpt_net/CHANGELOG.txt +14 -1
  4. pygpt_net/__init__.py +3 -3
  5. pygpt_net/controller/chat/attachment.py +7 -39
  6. pygpt_net/controller/config/placeholder.py +29 -0
  7. pygpt_net/controller/lang/mapping.py +2 -2
  8. pygpt_net/controller/settings/editor.py +6 -0
  9. pygpt_net/controller/theme/__init__.py +33 -8
  10. pygpt_net/controller/theme/common.py +22 -1
  11. pygpt_net/controller/theme/markdown.py +26 -14
  12. pygpt_net/controller/theme/menu.py +26 -5
  13. pygpt_net/core/attachments/context.py +145 -53
  14. pygpt_net/core/audio/__init__.py +59 -1
  15. pygpt_net/core/bridge/worker.py +16 -2
  16. pygpt_net/core/events/event.py +2 -1
  17. pygpt_net/core/filesystem/__init__.py +5 -19
  18. pygpt_net/core/idx/chat.py +22 -24
  19. pygpt_net/core/render/web/body.py +31 -15
  20. pygpt_net/data/config/config.json +11 -5
  21. pygpt_net/data/config/models.json +3 -3
  22. pygpt_net/data/config/modes.json +3 -3
  23. pygpt_net/data/config/settings.json +81 -10
  24. pygpt_net/data/config/settings_section.json +3 -0
  25. pygpt_net/data/css/style.light.css +1 -0
  26. pygpt_net/data/css/{web.css → web-blocks.css} +144 -133
  27. pygpt_net/data/css/web-chatgpt.css +342 -0
  28. pygpt_net/data/css/web-chatgpt.dark.css +64 -0
  29. pygpt_net/data/css/web-chatgpt.light.css +75 -0
  30. pygpt_net/data/css/web-chatgpt_wide.css +342 -0
  31. pygpt_net/data/css/web-chatgpt_wide.dark.css +64 -0
  32. pygpt_net/data/css/web-chatgpt_wide.light.css +75 -0
  33. pygpt_net/data/locale/locale.de.ini +16 -3
  34. pygpt_net/data/locale/locale.en.ini +24 -10
  35. pygpt_net/data/locale/locale.es.ini +16 -3
  36. pygpt_net/data/locale/locale.fr.ini +16 -3
  37. pygpt_net/data/locale/locale.it.ini +16 -3
  38. pygpt_net/data/locale/locale.pl.ini +17 -4
  39. pygpt_net/data/locale/locale.uk.ini +16 -3
  40. pygpt_net/data/locale/locale.zh.ini +17 -4
  41. pygpt_net/plugin/audio_input/simple.py +17 -3
  42. pygpt_net/plugin/idx_llama_index/__init__.py +2 -2
  43. pygpt_net/plugin/real_time/__init__.py +2 -2
  44. pygpt_net/provider/core/config/patch.py +26 -1
  45. pygpt_net/ui/menu/config.py +7 -11
  46. pygpt_net/ui/menu/theme.py +9 -2
  47. pygpt_net/ui/widget/lists/context.py +1 -0
  48. pygpt_net/ui/widget/textarea/search_input.py +4 -1
  49. {pygpt_net-2.4.36.post1.dist-info → pygpt_net-2.4.38.dist-info}/METADATA +55 -14
  50. {pygpt_net-2.4.36.post1.dist-info → pygpt_net-2.4.38.dist-info}/RECORD +55 -49
  51. /pygpt_net/data/css/{web.dark.css → web-blocks.dark.css} +0 -0
  52. /pygpt_net/data/css/{web.light.css → web-blocks.light.css} +0 -0
  53. {pygpt_net-2.4.36.post1.dist-info → pygpt_net-2.4.38.dist-info}/LICENSE +0 -0
  54. {pygpt_net-2.4.36.post1.dist-info → pygpt_net-2.4.38.dist-info}/WHEEL +0 -0
  55. {pygpt_net-2.4.36.post1.dist-info → pygpt_net-2.4.38.dist-info}/entry_points.txt +0 -0
pygpt_net/controller/theme/menu.py
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.11.05 23:00:00 #
+ # Updated Date: 2024.12.07 21:00:00 #
  # ================================================== #

  from PySide6.QtGui import QAction
@@ -30,6 +30,18 @@ class Menu:
  # setup themes list menu
  if self.loaded:
  return
+
+ # styles
+ styles = self.window.controller.theme.common.get_styles_list()
+ for style in styles:
+ style_id = style.lower()
+ title = style.replace('_', ' ').title()
+ self.window.ui.menu['theme_style'][style_id] = QAction(title, self.window, checkable=True)
+ self.window.ui.menu['theme_style'][style_id].triggered.connect(
+ lambda checked=None, style=style_id: self.window.controller.theme.toggle_style(style))
+ self.window.ui.menu['theme.style'].addAction(self.window.ui.menu['theme_style'][style_id])
+
+ # color themes
  themes = self.window.controller.theme.common.get_themes_list()
  for theme in themes:
  name = self.window.controller.theme.common.translate(theme)
@@ -42,6 +54,7 @@ class Menu:
  self.window.ui.menu['theme.dark'].addAction(self.window.ui.menu['theme'][theme])
  elif theme.startswith('light'):
  self.window.ui.menu['theme.light'].addAction(self.window.ui.menu['theme'][theme])
+
  self.loaded = True

  def setup_syntax(self):
@@ -86,14 +99,22 @@ class Menu:

  def update_list(self):
  """Update theme list menu"""
- current = self.window.core.config.get('theme')
+ # styles
+ current_style = self.window.core.config.get('theme.style')
+ for style in self.window.ui.menu['theme_style']:
+ self.window.ui.menu['theme_style'][style].setChecked(False)
+ if current_style in self.window.ui.menu['theme_style']:
+ self.window.ui.menu['theme_style'][current_style].setChecked(True)
+
+ # color themes
+ current_theme = self.window.core.config.get('theme')
  for theme in self.window.ui.menu['theme']:
  self.window.ui.menu['theme'][theme].setChecked(False)
- if current in self.window.ui.menu['theme']:
- self.window.ui.menu['theme'][current].setChecked(True)
+ if current_theme in self.window.ui.menu['theme']:
+ self.window.ui.menu['theme'][current_theme].setChecked(True)

  def update_syntax(self):
- """Update syntax menu"""
+ """Update code syntax highlight menu"""
  current = self.window.core.config.get('render.code_syntax')
  for style in self.window.ui.menu['theme_syntax']:
  self.window.ui.menu['theme_syntax'][style].setChecked(False)
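
Note: the `triggered.connect(lambda checked=None, style=style_id: ...)` call above uses default-argument binding to capture the loop value; without it, every menu action would toggle the last style in the list. A minimal standalone sketch of that pattern (plain Python, no Qt; the style names and `toggle_style` stand-in below are assumed from the new CSS file names, not taken from the package API):

    # Sketch: per-iteration capture via a default argument (hypothetical stand-ins).
    def toggle_style(style_id: str) -> None:
        print("toggling style:", style_id)

    styles = ["blocks", "chatgpt", "chatgpt_wide"]

    # Late binding: every callback sees the final value of the loop variable.
    broken = [lambda checked=None: toggle_style(style) for style in styles]

    # Default-argument binding (as in the diff): each callback keeps its own value.
    correct = [lambda checked=None, style=style: toggle_style(style) for style in styles]

    broken[0]()   # -> "toggling style: chatgpt_wide" (late-binding bug)
    correct[0]()  # -> "toggling style: blocks" (value captured per iteration)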
pygpt_net/core/attachments/context.py
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.11.26 04:00:00 #
+ # Updated Date: 2024.11.30 04:00:00 #
  # ================================================== #

  import copy
@@ -40,7 +40,7 @@ class Context:
  Summarize the text below by extracting the most important information,
  especially those that may help answer the question:

- `{query}`.
+ `{query}`

  If the answer to the question is not in the text to summarize,
  simply return a summary of the entire content.
@@ -59,25 +59,49 @@ class Context:

  `{content}`
  """
-
- def get_all(self, meta: CtxMeta) -> list:
- """
- Get all attachments for meta
-
- :param meta: CtxMeta instance
- :return: list of attachments
- """
- return meta.additional_ctx
-
- def get_dir(self, meta: CtxMeta) -> str:
- """
- Get directory for meta
-
- :param meta: CtxMeta instance
- :return: directory path
+ self.rag_prompt = """
+ Prepare a question for the RAG engine (vector database) asking for additional context that can help obtain
+ extra information necessary to answer the user's question. The query should be brief and to the point,
+ so as to be processed as effectively as possible by the RAG engine. Below is the entire conversation
+ of the user with the AI assistant, and at the end the current user's question, for which you need to
+ prepare DIRECT query for the RAG engine for additional context, taking into account the content of the entire
+ discussion and its context. In your response, return only the DIRECT query for additional context,
+ do not return anything else besides it. The response should not contain any phrases other than the query itself:
+
+ # Good RAG query example:
+
+ `What is the capital of France?`
+
+ # Bad RAG query example:
+
+ `Can you tell me the capital of France?`
+
+ # Full conversation:
+
+ `{history}`
+
+ # User question:
+
+ `{query}`
+ """
+
+ def get_context(self, mode: str, ctx: CtxItem, history: list) -> str:
+ """
+ Get context for mode
+
+ :param mode: Context mode
+ :param ctx: CtxItem instance
+ :param history: history
+ :return: context
  """
- meta_uuid = str(meta.uuid)
- return os.path.join(self.window.core.config.get_user_dir("ctx_idx"), meta_uuid)
+ content = ""
+ if mode == self.window.controller.chat.attachment.MODE_FULL_CONTEXT:
+ content = self.get_context_text(ctx, filename=True)
+ elif mode == self.window.controller.chat.attachment.MODE_QUERY_CONTEXT:
+ content = self.query_context(ctx, history)
+ elif mode == self.window.controller.chat.attachment.MODE_QUERY_CONTEXT_SUMMARY:
+ content = self.summary_context(ctx, history)
+ return content

  def get_context_text(self, ctx: CtxItem, filename: bool = False) -> str:
  """
@@ -95,6 +119,8 @@ class Context:
  if ("type" not in file
  or file["type"] not in ["local_file", "url"]):
  continue
+ if not "uuid" in file:
+ continue
  file_id = file["uuid"]
  file_idx_path = os.path.join(meta_path, file_id)
  text_path = os.path.join(file_idx_path, file_id + ".txt")
@@ -126,15 +152,17 @@ class Context:
  self.last_used_context = context
  return context

- def query_context(self, meta: CtxMeta, query: str) -> str:
+ def query_context(self, ctx: CtxItem, history: list) -> str:
  """
  Query the index for context

- :param meta : CtxMeta instance
- :param query: query string
+ :param ctx: CtxItem instance
+ :param history: history
  :return: query result
  """
+ meta = ctx.meta
  meta_path = self.get_dir(meta)
+ query = str(ctx.input)
  if not os.path.exists(meta_path) or not os.path.isdir(meta_path):
  return ""
  idx_path = os.path.join(self.get_dir(meta), self.dir_index)
@@ -162,8 +190,21 @@ class Context:
  self.window.core.ctx.replace(meta)
  self.window.core.ctx.save(meta.id)

+ history_data = self.prepare_context_history(history)
  model, model_item = self.get_selected_model("query")
- result = self.window.core.idx.chat.query_attachment(query, idx_path, model_item)
+
+ verbose = False
+ if self.is_verbose():
+ verbose = True
+ print("Attachments: using query model: {}".format(model))
+
+ result = self.window.core.idx.chat.query_attachment(
+ query=query,
+ path=idx_path,
+ model=model_item,
+ history=history_data,
+ verbose=verbose,
+ )
  self.last_used_context = result

  if self.is_verbose():
@@ -171,28 +212,12 @@ class Context:

  return result

- def get_selected_model(self, mode: str = "summary"):
- """
- Get selected model for attachments
-
- :return: model name, model item
- """
- model_item = None
- model = None
- if mode == "summary":
- model = self.window.core.config.get("ctx.attachment.summary.model", "gpt-4o-mini")
- elif mode == "query":
- model = self.window.core.config.get("ctx.attachment.query.model", "gpt-4o-mini")
- if model:
- model_item = self.window.core.models.get(model)
- return model, model_item
-
- def summary_context(self, ctx: CtxItem, query: str) -> str:
+ def summary_context(self, ctx: CtxItem, history: list) -> str:
  """
  Get summary of the context

  :param ctx: CtxItem instance
- :param query: query string
+ :param history: history
  :return: query result
  """
  model, model_item = self.get_selected_model("summary")
@@ -202,6 +227,7 @@ class Context:
  if self.is_verbose():
  print("Attachments: using summary model: {}".format(model))

+ query = str(ctx.input)
  content = self.get_context_text(ctx, filename=True)
  prompt = self.summary_prompt.format(
  query=str(query).strip(),
@@ -210,12 +236,14 @@ class Context:
  if self.is_verbose():
  print("Attachments: summary prompt: {}".format(prompt))

+ history_data = self.prepare_context_history(history)
  ctx = CtxItem()
  bridge_context = BridgeContext(
  ctx=ctx,
  prompt=prompt,
  stream=False,
  model=model_item,
+ history=history_data,
  )
  event = KernelEvent(KernelEvent.CALL, {
  'context': bridge_context,
@@ -228,6 +256,35 @@ class Context:
  print("Attachments: summary received: {}".format(response))
  return response

+ def prepare_context_history(self, history: list) -> list:
+ """
+ Prepare context history
+
+ :param history: history
+ :return: history data
+ """
+ use_history = self.window.core.config.get("ctx.attachment.rag.history", True)
+ history_data = []
+ if use_history:
+ if self.is_verbose():
+ print("Attachments: using history for query prepare...")
+
+ # use only last X items from history
+ num_items = self.window.core.config.get("ctx.attachment.rag.history.max_items", 3)
+ history_data = []
+ for item in history:
+ history_data.append(item)
+
+ # 0 = unlimited
+ if num_items > 0:
+ if self.is_verbose():
+ print("Attachments: using last {} items from history...".format(num_items))
+ if len(history_data) < num_items:
+ num_items = len(history_data)
+ history_data = history_data[-num_items:]
+
+ return history_data
+
  def upload(
  self,
  meta: CtxMeta,
@@ -396,6 +453,41 @@ class Context:
  print("Attachments: indexed. Doc IDs: {}".format(doc_ids))
  return doc_ids

+ def get_all(self, meta: CtxMeta) -> list:
+ """
+ Get all attachments for meta
+
+ :param meta: CtxMeta instance
+ :return: list of attachments
+ """
+ return meta.additional_ctx
+
+ def get_dir(self, meta: CtxMeta) -> str:
+ """
+ Get directory for meta
+
+ :param meta: CtxMeta instance
+ :return: directory path
+ """
+ meta_uuid = str(meta.uuid)
+ return os.path.join(self.window.core.config.get_user_dir("ctx_idx"), meta_uuid)
+
+ def get_selected_model(self, mode: str = "summary"):
+ """
+ Get selected model for attachments
+
+ :return: model name, model item
+ """
+ model_item = None
+ model = None
+ if mode == "summary":
+ model = self.window.core.config.get("ctx.attachment.summary.model", "gpt-4o-mini")
+ elif mode == "query":
+ model = self.window.core.config.get("ctx.attachment.query.model", "gpt-4o-mini")
+ if model:
+ model_item = self.window.core.models.get(model)
+ return model, model_item
+
  def duplicate(self, from_meta_id: int, to_meta_id: int) -> bool:
  """
  Duplicate attachments from one meta to another
@@ -465,17 +557,6 @@ class Context:
  if meta is not None:
  self.delete_index(meta)

- def reset_by_meta_id(self, meta_id: int, delete_files: bool = False):
- """
- Delete all attachments for meta by id
-
- :param meta_id: Meta id
- :param delete_files: delete files
- """
- meta = self.window.core.ctx.get_meta_by_id(meta_id)
- if meta is not None:
- self.reset_by_meta(meta, delete_files)
-
  def reset_by_meta(self, meta: CtxMeta, delete_files: bool = False):
  """
  Delete all attachments for meta
@@ -488,6 +569,17 @@ class Context:
  if delete_files:
  self.delete_index(meta)

+ def reset_by_meta_id(self, meta_id: int, delete_files: bool = False):
+ """
+ Delete all attachments for meta by id
+
+ :param meta_id: Meta id
+ :param delete_files: delete files
+ """
+ meta = self.window.core.ctx.get_meta_by_id(meta_id)
+ if meta is not None:
+ self.reset_by_meta(meta, delete_files)
+
  def clear(self, meta: CtxMeta, delete_files: bool = False):
  """
  Clear all attachments by ctx meta
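
Note: the history-trimming rule in `prepare_context_history` above (keep the last `ctx.attachment.rag.history.max_items` entries, `0` meaning unlimited) reduces to the following standalone sketch; the function name is illustrative, not part of the package API:

    # Standalone sketch of the trimming rule (illustrative only).
    def trim_history(history: list, max_items: int = 3) -> list:
        """Return the last `max_items` entries; 0 means no limit."""
        if max_items <= 0:
            return list(history)
        return list(history)[-max_items:]

    assert trim_history([1, 2, 3, 4, 5], 3) == [3, 4, 5]
    assert trim_history([1, 2], 3) == [1, 2]                    # fewer items than the limit
    assert trim_history([1, 2, 3, 4, 5], 0) == [1, 2, 3, 4, 5]  # 0 = unlimited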
pygpt_net/core/audio/__init__.py
@@ -6,10 +6,11 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.11.26 19:00:00 #
+ # Updated Date: 2024.12.08 00:00:00 #
  # ================================================== #

  import re
+ from bs4 import UnicodeDammit

  from pygpt_net.provider.audio_input.base import BaseProvider as InputBaseProvider
  from pygpt_net.provider.audio_output.base import BaseProvider as OutputBaseProvider
@@ -27,6 +28,55 @@ class Audio:
  "input": {},
  "output": {},
  }
+ self.last_error = None
+
+ def get_input_devices(self) -> list:
+ """
+ Get input devices
+
+ :return devices list: [(id, name)]
+ """
+ import pyaudio
+ devices = []
+ try:
+ p = pyaudio.PyAudio()
+ num_devices = p.get_device_count()
+ for i in range(num_devices):
+ info = p.get_device_info_by_index(i)
+ if info["maxInputChannels"] > 0:
+ dammit = UnicodeDammit(info["name"])
+ devices.append((i, dammit.unicode_markup))
+ # print(f"Device ID {i}: {info['name']}")
+ p.terminate()
+ except Exception as e:
+ print(f"Audio input devices receive error: {e}")
+ return devices
+
+ def is_device_compatible(self, device_index) -> bool:
+ """
+ Check if device is compatible
+
+ :param device_index: device index
+ :return: True if compatible
+ """
+ import pyaudio
+ rate = int(self.window.core.config.get('audio.input.rate', 44100))
+ channels = int(self.window.core.config.get('audio.input.channels', 1))
+ p = pyaudio.PyAudio()
+ info = p.get_device_info_by_index(device_index)
+ supported = False
+ try:
+ p.is_format_supported(
+ rate=rate,
+ input_device=info['index'],
+ input_channels=channels,
+ input_format=pyaudio.paInt16)
+ supported = True
+ except ValueError as e:
+ self.last_error = str(e)
+ supported = False
+ p.terminate()
+ return supported

  def is_registered(self, id: str, type: str = "output") -> bool:
  """
@@ -92,3 +142,11 @@ class Audio:
  :return: cleaned text
  """
  return re.sub(r'~###~.*?~###~', '', str(text))
+
+ def get_last_error(self) -> str:
+ """
+ Return last error
+
+ :return: Error
+ """
+ return self.last_error
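
Note: a self-contained sketch of the device-enumeration approach introduced above, assuming `pyaudio` and `beautifulsoup4` are installed; this mirrors the new helper but is not the package's code:

    # Sketch: list input-capable audio devices, decoding names with UnicodeDammit.
    import pyaudio
    from bs4 import UnicodeDammit

    def list_input_devices() -> list:
        devices = []
        p = pyaudio.PyAudio()
        try:
            for i in range(p.get_device_count()):
                info = p.get_device_info_by_index(i)
                if info["maxInputChannels"] > 0:
                    devices.append((i, UnicodeDammit(info["name"]).unicode_markup))
        finally:
            p.terminate()
        return devices

    if __name__ == "__main__":
        for idx, name in list_input_devices():
            print(idx, name)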
pygpt_net/core/bridge/worker.py
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.11.23 21:00:00 #
+ # Updated Date: 2024.11.29 23:00:00 #
  # ================================================== #

  from PySide6.QtCore import QObject, Signal, QRunnable, Slot
@@ -50,6 +50,9 @@ class BridgeWorker(QObject, QRunnable):
  # ADDITIONAL CONTEXT: append additional context from attachments
  self.handle_additional_context()

+ # POST PROMPT END: handle post prompt end event
+ self.handle_post_prompt_end()
+
  # Langchain
  if self.mode == MODE_LANGCHAIN:
  result = self.window.core.chain.call(
@@ -124,6 +127,17 @@ class BridgeWorker(QObject, QRunnable):
  self.window.dispatch(event)
  self.context.system_prompt = event.data['value']

+ def handle_post_prompt_end(self):
+ """Handle post prompt end event"""
+ event = Event(Event.POST_PROMPT_END, {
+ 'mode': self.context.mode,
+ 'reply': self.context.ctx.reply,
+ 'value': self.context.system_prompt,
+ })
+ event.ctx = self.context.ctx
+ self.window.dispatch(event)
+ self.context.system_prompt = event.data['value']
+
  def handle_additional_context(self):
  """Append additional context"""
  ctx = self.context.ctx
@@ -133,7 +147,7 @@ class BridgeWorker(QObject, QRunnable):
  return
  if not self.window.controller.chat.attachment.has_context(ctx.meta):
  return
- ad_context = self.window.controller.chat.attachment.get_context(ctx)
+ ad_context = self.window.controller.chat.attachment.get_context(ctx, self.context.history)
  ad_mode = self.window.controller.chat.attachment.get_mode()
  if ad_context:
  self.context.prompt += "\n\n" + ad_context # append to input text
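
Note: the new `post.prompt.end` event is dispatched after the other prompt events, once the attachment context has been appended; the worker reads `event.data['value']` back as the final system prompt. A hedged sketch of how a subscriber might react to it, using a mock Event stand-in rather than the package's class:

    # Mock sketch of reacting to POST_PROMPT_END (Event here is a stand-in, not pygpt_net's class).
    class Event:
        POST_PROMPT_END = "post.prompt.end"
        def __init__(self, name: str, data: dict):
            self.name = name
            self.data = data

    def handle(event: Event) -> None:
        if event.name == Event.POST_PROMPT_END:
            # 'value' carries the system prompt; a handler may rewrite it in place
            event.data["value"] += "\n\n# appended at post.prompt.end"

    event = Event(Event.POST_PROMPT_END, {"mode": "chat", "reply": False, "value": "You are a helpful assistant."})
    handle(event)
    print(event.data["value"])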
pygpt_net/core/events/event.py
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.11.26 19:00:00 #
+ # Updated Date: 2024.11.29 23:00:00 #
  # ================================================== #

  import json
@@ -50,6 +50,7 @@ class Event(BaseEvent):
  PLUGIN_OPTION_GET = "plugin.option.get"
  POST_PROMPT = "post.prompt"
  POST_PROMPT_ASYNC = "post.prompt.async"
+ POST_PROMPT_END = "post.prompt.end"
  PRE_PROMPT = "pre.prompt"
  SYSTEM_PROMPT = "system.prompt"
  TOOL_OUTPUT_RENDER = "tool.output.render"
pygpt_net/core/filesystem/__init__.py
@@ -6,13 +6,12 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.11.23 21:00:00 #
+ # Updated Date: 2024.12.07 21:00:00 #
  # ================================================== #

  import os
  import shutil

- from datetime import datetime
  from pathlib import PurePath
  from uuid import uuid4

@@ -38,18 +37,6 @@ class Filesystem:
  self.types = Types(window)
  self.url = Url(window)
  self.workdir_placeholder = "%workdir%"
- self.styles = [
- "style.css",
- "style.dark.css",
- "style.light.css",
- "markdown.css",
- "markdown.dark.css",
- "markdown.light.css",
- "web.css",
- "web.dark.css",
- "web.light.css",
- "fix_windows.css",
- ]

  def install(self):
  """Install provider data"""
@@ -77,9 +64,9 @@ class Filesystem:

  src_dir = os.path.join(self.window.core.config.get_app_path(), 'data', 'css')
  dst_dir = os.path.join(self.window.core.config.path, 'css')
-
+ app_styles = os.listdir(src_dir)
  try:
- for style in self.styles:
+ for style in app_styles:
  src = os.path.join(src_dir, style)
  dst = os.path.join(dst_dir, style)
  if (not os.path.exists(dst) or force) and os.path.exists(src):
@@ -91,7 +78,8 @@ class Filesystem:
  """Backup user custom css styles"""
  css_dir = os.path.join(self.window.core.config.path, 'css')
  backup_file_extension = '.backup'
- for style in self.styles:
+ user_styles = os.listdir(css_dir)
+ for style in user_styles:
  src = os.path.join(css_dir, style)
  dst = os.path.join(css_dir, style + backup_file_extension)
  if os.path.exists(src):
@@ -457,5 +445,3 @@ class Filesystem:
  else:
  files = [os.path.join(path, f) for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
  return files
-
-
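
Note: the change above replaces the hard-coded `self.styles` list with a directory scan, so newly shipped stylesheets (such as the `web-chatgpt*.css` files added in this release) are installed without maintaining a list. A standalone sketch of that copy-if-missing logic with illustrative paths:

    # Sketch: copy every bundled CSS file that is missing from the user dir (paths are illustrative).
    import os
    import shutil

    def install_styles(src_dir: str, dst_dir: str, force: bool = False) -> None:
        os.makedirs(dst_dir, exist_ok=True)
        for name in os.listdir(src_dir):          # no hard-coded file list
            src = os.path.join(src_dir, name)
            dst = os.path.join(dst_dir, name)
            if os.path.isfile(src) and (force or not os.path.exists(dst)):
                shutil.copyfile(src, dst)

    # install_styles("app/data/css", os.path.expanduser("~/.config/pygpt-net/css"))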
pygpt_net/core/idx/chat.py
@@ -405,7 +405,9 @@ class Chat:
  self,
  query: str,
  path: str,
- model: ModelItem = None
+ model: ModelItem = None,
+ history: list = None,
+ verbose: bool = False,
  ) -> str:
  """
  Query attachment
@@ -413,6 +415,8 @@ class Chat:
  :param query: query
  :param path: path to index
  :param model: model
+ :param history: chat history
+ :param verbose: verbose mode
  :return: response
  """
  if model is None:
@@ -424,39 +428,33 @@ class Chat:
  retriever = index.as_retriever()
  nodes = retriever.retrieve(query)
  response = ""
+ score = 0
  for node in nodes:
  if node.score > 0.5:
+ score = node.score
  response = node.text
  break
  output = ""
  if response:
  output = str(response)
+ if verbose:
+ print("Found using retrieval, {} (score: {})".format(output, score))
  else:
- # 2. try with prepared prompt
- prompt = """
- # Task
- Translate the below user prompt into a suitable, short query for the RAG engine, so it can fetch the context
- related to the query from the vector database.
-
- # Important rules
- 1. Edit the user prompt in a way that allows for the best possible result.
- 2. In your response, give me only the reworded query, without any additional information from yourself.
-
- # User prompt:
- ```{prompt}```
- """.format(prompt=query)
- response_prepare = index.as_query_engine(
+ if verbose:
+ print("Not found using retrieval, trying with query engine...")
+ history = self.context.get_messages(
+ query,
+ "",
+ history,
+ )
+ memory = self.get_memory_buffer(history, service_context.llm)
+ response = index.as_chat_engine(
  llm=service_context.llm,
  streaming=False,
- ).query(prompt)
- if response_prepare:
- # try the final query with prepared prompt
- final_response = index.as_query_engine(
- llm=service_context.llm,
- streaming=False,
- ).query(response_prepare.response)
- if final_response:
- output = str(final_response.response)
+ memory=memory,
+ ).chat(query)
+ if response:
+ output = str(response.response)
  return output

  def query_retrieval(
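
Note: the rewritten `query_attachment` first tries plain retrieval and falls back to a chat engine with conversation memory only when no node scores above 0.5. The control flow can be sketched with simple stand-ins (no LlamaIndex required; the stubbed objects below are hypothetical, not the package's API):

    # Sketch of the retrieve-first, chat-engine-fallback flow (stubbed, hypothetical objects).
    from dataclasses import dataclass
    from typing import Callable, List

    @dataclass
    class Node:
        text: str
        score: float

    def query_attachment_flow(nodes: List[Node], chat_fallback: Callable[[str], str], query: str) -> str:
        # 1) accept the first retrieved node above the score threshold
        for node in nodes:
            if node.score > 0.5:
                return node.text
        # 2) otherwise ask the chat engine, which also sees the conversation history/memory
        return chat_fallback(query)

    nodes = [Node("low-relevance chunk", 0.2)]
    print(query_attachment_flow(nodes, lambda q: f"chat-engine answer for: {q}", "What is in the attachment?"))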