webscout-6.0-py3-none-any.whl → webscout-6.2-py3-none-any.whl

This diff compares the contents of two package versions publicly released to a supported registry. It is provided for informational purposes only and reflects the versions as they appear in that registry.

Potentially problematic release.


Files changed (63)
  1. webscout/AIauto.py +77 -259
  2. webscout/Agents/Onlinesearcher.py +22 -10
  3. webscout/Agents/functioncall.py +2 -2
  4. webscout/Bard.py +21 -21
  5. webscout/Extra/autollama.py +37 -20
  6. webscout/Local/__init__.py +6 -7
  7. webscout/Local/formats.py +406 -194
  8. webscout/Local/model.py +1074 -477
  9. webscout/Local/samplers.py +108 -144
  10. webscout/Local/thread.py +251 -410
  11. webscout/Local/ui.py +401 -0
  12. webscout/Local/utils.py +338 -136
  13. webscout/Provider/Amigo.py +51 -38
  14. webscout/Provider/Deepseek.py +7 -6
  15. webscout/Provider/EDITEE.py +2 -2
  16. webscout/Provider/GPTWeb.py +1 -1
  17. webscout/Provider/Llama3.py +1 -1
  18. webscout/Provider/NinjaChat.py +200 -0
  19. webscout/Provider/OLLAMA.py +1 -1
  20. webscout/Provider/Perplexity.py +1 -1
  21. webscout/Provider/Reka.py +12 -5
  22. webscout/Provider/TTI/AIuncensored.py +103 -0
  23. webscout/Provider/TTI/Nexra.py +3 -3
  24. webscout/Provider/TTI/__init__.py +4 -2
  25. webscout/Provider/TTI/aiforce.py +2 -2
  26. webscout/Provider/TTI/imgninza.py +136 -0
  27. webscout/Provider/TTI/talkai.py +116 -0
  28. webscout/Provider/TeachAnything.py +0 -3
  29. webscout/Provider/Youchat.py +1 -1
  30. webscout/Provider/__init__.py +16 -12
  31. webscout/Provider/{ChatHub.py → aimathgpt.py} +72 -88
  32. webscout/Provider/cerebras.py +143 -123
  33. webscout/Provider/cleeai.py +1 -1
  34. webscout/Provider/felo_search.py +1 -1
  35. webscout/Provider/gaurish.py +207 -0
  36. webscout/Provider/geminiprorealtime.py +160 -0
  37. webscout/Provider/genspark.py +1 -1
  38. webscout/Provider/julius.py +8 -3
  39. webscout/Provider/learnfastai.py +1 -1
  40. webscout/Provider/{aigames.py → llmchat.py} +74 -84
  41. webscout/Provider/promptrefine.py +3 -1
  42. webscout/Provider/talkai.py +196 -0
  43. webscout/Provider/turboseek.py +3 -8
  44. webscout/Provider/tutorai.py +1 -1
  45. webscout/__init__.py +2 -43
  46. webscout/exceptions.py +5 -1
  47. webscout/tempid.py +4 -73
  48. webscout/utils.py +3 -0
  49. webscout/version.py +1 -1
  50. webscout/webai.py +1 -1
  51. webscout/webscout_search.py +154 -123
  52. {webscout-6.0.dist-info → webscout-6.2.dist-info}/METADATA +164 -245
  53. {webscout-6.0.dist-info → webscout-6.2.dist-info}/RECORD +57 -55
  54. webscout/Local/rawdog.py +0 -946
  55. webscout/Provider/BasedGPT.py +0 -214
  56. webscout/Provider/TTI/amigo.py +0 -148
  57. webscout/Provider/bixin.py +0 -264
  58. webscout/Provider/xdash.py +0 -182
  59. webscout/websx_search.py +0 -19
  60. {webscout-6.0.dist-info → webscout-6.2.dist-info}/LICENSE.md +0 -0
  61. {webscout-6.0.dist-info → webscout-6.2.dist-info}/WHEEL +0 -0
  62. {webscout-6.0.dist-info → webscout-6.2.dist-info}/entry_points.txt +0 -0
  63. {webscout-6.0.dist-info → webscout-6.2.dist-info}/top_level.txt +0 -0
webscout/Local/thread.py CHANGED
@@ -1,78 +1,73 @@
1
- import json
2
- from ._version import __version__, __llama_cpp_version__
3
-
4
- """Submodule containing the Thread class, used for interaction with a Model"""
5
-
6
1
  import sys
2
+ import time
3
+ from typing import Optional, Literal, Union, Generator, Tuple, TextIO
4
+ import uuid
7
5
 
8
- from .model import Model, assert_model_is_loaded, _SupportsWriteAndFlush
9
- from .utils import RESET_ALL, cls, print_verbose, truncate
6
+ from .model import Model, assert_model_is_loaded, _SupportsWriteAndFlush
7
+ from .utils import RESET_ALL, cls, print_verbose, truncate
10
8
  from .samplers import SamplerSettings, DefaultSampling
11
- from typing import Optional, Literal, Union
12
- from .formats import AdvancedFormat
13
-
14
- from .formats import blank as formats_blank
9
+ from .formats import AdvancedFormat, blank as formats_blank
15
10
 
16
11
 
17
12
  class Message(dict):
18
13
  """
19
- A dictionary representing a single message within a Thread
14
+ Represents a single message within a Thread.
20
15
 
21
- Works just like a normal `dict`, but a new method:
22
- - `.as_string` - Return the full message string
16
+ Inherits from `dict` and provides additional functionality:
23
17
 
24
- Generally, messages have these keys:
25
- - `role` - The role of the speaker: 'system', 'user', or 'bot'
26
- - `prefix` - The text that prefixes the message content
27
- - `content` - The actual content of the message
28
- - `suffix` - The text that suffixes the message content
18
+ - `as_string()`: Returns the full message string.
19
+
20
+ Typical message keys:
21
+ - `role`: The speaker's role ('system', 'user', 'bot').
22
+ - `prefix`: Text prefixing the content.
23
+ - `content`: The message content.
24
+ - `suffix`: Text suffixing the content.
29
25
  """
30
26
 
31
27
  def __repr__(self) -> str:
32
- return \
33
- f"Message([" \
34
- f"('role', {repr(self['role'])}), " \
35
- f"('prefix', {repr(self['prefix'])}), " \
36
- f"('content', {repr(self['content'])}), " \
28
+ return (
29
+ f"Message(["
30
+ f"('role', {repr(self['role'])}), "
31
+ f"('prefix', {repr(self['prefix'])}), "
32
+ f"('content', {repr(self['content'])}), "
37
33
  f"('suffix', {repr(self['suffix'])})])"
34
+ )
38
35
 
39
- def as_string(self):
40
- """Return the full message string"""
36
+ def as_string(self) -> str:
37
+ """Returns the full message string."""
41
38
  try:
42
39
  return self['prefix'] + self['content'] + self['suffix']
43
40
  except KeyError as e:
44
41
  e.add_note(
45
- "as_string: Message is missing one or more of the "
46
- "required 'prefix', 'content', 'suffix' attributes - this is "
47
- "unexpected"
42
+ "Message.as_string(): Missing 'prefix', 'content', or 'suffix' "
43
+ "attribute. This is unexpected."
48
44
  )
49
45
  raise e
50
46
 
51
47
 
52
48
  class Thread:
53
49
  """
54
- Provide functionality to facilitate easy interactions with a Model
55
-
56
- This is just a brief overview of m.Thread.
57
- To see a full description of each method and its parameters,
58
- call help(Thread), or see the relevant docstring.
59
-
60
- The following methods are available:
61
- - `.add_message()` - Add a message to `Thread.messages`
62
- - `.as_string()` - Return this thread's complete message history as a string
63
- - `.create_message()` - Create a message using the format of this thread
64
- - `.inference_str_from_messages()` - Using the list of messages, return a string suitable for inference
65
- - `.interact()` - Start an interactive, terminal-based chat session
66
- - `.len_messages()` - Get the total length of all messages in tokens
67
- - `.print_stats()` - Print stats about the context usage in this thread
68
- - `.reset()` - Clear the list of messages
69
- - `.send()` - Send a message in this thread
70
-
71
- The following attributes are available:
72
- - `.format` - The format being used for messages in this thread
73
- - `.messages` - The list of messages in this thread
74
- - `.model` - The `m.Model` instance used by this thread
75
- - `.sampler` - The SamplerSettings object used in this thread
50
+ Facilitates easy interactions with a Model.
51
+
52
+ Methods:
53
+ - `add_message()`: Appends a message to the thread's messages.
54
+ - `as_string()`: Returns the complete message history as a string.
55
+ - `create_message()`: Creates a message using the thread's format.
56
+ - `inference_str_from_messages()`: Generates an inference-ready string from messages.
57
+ - `interact()`: Starts an interactive chat session.
58
+ - `len_messages()`: Gets the total token length of all messages.
59
+ - `print_stats()`: Prints context usage statistics.
60
+ - `reset()`: Clears the message history.
61
+ - `send()`: Sends a message and receives a response.
62
+ - `warmup()`: Warms up the model by running a simple generation.
63
+
64
+ Attributes:
65
+ - `format`: The message format (see `webscout.AIutel.formats`).
66
+ - `messages`: The list of messages in the thread.
67
+ - `model`: The associated `webscout.AIutel.model.Model` instance.
68
+ - `sampler`: The `webscout.AIutel.samplers.SamplerSettings` for text generation.
69
+ - `tools`: A list of tools available for function calling.
70
+ - `uuid`: A unique identifier for the thread (UUID object).
76
71
  """
77
72
 
78
73
  def __init__(
@@ -81,24 +76,18 @@ class Thread:
81
76
  format: Union[dict, AdvancedFormat],
82
77
  sampler: SamplerSettings = DefaultSampling,
83
78
  messages: Optional[list[Message]] = None,
84
-
85
79
  ):
86
-
87
80
  """
88
- Given a Model and a format, construct a Thread instance.
81
+ Initializes a Thread instance.
89
82
 
90
- model: The Model to use for text generation
91
- format: The format specifying how messages should be structured (see m.formats)
92
-
93
- The following parameters are optional:
94
- - sampler: The SamplerSettings object used to control text generation
95
- - messages: A list of m.thread.Message objects to add to the Thread upon construction
83
+ Args:
84
+ model: The Model instance for text generation.
85
+ format: The message format (see `webscout.AIutel.formats`).
86
+ sampler: Sampler settings for controlling generation.
87
+ messages: Initial list of messages (optional).
96
88
  """
97
-
98
89
  assert isinstance(model, Model), \
99
- "Thread: model should be an " + \
100
- f"instance of webscout.Local.Model, not {type(model)}"
101
-
90
+ f"Thread: model should be a webscout.AIutel.model.Model, not {type(model)}"
102
91
  assert_model_is_loaded(model)
103
92
 
104
93
  assert isinstance(format, (dict, AdvancedFormat)), \
@@ -106,23 +95,17 @@ class Thread:
106
95
 
107
96
  if any(k not in format.keys() for k in formats_blank.keys()):
108
97
  raise KeyError(
109
- "Thread: format is missing one or more required keys, see " + \
110
- "webscout.Local.formats.blank for an example"
98
+ "Thread: format is missing one or more required keys, see "
99
+ "webscout.AIutel.formats.blank for an example"
111
100
  )
112
101
 
113
102
  assert isinstance(format['stops'], list), \
114
- "Thread: format['stops'] should be list, not " + \
115
- f"{type(format['stops'])}"
103
+ f"Thread: format['stops'] should be list, not {type(format['stops'])}"
116
104
 
117
105
  assert all(
118
106
  hasattr(sampler, attr) for attr in [
119
- 'max_len_tokens',
120
- 'temp',
121
- 'top_p',
122
- 'min_p',
123
- 'frequency_penalty',
124
- 'presence_penalty',
125
- 'repeat_penalty',
107
+ 'max_len_tokens', 'temp', 'top_p', 'min_p',
108
+ 'frequency_penalty', 'presence_penalty', 'repeat_penalty',
126
109
  'top_k'
127
110
  ]
128
111
  ), 'Thread: sampler is missing one or more required attributes'
@@ -132,30 +115,29 @@ class Thread:
132
115
  if not all(isinstance(msg, Message) for msg in self._messages):
133
116
  raise TypeError(
134
117
  "Thread: one or more messages provided to __init__() is "
135
- "not an instance of m.thread.Message"
118
+ "not an instance of webscout.AIutel.thread.Message"
136
119
  )
137
-
138
- # Thread.messages is never empty, unless `messages` param is explicity
139
- # set to `[]` during construction
140
120
 
141
- self.model: Model = model
142
- self.format: Union[dict, AdvancedFormat] = format
121
+ self.model = model
122
+ self.format = format
143
123
  self.messages: list[Message] = [
144
- self.create_message("system", self.format['system_content'])
124
+ self.create_message("system", self.format['system_prompt'])
145
125
  ] if self._messages is None else self._messages
146
- self.sampler: SamplerSettings = sampler
147
- self.tools = []
126
+ self.sampler = sampler
127
+ self.tools = []
128
+ self.uuid = uuid.uuid4() # Generate a UUID for the thread
129
+
148
130
  if self.model.verbose:
149
- print_verbose("new Thread instance with the following attributes:")
131
+ print_verbose("New Thread instance with attributes:")
150
132
  print_verbose(f"model == {self.model}")
151
133
  print_verbose(f"format['system_prefix'] == {truncate(repr(self.format['system_prefix']))}")
152
- print_verbose(f"format['system_content'] == {truncate(repr(self.format['system_content']))}")
134
+ print_verbose(f"format['system_prompt'] == {truncate(repr(self.format['system_prompt']))}")
153
135
  print_verbose(f"format['system_suffix'] == {truncate(repr(self.format['system_suffix']))}")
154
136
  print_verbose(f"format['user_prefix'] == {truncate(repr(self.format['user_prefix']))}")
155
- print_verbose(f"format['user_content'] == {truncate(repr(self.format['user_content']))}")
137
+ # print_verbose(f"format['user_content'] == {truncate(repr(self.format['user_content']))}")
156
138
  print_verbose(f"format['user_suffix'] == {truncate(repr(self.format['user_suffix']))}")
157
139
  print_verbose(f"format['bot_prefix'] == {truncate(repr(self.format['bot_prefix']))}")
158
- print_verbose(f"format['bot_content'] == {truncate(repr(self.format['bot_content']))}")
140
+ # print_verbose(f"format['bot_content'] == {truncate(repr(self.format['bot_content']))}")
159
141
  print_verbose(f"format['bot_suffix'] == {truncate(repr(self.format['bot_suffix']))}")
160
142
  print_verbose(f"format['stops'] == {truncate(repr(self.format['stops']))}")
161
143
  print_verbose(f"sampler.temp == {self.sampler.temp}")
@@ -165,126 +147,85 @@ class Thread:
165
147
  print_verbose(f"sampler.presence_penalty == {self.sampler.presence_penalty}")
166
148
  print_verbose(f"sampler.repeat_penalty == {self.sampler.repeat_penalty}")
167
149
  print_verbose(f"sampler.top_k == {self.sampler.top_k}")
168
- def add_tool(self, tool: dict):
169
- """Adds a tool to the Thread for function calling."""
150
+
151
+ def add_tool(self, tool: dict) -> None:
152
+ """
153
+ Adds a tool to the Thread for function calling.
154
+
155
+ Args:
156
+ tool (dict): A dictionary describing the tool, containing
157
+ 'function' with 'name', 'description', and 'execute' keys.
158
+ """
170
159
  self.tools.append(tool)
171
- self.model.register_tool(tool['function']['name'], tool['function']['execute']) # Register the tool
160
+ self.model.register_tool(tool['function']['name'], tool['function']['execute'])
161
+ self.messages[0]['content'] += f"\nYou have access to the following tool:\n{tool['function']['description']}"
172
162
 
173
- # Include tool information in the system message (optional, but helpful)
174
- self.messages[0]['content'] += f"\nYou have access to the following tool:\n{tool['function']['description']}"
175
163
  def __repr__(self) -> str:
176
- return \
177
- f"Thread({repr(self.model)}, {repr(self.format)}, " + \
164
+ return (
165
+ f"Thread({repr(self.model)}, {repr(self.format)}, "
178
166
  f"{repr(self.sampler)}, {repr(self.messages)})"
179
-
167
+ )
168
+
180
169
  def __str__(self) -> str:
181
170
  return self.as_string()
182
-
183
- def __len__(self) -> int:
184
- """
185
- `len(Thread)` returns the length of the Thread in tokens
186
171
 
187
- To get the number of messages in the Thread, use `len(Thread.messages)`
188
- """
172
+ def __len__(self) -> int:
173
+ """Returns the total token length of all messages."""
189
174
  return self.len_messages()
190
175
 
191
- def create_message(
192
- self,
193
- role: Literal['system', 'user', 'bot'],
194
- content: str
195
- ) -> Message:
196
- """
197
- Construct a message using the format of this Thread
198
- """
199
-
176
+ def create_message(self, role: Literal['system', 'user', 'bot'], content: str) -> Message:
177
+ """Constructs a message using the thread's format."""
200
178
  assert role.lower() in ['system', 'user', 'bot'], \
201
- f"create_message: role should be 'system', 'user', or 'bot', not '{role.lower()}'"
202
-
179
+ f"Thread.create_message(): role should be 'system', 'user', or 'bot', not '{role.lower()}'"
203
180
  assert isinstance(content, str), \
204
- f"create_message: content should be str, not {type(content)}"
205
-
206
- if role.lower() == 'system':
207
- return Message(
208
- [
209
- ('role', 'system'),
210
- ('prefix', self.format['system_prefix']),
211
- ('content', content),
212
- ('suffix', self.format['system_suffix'])
213
- ]
214
- )
215
-
216
- elif role.lower() == 'user':
217
- return Message(
218
- [
219
- ('role', 'user'),
220
- ('prefix', self.format['user_prefix']),
221
- ('content', content),
222
- ('suffix', self.format['user_suffix'])
223
- ]
224
- )
225
-
226
- elif role.lower() == 'bot':
227
- return Message(
228
- [
229
- ('role', 'bot'),
230
- ('prefix', self.format['bot_prefix']),
231
- ('content', content),
232
- ('suffix', self.format['bot_suffix'])
233
- ]
234
- )
235
-
236
- def len_messages(self) -> int:
237
- """
238
- Return the total length of all messages in this thread, in tokens.
239
-
240
- Can also use `len(Thread)`."""
181
+ f"Thread.create_message(): content should be str, not {type(content)}"
182
+
183
+ message_data = {
184
+ 'system': {
185
+ 'role': 'system',
186
+ 'prefix': self.format['system_prefix'],
187
+ 'content': content,
188
+ 'suffix': self.format['system_suffix']
189
+ },
190
+ 'user': {
191
+ 'role': 'user',
192
+ 'prefix': self.format['user_prefix'],
193
+ 'content': content,
194
+ 'suffix': self.format['user_suffix']
195
+ },
196
+ 'bot': {
197
+ 'role': 'bot',
198
+ 'prefix': self.format['bot_prefix'],
199
+ 'content': content,
200
+ 'suffix': self.format['bot_suffix']
201
+ }
202
+ }
203
+
204
+ return Message(message_data[role.lower()])
241
205
 
206
+ def len_messages(self) -> int:
207
+ """Returns the total length of all messages in tokens."""
242
208
  return self.model.get_length(self.as_string())
243
209
 
244
- def add_message(
245
- self,
246
- role: Literal['system', 'user', 'bot'],
247
- content: str
248
- ) -> None:
249
- """
250
- Create a message and append it to `Thread.messages`.
251
-
252
- `Thread.add_message(...)` is a shorthand for
253
- `Thread.messages.append(Thread.create_message(...))`
254
- """
255
- self.messages.append(
256
- self.create_message(
257
- role=role,
258
- content=content
259
- )
260
- )
210
+ def add_message(self, role: Literal['system', 'user', 'bot'], content: str) -> None:
211
+ """Appends a message to the thread's messages."""
212
+ self.messages.append(self.create_message(role, content))
261
213
 
262
214
  def inference_str_from_messages(self) -> str:
263
- """
264
- Using the list of messages, construct a string suitable for inference,
265
- respecting the format and context length of this thread.
266
- """
267
-
215
+ """Constructs an inference-ready string from messages."""
268
216
  inf_str = ''
269
217
  sys_msg_str = ''
270
- # whether to treat the first message as necessary to keep
271
218
  sys_msg_flag = False
272
219
  context_len_budget = self.model.context_length
273
220
 
274
- # if at least 1 message is history
275
- if len(self.messages) >= 1:
276
- # if first message has system role
277
- if self.messages[0]['role'] == 'system':
278
- sys_msg_flag = True
279
- sys_msg = self.messages[0]
280
- sys_msg_str = sys_msg.as_string()
281
- context_len_budget -= self.model.get_length(sys_msg_str)
282
-
283
- if sys_msg_flag:
284
- iterator = reversed(self.messages[1:])
285
- else:
286
- iterator = reversed(self.messages)
287
-
221
+ if len(self.messages) >= 1 and self.messages[0]['role'] == 'system':
222
+ sys_msg_flag = True
223
+ sys_msg = self.messages[0]
224
+ sys_msg_str = sys_msg.as_string()
225
+ context_len_budget -= self.model.get_length(sys_msg_str)
226
+
227
+ iterator = reversed(self.messages[1:]) if sys_msg_flag else reversed(self.messages)
228
+
288
229
  for message in iterator:
289
230
  msg_str = message.as_string()
290
231
  context_len_budget -= self.model.get_length(msg_str)
@@ -292,21 +233,13 @@ class Thread:
292
233
  break
293
234
  inf_str = msg_str + inf_str
294
235
 
295
- if sys_msg_flag:
296
- inf_str = sys_msg_str + inf_str
236
+ inf_str = sys_msg_str + inf_str if sys_msg_flag else inf_str
297
237
  inf_str += self.format['bot_prefix']
298
238
 
299
239
  return inf_str
300
240
 
301
-
302
241
  def send(self, prompt: str) -> str:
303
- """
304
- Send a message in this thread. This adds your message and the bot's
305
- response to the list of messages.
306
-
307
- Returns a string containing the response to your message.
308
- """
309
-
242
+ """Sends a message and receives a response."""
310
243
  self.add_message("user", prompt)
311
244
  output = self.model.generate(
312
245
  self.inference_str_from_messages(),
@@ -314,85 +247,28 @@ class Thread:
314
247
  sampler=self.sampler
315
248
  )
316
249
  self.add_message("bot", output)
317
-
318
250
  return output
319
-
320
251
 
321
252
  def _interactive_update_sampler(self) -> None:
322
- """Interactively update the sampler settings used in this Thread"""
253
+ """Interactively updates sampler settings."""
323
254
  print()
324
255
  try:
325
- new_max_len_tokens = input(f'max_len_tokens: {self.sampler.max_len_tokens} -> ')
326
- new_temp = input(f'temp: {self.sampler.temp} -> ')
327
- new_top_p = input(f'top_p: {self.sampler.top_p} -> ')
328
- new_min_p = input(f'min_p: {self.sampler.min_p} -> ')
329
- new_frequency_penalty = input(f'frequency_penalty: {self.sampler.frequency_penalty} -> ')
330
- new_presence_penalty = input(f'presence_penalty: {self.sampler.presence_penalty} -> ')
331
- new_repeat_penalty = input(f'repeat_penalty: {self.sampler.repeat_penalty} -> ')
332
- new_top_k = input(f'top_k: {self.sampler.top_k} -> ')
333
-
256
+ for param_name in SamplerSettings.param_types:
257
+ current_value = getattr(self.sampler, param_name)
258
+ new_value = input(f'{param_name}: {current_value} -> ')
259
+ try:
260
+ if new_value.lower() == 'none':
261
+ setattr(self.sampler, param_name, None)
262
+ elif param_name in ('top_k', 'max_len_tokens'):
263
+ setattr(self.sampler, param_name, int(new_value))
264
+ else:
265
+ setattr(self.sampler, param_name, float(new_value))
266
+ print(f'webscout.AIutel: {param_name} updated')
267
+ except ValueError:
268
+ print(f'webscout.AIutel: {param_name} not updated (invalid input)')
269
+ print()
334
270
  except KeyboardInterrupt:
335
- print('\nwebscout.Local: sampler settings not updated\n')
336
- return
337
- print()
338
-
339
- try:
340
- self.sampler.max_len_tokens = int(new_max_len_tokens)
341
- except ValueError:
342
- pass
343
- else:
344
- print('webscout.Local: max_len_tokens updated')
345
-
346
- try:
347
- self.sampler.temp = float(new_temp)
348
- except ValueError:
349
- pass
350
- else:
351
- print('webscout.Local: temp updated')
352
-
353
- try:
354
- self.sampler.top_p = float(new_top_p)
355
- except ValueError:
356
- pass
357
- else:
358
- print('webscout.Local: top_p updated')
359
-
360
- try:
361
- self.sampler.min_p = float(new_min_p)
362
- except ValueError:
363
- pass
364
- else:
365
- print('webscout.Local: min_p updated')
366
-
367
- try:
368
- self.sampler.frequency_penalty = float(new_frequency_penalty)
369
- except ValueError:
370
- pass
371
- else:
372
- print('webscout.Local: frequency_penalty updated')
373
-
374
- try:
375
- self.sampler.presence_penalty = float(new_presence_penalty)
376
- except ValueError:
377
- pass
378
- else:
379
- print('webscout.Local: presence_penalty updated')
380
-
381
- try:
382
- self.sampler.repeat_penalty = float(new_repeat_penalty)
383
- except ValueError:
384
- pass
385
- else:
386
- print('webscout.Local: repeat_penalty updated')
387
-
388
- try:
389
- self.sampler.top_k = int(new_top_k)
390
- except ValueError:
391
- pass
392
- else:
393
- print('webscout.Local: top_k updated')
394
- print()
395
-
271
+ print('\nwebscout.AIutel: Sampler settings not updated\n')
396
272
 
397
273
  def _interactive_input(
398
274
  self,
@@ -401,21 +277,20 @@ class Thread:
401
277
  _user_style: str,
402
278
  _bot_style: str,
403
279
  _special_style: str
404
- ) -> tuple:
405
- """
406
- Recive input from the user, while handling multi-line input
407
- and commands
408
- """
409
- full_user_input = '' # may become multiline
280
+ ) -> Tuple[Optional[str], Optional[str]]:
281
+ """Receives input from the user, handling multi-line input and commands."""
282
+ full_user_input = ''
410
283
 
411
284
  while True:
412
- user_input = input(prompt)
413
-
285
+ try:
286
+ user_input = input(prompt)
287
+ except KeyboardInterrupt:
288
+ print(f"{RESET_ALL}\n")
289
+ return None, None
290
+
414
291
  if user_input.endswith('\\'):
415
292
  full_user_input += user_input[:-1] + '\n'
416
-
417
293
  elif user_input == '!':
418
-
419
294
  print()
420
295
  try:
421
296
  command = input(f'{RESET_ALL} ! {_dim_style}')
@@ -424,121 +299,105 @@ class Thread:
424
299
  continue
425
300
 
426
301
  if command == '':
427
- print(f'\n[no command]\n')
428
-
302
+ print('\n[No command]\n')
429
303
  elif command.lower() in ['reset', 'restart']:
430
304
  self.reset()
431
- print(f'\n[thread reset]\n')
432
-
305
+ print('\n[Thread reset]\n')
433
306
  elif command.lower() in ['cls', 'clear']:
434
307
  cls()
435
308
  print()
436
-
437
309
  elif command.lower() in ['ctx', 'context']:
438
310
  print(f"\n{self.len_messages()}\n")
439
-
440
311
  elif command.lower() in ['stats', 'print_stats']:
441
312
  print()
442
313
  self.print_stats()
443
314
  print()
444
-
445
315
  elif command.lower() in ['sampler', 'samplers', 'settings']:
446
316
  self._interactive_update_sampler()
447
-
448
317
  elif command.lower() in ['str', 'string', 'as_string']:
449
318
  print(f"\n{self.as_string()}\n")
450
-
451
319
  elif command.lower() in ['repr', 'save', 'backup']:
452
320
  print(f"\n{repr(self)}\n")
453
-
454
321
  elif command.lower() in ['remove', 'rem', 'delete', 'del']:
455
322
  print()
456
- old_len = len(self.messages)
457
- del self.messages[-1]
458
- assert len(self.messages) == (old_len - 1)
459
- print('[removed last message]\n')
460
-
323
+ if len(self.messages) > 1: # Prevent deleting the system message
324
+ old_len = len(self.messages)
325
+ del self.messages[-1]
326
+ assert len(self.messages) == (old_len - 1)
327
+ print('[Removed last message]\n')
328
+ else:
329
+ print('[Cannot remove system message]\n')
461
330
  elif command.lower() in ['last', 'repeat']:
462
- last_msg = self.messages[-1]
463
- if last_msg['role'] == 'user':
464
- print(f"\n{_user_style}{last_msg['content']}{RESET_ALL}\n")
465
- elif last_msg['role'] == 'bot':
466
- print(f"\n{_bot_style}{last_msg['content']}{RESET_ALL}\n")
467
-
331
+ if len(self.messages) > 1:
332
+ last_msg = self.messages[-1]
333
+ if last_msg['role'] == 'user':
334
+ print(f"\n{_user_style}{last_msg['content']}{RESET_ALL}\n")
335
+ elif last_msg['role'] == 'bot':
336
+ print(f"\n{_bot_style}{last_msg['content']}{RESET_ALL}\n")
337
+ else:
338
+ print("\n[No previous message]\n")
468
339
  elif command.lower() in ['inf', 'inference', 'inf_str']:
469
340
  print(f'\n"""{self.inference_str_from_messages()}"""\n')
470
-
471
341
  elif command.lower() in ['reroll', 're-roll', 're', 'swipe']:
472
- old_len = len(self.messages)
473
- del self.messages[-1]
474
- assert len(self.messages) == (old_len - 1)
475
- return '', None
476
-
342
+ if len(self.messages) > 1:
343
+ old_len = len(self.messages)
344
+ del self.messages[-1]
345
+ assert len(self.messages) == (old_len - 1)
346
+ return '', None
347
+ else:
348
+ print("\n[Cannot reroll system message]\n")
477
349
  elif command.lower() in ['exit', 'quit']:
478
350
  print(RESET_ALL)
479
351
  return None, None
480
-
481
352
  elif command.lower() in ['help', '/?', '?']:
482
- print()
483
- print('reset | restart -- Reset the thread to its original state')
484
- print('clear | cls -- Clear the terminal')
485
- print('context | ctx -- Get the context usage in tokens')
486
- print('print_stats | stats -- Get the context usage stats')
487
- print('sampler | settings -- Update the sampler settings')
488
- print('string | str -- Print the message history as a string')
489
- print('repr | save -- Print the representation of the thread')
490
- print('remove | delete -- Remove the last message')
491
- print('last | repeat -- Repeat the last message')
492
- print('inference | inf -- Print the inference string')
493
- print('reroll | swipe -- Regenerate the last message')
494
- print('exit | quit -- Exit the interactive chat (can also use ^C)')
495
- print('help | ? -- Show this screen')
496
- print()
497
- print("TIP: type < at the prompt and press ENTER to prefix the bot's next message.")
498
- print(' for example, type "Sure!" to bypass refusals')
499
- print()
500
- print("TIP: type !! at the prompt and press ENTER to insert a system message")
501
- print()
502
-
353
+ print(
354
+ "\n"
355
+ "reset | restart -- Reset the thread to its original state\n"
356
+ "clear | cls -- Clear the terminal\n"
357
+ "context | ctx -- Get the context usage in tokens\n"
358
+ "print_stats | stats -- Get the context usage stats\n"
359
+ "sampler | settings -- Update the sampler settings\n"
360
+ "string | str -- Print the message history as a string\n"
361
+ "repr | save -- Print the representation of the thread\n"
362
+ "remove | delete -- Remove the last message\n"
363
+ "last | repeat -- Repeat the last message\n"
364
+ "inference | inf -- Print the inference string\n"
365
+ "reroll | swipe -- Regenerate the last message\n"
366
+ "exit | quit -- Exit the interactive chat (can also use ^C)\n"
367
+ "help | ? -- Show this screen\n"
368
+ "\n"
369
+ "TIP: Type '<' at the prompt and press ENTER to prefix the bot's next message.\n"
370
+ " For example, type 'Sure!' to bypass refusals\n"
371
+ "\n"
372
+ "TIP: Type '!!' at the prompt and press ENTER to insert a system message\n"
373
+ "\n"
374
+ )
503
375
  else:
504
- print(f'\n[unknown command]\n')
505
-
506
- # prefix the bot's next message
376
+ print('\n[Unknown command]\n')
507
377
  elif user_input == '<':
508
-
509
378
  print()
510
379
  try:
511
380
  next_message_start = input(f'{RESET_ALL} < {_dim_style}')
512
-
513
381
  except KeyboardInterrupt:
514
382
  print(f'{RESET_ALL}\n')
515
383
  continue
516
-
517
384
  else:
518
385
  print()
519
386
  return '', next_message_start
520
-
521
- # insert a system message
522
387
  elif user_input == '!!':
523
388
  print()
524
-
525
389
  try:
526
390
  next_sys_msg = input(f'{RESET_ALL} !! {_special_style}')
527
-
528
391
  except KeyboardInterrupt:
529
392
  print(f'{RESET_ALL}\n')
530
393
  continue
531
-
532
394
  else:
533
395
  print()
534
- return next_sys_msg, -1
535
-
536
- # concatenate multi-line input
396
+ return next_sys_msg, '-1'
537
397
  else:
538
398
  full_user_input += user_input
539
399
  return full_user_input, None
540
400
 
541
-
542
401
  def interact(
543
402
  self,
544
403
  color: bool = True,
@@ -546,71 +405,46 @@ class Thread:
546
405
  stream: bool = True
547
406
  ) -> None:
548
407
  """
549
- Start an interactive chat session using this Thread.
550
-
551
- While text is being generated, press `^C` to interrupt the bot.
552
- Then you have the option to press `ENTER` to re-roll, or to simply type
553
- another message.
554
-
555
- At the prompt, press `^C` to end the chat session.
556
-
557
- Type `!` and press `ENTER` to enter a basic command prompt. For a list
558
- of commands, type `help` at this prompt.
559
-
560
- Type `<` and press `ENTER` to prefix the bot's next message, for
561
- example with `Sure!`.
408
+ Starts an interactive chat session.
562
409
 
563
- Type `!!` at the prompt and press `ENTER` to insert a system message.
410
+ Allows for real-time interaction with the model, including
411
+ interrupting generation, regenerating responses, and using
412
+ commands.
564
413
 
565
- The following parameters are optional:
566
- - color: Whether to use colored text to differentiate user / bot
567
- - header: Header text to print at the start of the interaction
568
- - stream: Whether to stream text as it is generated
414
+ Args:
415
+ color (bool, optional): Whether to use colored output. Defaults to True.
416
+ header (Optional[str], optional): Header text to display. Defaults to None.
417
+ stream (bool, optional): Whether to stream the response. Defaults to True.
569
418
  """
570
419
  print()
571
-
572
- # fresh import of color codes in case `color` param has changed
573
420
  from .utils import SPECIAL_STYLE, USER_STYLE, BOT_STYLE, DIM_STYLE
574
-
575
- # disable color codes if explicitly disabled by `color` param
576
421
  if not color:
577
- SPECIAL_STYLE = ''
578
- USER_STYLE = ''
579
- BOT_STYLE = ''
580
- DIM_STYLE = ''
422
+ SPECIAL_STYLE = USER_STYLE = BOT_STYLE = DIM_STYLE = ''
581
423
 
582
424
  if header is not None:
583
425
  print(f"{SPECIAL_STYLE}{header}{RESET_ALL}\n")
584
-
585
- while True:
586
426
 
427
+ while True:
587
428
  prompt = f"{RESET_ALL} > {USER_STYLE}"
588
-
589
429
  try:
590
430
  user_prompt, next_message_start = self._interactive_input(
591
- prompt,
592
- DIM_STYLE,
593
- USER_STYLE,
594
- BOT_STYLE,
595
- SPECIAL_STYLE
431
+ prompt, DIM_STYLE, USER_STYLE, BOT_STYLE, SPECIAL_STYLE
596
432
  )
597
433
  except KeyboardInterrupt:
598
434
  print(f"{RESET_ALL}\n")
599
435
  return
600
436
 
601
- # got 'exit' or 'quit' command
602
437
  if user_prompt is None and next_message_start is None:
603
438
  break
604
-
605
- # insert a system message via `!!` prompt
606
- if next_message_start == -1:
439
+
440
+ if next_message_start == '-1':
607
441
  self.add_message('system', user_prompt)
608
442
  continue
609
-
443
+
610
444
  if next_message_start is not None:
611
445
  try:
446
+ print(f"{BOT_STYLE}{next_message_start}", end='', flush=True)
612
447
  if stream:
613
- print(f"{BOT_STYLE}{next_message_start}", end='', flush=True)
614
448
  output = next_message_start + self.model.stream_print(
615
449
  self.inference_str_from_messages() + next_message_start,
616
450
  stops=self.format['stops'],
@@ -618,7 +452,6 @@ class Thread:
618
452
  end=''
619
453
  )
620
454
  else:
621
- print(f"{BOT_STYLE}", end='', flush=True)
622
455
  output = next_message_start + self.model.generate(
623
456
  self.inference_str_from_messages() + next_message_start,
624
457
  stops=self.format['stops'],
@@ -626,12 +459,15 @@ class Thread:
626
459
  )
627
460
  print(output, end='', flush=True)
628
461
  except KeyboardInterrupt:
629
- print(f"{DIM_STYLE} [message not added to history; press ENTER to re-roll]\n")
462
+ print(
463
+ f"{DIM_STYLE} [Message not added to history; "
464
+ "press ENTER to re-roll]\n"
465
+ )
630
466
  continue
631
467
  else:
632
468
  self.add_message("bot", output)
633
469
  else:
634
- print(BOT_STYLE)
470
+ print(BOT_STYLE, end='')
635
471
  if user_prompt != "":
636
472
  self.add_message("user", user_prompt)
637
473
  try:
@@ -650,49 +486,54 @@ class Thread:
650
486
  )
651
487
  print(output, end='', flush=True)
652
488
  except KeyboardInterrupt:
653
- print(f"{DIM_STYLE} [message not added to history; press ENTER to re-roll]\n")
489
+ print(
490
+ f"{DIM_STYLE} [Message not added to history; "
491
+ "press ENTER to re-roll]\n"
492
+ )
654
493
  continue
655
494
  else:
656
495
  self.add_message("bot", output)
657
496
 
658
497
  if output.endswith("\n\n"):
659
- print(RESET_ALL, end = '', flush=True)
498
+ print(RESET_ALL, end='', flush=True)
660
499
  elif output.endswith("\n"):
661
500
  print(RESET_ALL)
662
501
  else:
663
502
  print(f"{RESET_ALL}\n")
664
503
 
665
-
666
504
  def reset(self) -> None:
667
- """
668
- Clear the list of messages, which resets the thread to its original
669
- state
670
- """
505
+ """Clears the message history, resetting the thread to its initial state."""
671
506
  self.messages: list[Message] = [
672
- self.create_message("system", self.format['system_content'])
507
+ self.create_message("system", self.format['system_prompt'])
673
508
  ] if self._messages is None else self._messages
674
-
675
-
509
+
676
510
  def as_string(self) -> str:
677
- """Return this thread's message history as a string"""
678
- thread_string = ''
679
- for msg in self.messages:
680
- thread_string += msg.as_string()
681
- return thread_string
511
+ """Returns the thread's message history as a string."""
512
+ return ''.join(msg.as_string() for msg in self.messages)
682
513
 
683
-
684
514
  def print_stats(
685
515
  self,
686
516
  end: str = '\n',
687
- file: _SupportsWriteAndFlush = sys.stdout,
517
+ file: TextIO = sys.stdout,
688
518
  flush: bool = True
689
519
  ) -> None:
690
- """Print stats about the context usage in this thread"""
520
+ """Prints context usage statistics."""
691
521
  thread_len_tokens = self.len_messages()
692
522
  max_ctx_len = self.model.context_length
693
- context_used_percentage = round((thread_len_tokens/max_ctx_len)*100)
694
- print(f"{thread_len_tokens} / {max_ctx_len} tokens", file=file, flush=flush)
695
- print(f"{context_used_percentage}% of context used", file=file, flush=flush)
696
- print(f"{len(self.messages)} messages", end=end, file=file, flush=flush)
523
+ context_used_percentage = round((thread_len_tokens / max_ctx_len) * 100)
524
+ print(
525
+ f"{thread_len_tokens} / {max_ctx_len} tokens "
526
+ f"({context_used_percentage}% of context used), "
527
+ f"{len(self.messages)} messages",
528
+ end=end, file=file, flush=flush
529
+ )
697
530
  if not flush:
698
- file.flush()
531
+ file.flush()
532
+
533
+ def warmup(self):
534
+ """
535
+ Warms up the model by running a simple generation.
536
+ """
537
+ if self.model.verbose:
538
+ print_verbose("Warming up the model...")
539
+ self.model.generate("This is a warm-up prompt.")
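
Editor's note: the thread.py changes above rename the 'system_content' format key to 'system_prompt', give each Thread a uuid, and add tool registration via add_tool() plus a warmup() helper. The sketch below shows how the reworked Thread might be driven from user code. It is inferred from this diff only: the import paths, the Model(...) constructor arguments, and the exact contents of formats.blank are assumptions, not something this diff confirms.

# Hypothetical usage sketch for the reworked Thread class in webscout 6.2.
# Everything here is inferred from the diff above; the Model(...) constructor
# arguments and the exact contents of formats.blank are assumptions.
import datetime

from webscout.Local.model import Model
from webscout.Local.formats import blank
from webscout.Local.thread import Thread

# Start from the library's blank format so every key the Thread constructor
# checks for is present (blank's exact contents are not shown in this diff),
# then fill in the keys the new Thread code reads. Note the rename: 6.0 used
# 'system_content', 6.2 reads 'system_prompt'.
chat_format = dict(blank)
chat_format.update({
    "system_prefix": "<|system|>\n",
    "system_prompt": "You are a helpful assistant.",
    "system_suffix": "\n",
    "user_prefix": "<|user|>\n",
    "user_suffix": "\n",
    "bot_prefix": "<|assistant|>\n",
    "bot_suffix": "\n",
    "stops": ["<|user|>"],  # must be a list, per the assert in Thread.__init__
})

model = Model("path/to/model.gguf")  # constructor arguments are assumed, not shown in this diff

thread = Thread(model, chat_format)  # DefaultSampling is used when no sampler is given
thread.warmup()                      # new in 6.2: runs a short generation to warm the model
print(thread.uuid)                   # new in 6.2: every Thread gets a uuid4

# New in 6.2: add_tool() registers the 'execute' callable on the Model and
# appends the tool description to the system message.
thread.add_tool({
    "function": {
        "name": "get_time",
        "description": "get_time() -> str: returns the current local time.",
        "execute": lambda: datetime.datetime.now().isoformat(),
    }
})

print(thread.send("Hello! What can you do?"))
thread.print_stats()   # "<tokens> / <context> tokens (<pct>% of context used), <n> messages"
# thread.interact()    # interactive terminal chat; type '!' then 'help' for commands

One behavioural detail visible in the diff: add_tool() mutates the first (system) message in place by appending the tool description, and registers the callable on the underlying Model via register_tool(), so the tool text is included whenever the inference string is rebuilt from the message history.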