webscout 5.9-py3-none-any.whl → 6.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (57)
  1. webscout/Agents/Onlinesearcher.py +22 -10
  2. webscout/Agents/functioncall.py +2 -2
  3. webscout/Bard.py +21 -21
  4. webscout/Local/__init__.py +6 -7
  5. webscout/Local/formats.py +404 -194
  6. webscout/Local/model.py +1074 -477
  7. webscout/Local/samplers.py +108 -144
  8. webscout/Local/thread.py +251 -410
  9. webscout/Local/ui.py +401 -0
  10. webscout/Local/utils.py +308 -131
  11. webscout/Provider/Amigo.py +5 -3
  12. webscout/Provider/ChatHub.py +209 -0
  13. webscout/Provider/Chatify.py +3 -3
  14. webscout/Provider/Cloudflare.py +3 -3
  15. webscout/Provider/DARKAI.py +1 -1
  16. webscout/Provider/Deepinfra.py +95 -389
  17. webscout/Provider/Deepseek.py +4 -6
  18. webscout/Provider/DiscordRocks.py +3 -3
  19. webscout/Provider/Free2GPT.py +3 -3
  20. webscout/Provider/NinjaChat.py +200 -0
  21. webscout/Provider/OLLAMA.py +4 -4
  22. webscout/Provider/RUBIKSAI.py +3 -3
  23. webscout/Provider/TTI/Nexra.py +3 -3
  24. webscout/Provider/TTI/__init__.py +2 -1
  25. webscout/Provider/TTI/aiforce.py +2 -2
  26. webscout/Provider/TTI/imgninza.py +136 -0
  27. webscout/Provider/Youchat.py +4 -5
  28. webscout/Provider/__init__.py +13 -6
  29. webscout/Provider/ai4chat.py +3 -2
  30. webscout/Provider/aimathgpt.py +193 -0
  31. webscout/Provider/bagoodex.py +145 -0
  32. webscout/Provider/bixin.py +3 -3
  33. webscout/Provider/cleeai.py +3 -3
  34. webscout/Provider/elmo.py +2 -5
  35. webscout/Provider/felo_search.py +1 -1
  36. webscout/Provider/gaurish.py +168 -0
  37. webscout/Provider/geminiprorealtime.py +160 -0
  38. webscout/Provider/julius.py +10 -40
  39. webscout/Provider/llamatutor.py +2 -2
  40. webscout/Provider/prefind.py +3 -3
  41. webscout/Provider/promptrefine.py +3 -3
  42. webscout/Provider/turboseek.py +1 -1
  43. webscout/Provider/twitterclone.py +25 -41
  44. webscout/Provider/upstage.py +3 -3
  45. webscout/Provider/x0gpt.py +6 -6
  46. webscout/exceptions.py +5 -1
  47. webscout/utils.py +3 -0
  48. webscout/version.py +1 -1
  49. webscout/webscout_search.py +154 -123
  50. {webscout-5.9.dist-info → webscout-6.1.dist-info}/METADATA +132 -157
  51. {webscout-5.9.dist-info → webscout-6.1.dist-info}/RECORD +55 -49
  52. {webscout-5.9.dist-info → webscout-6.1.dist-info}/WHEEL +1 -1
  53. webscout/Local/rawdog.py +0 -946
  54. webscout/Provider/Poe.py +0 -208
  55. {webscout-5.9.dist-info → webscout-6.1.dist-info}/LICENSE.md +0 -0
  56. {webscout-5.9.dist-info → webscout-6.1.dist-info}/entry_points.txt +0 -0
  57. {webscout-5.9.dist-info → webscout-6.1.dist-info}/top_level.txt +0 -0
webscout/Local/utils.py CHANGED
@@ -1,86 +1,271 @@
- from ._version import __version__, __llama_cpp_version__
-
+ import os
  import sys
- import numpy as np
+ import struct
+ from enum import IntEnum
+ from io import BufferedReader
+ from typing import Dict, Iterable, TextIO, Optional, Union, Tuple, Generator, Any

- from typing import Any, Iterable, TextIO
- from time import strftime
- from enum import IntEnum
- from struct import unpack
- from colorama import Fore
  from huggingface_hub import hf_hub_url, cached_download
+ import numpy as np
+
+ from ._version import __version__, __llama_cpp_version__
+
+
+ # Color codes for Thread.interact()
+ RESET_ALL = "\x1b[39m"
+ USER_STYLE = "\x1b[39m\x1b[32m"
+ BOT_STYLE = "\x1b[39m\x1b[36m"
+ DIM_STYLE = "\x1b[39m\x1b[90m"
+ SPECIAL_STYLE = "\x1b[39m\x1b[33m"
+ ERROR_STYLE = "\x1b[39m\x1b[91m"

- # color codes used in Thread.interact()
- RESET_ALL = Fore.RESET
- USER_STYLE = RESET_ALL + Fore.GREEN
- BOT_STYLE = RESET_ALL + Fore.CYAN
- DIM_STYLE = RESET_ALL + Fore.LIGHTBLACK_EX
- SPECIAL_STYLE = RESET_ALL + Fore.YELLOW
+ NoneType: type = type(None)
+
+ class TypeAssertionError(Exception):
+     """Raised when a type assertion fails."""
+     pass

- # for typing of softmax parameter `z`
  class _ArrayLike(Iterable):
+     """Represents any object that can be treated as a NumPy array."""
      pass

- # for typing of Model.stream_print() parameter `file`
  class _SupportsWriteAndFlush(TextIO):
+     """Represents a file-like object supporting write and flush operations."""
      pass

+ class UnreachableException(Exception):
+     """Raised when code reaches a theoretically unreachable state."""
+
+     def __init__(self):
+         super().__init__(
+             "Unreachable code reached. Please report this issue at: "
+             "https://github.com/ddh0/easy-llama/issues/new/choose"
+         )
+
  def download_model(repo_id: str, filename: str, token: str, cache_dir: str = ".cache") -> str:
      """
-     Downloads a GGUF model file from Hugging Face Hub.
-
-     repo_id: The Hugging Face repository ID (e.g., 'facebook/bart-large-cnn').
-     filename: The name of the GGUF file within the repository (e.g., 'model.gguf').
-     token: The Hugging Face token for authentication.
-     cache_dir: The directory where the downloaded file should be stored.
-
-     Returns: The path to the downloaded file.
+     Downloads a GGUF model file from the Hugging Face Hub.
+
+     Args:
+         repo_id (str): Hugging Face repository ID (e.g., 'facebook/bart-large-cnn').
+         filename (str): Name of the GGUF file (e.g., 'model.gguf').
+         token (str): Hugging Face API token.
+         cache_dir (str, optional): Local directory for storing downloaded files.
+             Defaults to ".cache".
+
+     Returns:
+         str: Path to the downloaded file.
      """
      url = hf_hub_url(repo_id, filename)
-     filepath = cached_download(url, cache_dir=cache_dir, force_filename=filename, use_auth_token=token)
+     filepath = cached_download(
+         url, cache_dir=cache_dir, force_filename=filename, use_auth_token=token
+     )
      return filepath

- class GGUFReader:
+ def softmax(z: _ArrayLike, T: Optional[float] = None, dtype: Optional[np.dtype] = None) -> np.ndarray:
+     """
+     Computes the softmax of an array-like input.
+
+     Args:
+         z (_ArrayLike): Input array.
+         T (Optional[float], optional): Temperature parameter (scales input before softmax).
+             Defaults to None.
+         dtype (Optional[np.dtype], optional): Data type for calculations. Defaults to None
+             (highest precision available).
+
+     Returns:
+         np.ndarray: Softmax output.
+     """
+     if dtype is None:
+         _dtype = next(
+             (getattr(np, f'float{bits}') for bits in [128, 96, 80, 64, 32, 16]
+              if hasattr(np, f'float{bits}')),
+             float  # Default to Python float if no NumPy float types are available
+         )
+     else:
+         assert_type(
+             dtype,
+             type,
+             'dtype',
+             'softmax',
+             'dtype should be a floating type, such as `np.float32`'
+         )
+         _dtype = dtype
+
+     _z = np.asarray(z, dtype=_dtype)
+     if T is None or T == 1.0:
+         exp_z = np.exp(_z - np.max(_z), dtype=_dtype)
+         return exp_z / np.sum(exp_z, axis=0, dtype=_dtype)
+
+     assert_type(T, float, "temperature value 'T'", 'softmax')
+
+     if T == 0.0:
+         result = np.zeros_like(_z, dtype=_dtype)
+         result[np.argmax(_z)] = 1.0
+         return result
+
+     exp_z = np.exp(np.divide(_z, T, dtype=_dtype), dtype=_dtype)
+     return exp_z / np.sum(exp_z, axis=0, dtype=_dtype)
+
+ def cls() -> None:
+     """Clears the terminal screen."""
+     os.system('cls' if os.name == 'nt' else 'clear')
+     if os.name != 'nt':
+         print("\033c\033[3J", end="", flush=True)
+
+ def truncate(text: str, max_length: int = 72) -> str:
+     """Truncates a string to a given length and adds ellipsis if truncated."""
+     return text if len(text) <= max_length else f"{text[:max_length - 3]}..."
+
+ def print_version_info(file: _SupportsWriteAndFlush) -> None:
+     """Prints easy-llama and llama_cpp package versions."""
+     print(f"webscout.Local package version: {__version__}", file=file)
+     print(f"llama_cpp package version: {__llama_cpp_version__}", file=file)
+
+ def print_verbose(text: str) -> None:
+     """Prints verbose messages to stderr."""
+     print("webscout.Local:", text, file=sys.stderr, flush=True)
+
+ def print_info(text: str) -> None:
+     """Prints informational messages to stderr."""
+     print("webscout.Local: info:", text, file=sys.stderr, flush=True)
+
+ def print_warning(text: str) -> None:
+     """Prints warning messages to stderr."""
+     print("webscout.Local: WARNING:", text, file=sys.stderr, flush=True)
+
+ def assert_type(
+     obj: object,
+     expected_type: Union[type, Tuple[type, ...]],
+     obj_name: str,
+     code_location: str,
+     hint: Optional[str] = None
+ ) -> None:
+     """
+     Asserts that an object is of an expected type.
+
+     Args:
+         obj (object): The object to check.
+         expected_type (Union[type, Tuple[type, ...]]): The expected type(s).
+         obj_name (str): Name of the object in the code.
+         code_location (str): Location of the assertion in the code.
+         hint (Optional[str], optional): Additional hint for the error message.
+             Defaults to None.
+
+     Raises:
+         TypeAssertionError: If the object is not of the expected type.
+     """
+     if isinstance(obj, expected_type):
+         return
+
+     if isinstance(expected_type, tuple):
+         expected_types_str = ", ".join(t.__name__ for t in expected_type)
+         error_msg = (
+             f"{code_location}: {obj_name} should be one of "
+             f"{expected_types_str}, not {type(obj).__name__}"
+         )
+     else:
+         error_msg = (
+             f"{code_location}: {obj_name} should be an instance of "
+             f"{expected_type.__name__}, not {type(obj).__name__}"
+         )
+
+     if hint:
+         error_msg += f" ({hint})"
+
+     raise TypeAssertionError(error_msg)
+
+ class InferenceLock:
+     """
+     Context manager to prevent concurrent model inferences.
+
+     This is primarily useful in asynchronous or multi-threaded contexts where
+     concurrent calls to the model can lead to issues.
+     """
+
+     class LockFailure(Exception):
+         """Raised when acquiring or releasing the lock fails."""
+         pass
+
+     def __init__(self):
+         """Initializes the InferenceLock."""
+         self.locked = False
+
+     def __enter__(self):
+         """Acquires the lock when entering the context."""
+         return self.acquire()
+
+     def __exit__(self, *exc_info):
+         """Releases the lock when exiting the context."""
+         self.release()
+
+     async def __aenter__(self):
+         """Acquires the lock asynchronously."""
+         return self.__enter__()
+
+     async def __aexit__(self, *exc_info):
+         """Releases the lock asynchronously."""
+         self.__exit__()
+
+     def acquire(self):
+         """Acquires the lock."""
+         if self.locked:
+             raise self.LockFailure("InferenceLock is already locked.")
+         self.locked = True
+         return self
+
+     def release(self):
+         """Releases the lock."""
+         if not self.locked:
+             raise self.LockFailure("InferenceLock is not acquired.")
+         self.locked = False
+
+
+ class GGUFValueType(IntEnum):
      """
-     Peek at file header for GGUF metadata
+     Represents data types supported by the GGUF format.

-     Raise ValueError if file is not GGUF or is outdated
+     This enum should be kept consistent with the GGUF specification.
+     """
+     UINT8 = 0
+     INT8 = 1
+     UINT16 = 2
+     INT16 = 3
+     UINT32 = 4
+     INT32 = 5
+     FLOAT32 = 6
+     BOOL = 7
+     STRING = 8
+     ARRAY = 9
+     UINT64 = 10
+     INT64 = 11
+     FLOAT64 = 12

-     Credit to oobabooga for the parts of the code in this class

-     Format spec: https://github.com/philpax/ggml/blob/gguf-spec/docs/gguf.md
+ class QuickGGUFReader:
      """
+     Provides methods for quickly reading metadata from GGUF files.

-     class GGUFValueType(IntEnum):
-         UINT8 = 0
-         INT8 = 1
-         UINT16 = 2
-         INT16 = 3
-         UINT32 = 4
-         INT32 = 5
-         FLOAT32 = 6
-         BOOL = 7
-         STRING = 8
-         ARRAY = 9
-         UINT64 = 10
-         INT64 = 11
-         FLOAT64 = 12
-
-     _simple_value_packing = {
-         GGUFValueType.UINT8: "<B",
-         GGUFValueType.INT8: "<b",
-         GGUFValueType.UINT16: "<H",
-         GGUFValueType.INT16: "<h",
-         GGUFValueType.UINT32: "<I",
-         GGUFValueType.INT32: "<i",
-         GGUFValueType.FLOAT32: "<f",
-         GGUFValueType.UINT64: "<Q",
-         GGUFValueType.INT64: "<q",
-         GGUFValueType.FLOAT64: "<d",
+     Supports GGUF versions 2 and 3. Assumes little or big endian
+     architecture.
+     """
+
+     SUPPORTED_GGUF_VERSIONS = [2, 3]
+     VALUE_PACKING = {
+         GGUFValueType.UINT8: "=B",
+         GGUFValueType.INT8: "=b",
+         GGUFValueType.UINT16: "=H",
+         GGUFValueType.INT16: "=h",
+         GGUFValueType.UINT32: "=I",
+         GGUFValueType.INT32: "=i",
+         GGUFValueType.FLOAT32: "=f",
+         GGUFValueType.UINT64: "=Q",
+         GGUFValueType.INT64: "=q",
+         GGUFValueType.FLOAT64: "=d",
          GGUFValueType.BOOL: "?",
      }

-     value_type_info = {
+     VALUE_LENGTHS = {
          GGUFValueType.UINT8: 1,
          GGUFValueType.INT8: 1,
          GGUFValueType.UINT16: 2,
@@ -94,93 +279,85 @@ class GGUFReader:
          GGUFValueType.BOOL: 1,
      }

-     def get_single(self, value_type, file) -> Any:
-         if value_type == GGUFReader.GGUFValueType.STRING:
-             value_length = unpack("<Q", file.read(8))[0]
-             value = file.read(value_length)
-             value = value.decode("utf-8")
-         else:
-             type_str = GGUFReader._simple_value_packing.get(value_type)
-             bytes_length = GGUFReader.value_type_info.get(value_type)
-             value = unpack(type_str, file.read(bytes_length))[0]
+     @staticmethod
+     def unpack(value_type: GGUFValueType, file: BufferedReader) -> Any:
+         """Unpacks a single value from the file based on its type."""
+         return struct.unpack(
+             QuickGGUFReader.VALUE_PACKING.get(value_type),
+             file.read(QuickGGUFReader.VALUE_LENGTHS.get(value_type))
+         )[0]
+
+     @staticmethod
+     def get_single(
+         value_type: GGUFValueType,
+         file: BufferedReader
+     ) -> Union[str, int, float, bool]:
+         """Reads a single value from the file."""
+         if value_type == GGUFValueType.STRING:
+             string_length = QuickGGUFReader.unpack(GGUFValueType.UINT64, file)
+             return file.read(string_length).decode("utf-8")
+         return QuickGGUFReader.unpack(value_type, file)

-         return value
+     @staticmethod
+     def load_metadata(
+         fn: os.PathLike[str] | str
+     ) -> Dict[str, Union[str, int, float, bool, list]]:
+         """
+         Loads metadata from a GGUF file.

-     def load_metadata(self, fname) -> dict:
-         metadata = {}
-         with open(fname, "rb") as file:
-             GGUF_MAGIC = file.read(4)
+         Args:
+             fn (Union[os.PathLike[str], str]): Path to the GGUF file.

-             if GGUF_MAGIC != b"GGUF":
+         Returns:
+             Dict[str, Union[str, int, float, bool, list]]: A dictionary
+             containing the metadata.
+         """
+
+         metadata: Dict[str, Union[str, int, float, bool, list]] = {}
+         with open(fn, "rb") as file:
+             magic = file.read(4)
+             if magic != b'GGUF':
                  raise ValueError(
-                     "your model file is not a valid GGUF file "
-                     f"(magic number mismatch, got {GGUF_MAGIC}, "
+                     f"Invalid GGUF file (magic number mismatch: got {magic}, "
                      "expected b'GGUF')"
                  )

-             GGUF_VERSION = unpack("<I", file.read(4))[0]
-
-             if GGUF_VERSION == 1:
+             version = QuickGGUFReader.unpack(GGUFValueType.UINT32, file=file)
+             if version not in QuickGGUFReader.SUPPORTED_GGUF_VERSIONS:
                  raise ValueError(
-                     "your model file reports GGUF version 1, "
-                     "but only versions 2 and above are supported. "
-                     "re-convert your model or download a newer version"
+                     f"Unsupported GGUF version: {version}. Supported versions are: "
+                     f"{QuickGGUFReader.SUPPORTED_GGUF_VERSIONS}"
                  )

-             # ti_data_count = struct.unpack("<Q", file.read(8))[0]
-             file.read(8)
-             kv_data_count = unpack("<Q", file.read(8))[0]
+             QuickGGUFReader.unpack(GGUFValueType.UINT64, file=file)  # tensor_count, not needed
+             metadata_kv_count = QuickGGUFReader.unpack(
+                 GGUFValueType.UINT64 if version == 3 else GGUFValueType.UINT32, file
+             )

-             for _ in range(kv_data_count):
-                 key_length = unpack("<Q", file.read(8))[0]
-                 key = file.read(key_length)
+             for _ in range(metadata_kv_count):
+                 if version == 3:
+                     key_length = QuickGGUFReader.unpack(GGUFValueType.UINT64, file=file)
+                 elif version == 2:
+                     key_length = 0
+                     while key_length == 0:
+                         key_length = QuickGGUFReader.unpack(GGUFValueType.UINT32, file=file)
+                     file.read(4)  # 4 byte offset for GGUFv2

-                 value_type = GGUFReader.GGUFValueType(
-                     unpack("<I", file.read(4))[0]
-                 )
-                 if value_type == GGUFReader.GGUFValueType.ARRAY:
-                     ltype = GGUFReader.GGUFValueType(
-                         unpack("<I", file.read(4))[0]
+                 key = file.read(key_length).decode()
+                 value_type = GGUFValueType(QuickGGUFReader.unpack(GGUFValueType.UINT32, file))
+
+                 if value_type == GGUFValueType.ARRAY:
+                     array_value_type = GGUFValueType(QuickGGUFReader.unpack(GGUFValueType.UINT32, file))
+                     array_length = QuickGGUFReader.unpack(
+                         GGUFValueType.UINT64 if version == 3 else GGUFValueType.UINT32, file
                      )
-                     length = unpack("<Q", file.read(8))[0]
-                     arr = [
-                         GGUFReader.get_single(
-                             self,
-                             ltype,
-                             file
-                         ) for _ in range(length)
+                     if version == 2:
+                         file.read(4)  # 4 byte offset for GGUFv2
+
+                     metadata[key] = [
+                         QuickGGUFReader.get_single(array_value_type, file) for _ in range(array_length)
                      ]
-                     metadata[key.decode()] = arr
                  else:
-                     value = GGUFReader.get_single(self, value_type, file)
-                     metadata[key.decode()] = value
+                     metadata[key] = QuickGGUFReader.get_single(value_type, file)

-         return metadata
-
- def softmax(z: _ArrayLike) -> np.ndarray:
-     """
-     Compute softmax over values in z, where z is array-like
-     """
-     e_z = np.exp(z - np.max(z))
-     return e_z / e_z.sum()
-
- def cls() -> None:
-     """Clear the terminal"""
-     print("\033c\033[3J", end='', flush=True)
-
- # no longer used in this module, but left for others to use
- def get_timestamp_prefix_str() -> str:
-     # helpful: https://strftime.net
-     return strftime("[%Y, %b %e, %a %l:%M %p] ")
-
- def truncate(text: str) -> str:
-     return text if len(text) < 63 else f"{text[:60]}..."
-
- def print_verbose(text: str) -> None:
-     print("webscout.Local: verbose:", text, file=sys.stderr, flush=True)
-
- def print_info(text: str) -> None:
-     print("webscout.Local: info:", text, file=sys.stderr, flush=True)
-
- def print_warning(text: str) -> None:
-     print("webscout.Local: warning:", text, file=sys.stderr, flush=True)
+         return metadata
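Taken together, the rewritten utils module drops the monolithic `GGUFReader` in favour of a module-level `GGUFValueType` enum and a `QuickGGUFReader` built from static methods, adds a temperature-aware `softmax`, and introduces `InferenceLock` for serializing model calls. A minimal usage sketch of the new helpers (the model path and metadata key below are illustrative, not taken from the diff):

```python
# Sketch only: exercises the new helpers added in webscout/Local/utils.py.
import numpy as np
from webscout.Local.utils import QuickGGUFReader, softmax, InferenceLock

# Read metadata from a local GGUF file (path is a placeholder).
metadata = QuickGGUFReader.load_metadata("models/example-7b-q4_k_m.gguf")
print(metadata.get("general.architecture"))

# Temperature-scaled softmax over raw logits (T must be a float).
logits = np.array([2.0, 1.0, 0.1])
print(softmax(logits, T=0.7))

# Serialize inference calls with the new context manager.
with InferenceLock():
    pass  # run a single model inference here
```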
webscout/Provider/Amigo.py CHANGED
@@ -36,6 +36,7 @@ class AmigoChat(Provider):
          history_offset: int = 10250,
          act: str = None,
          model: str = "o1-preview",  # Default model
+         system_prompt: str = "You are a helpful and friendly AI assistant.",
      ):
          """
          Initializes the AmigoChat.io API with given parameters.
@@ -108,6 +109,7 @@ class AmigoChat(Provider):
          )
          self.conversation.history_offset = history_offset
          self.session.proxies = proxies
+         self.system_prompt = system_prompt

      def ask(
          self,
@@ -147,7 +149,7 @@ class AmigoChat(Provider):
          # Define the payload
          payload = {
              "messages": [
-                 {"role": "system", "content": "Mai hu ba khabr"},
+                 {"role": "system", "content": self.system_prompt},
                  {"role": "user", "content": conversation_prompt}
              ],
              "model": self.model,
@@ -259,7 +261,7 @@ class AmigoChat(Provider):

  if __name__ == '__main__':
      from rich import print
-     ai = AmigoChat(model="o1-preview")
-     response = ai.chat(input(">>> "))
+     ai = AmigoChat(model="o1-preview", system_prompt="You are a noobi AI assistant who always uses the word 'noobi' in every response. For example, you might say 'Noobi will tell you...' or 'This noobi thinks that...'.")
+     response = ai.chat(input(">>> "), stream=True)
      for chunk in response:
          print(chunk, end="", flush=True)
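With these changes, `AmigoChat` exposes a `system_prompt` argument in place of the previously hard-coded system message, and the request payload now forwards `self.system_prompt`. A short sketch of how a caller might use it (the import path, prompt text, and question are illustrative; streaming via `chat(..., stream=True)` mirrors the `__main__` example above):

```python
# Sketch only: assumes AmigoChat is importable from webscout.Provider.Amigo.
from webscout.Provider.Amigo import AmigoChat

ai = AmigoChat(
    model="o1-preview",
    system_prompt="You are a concise assistant that answers in one short paragraph.",
)

# Stream the response chunk by chunk, as in the diff's __main__ block.
for chunk in ai.chat("Summarize what a GGUF file is.", stream=True):
    print(chunk, end="", flush=True)
```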