webscout 7.4-py3-none-any.whl → 7.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +5 -53
- webscout/AIutel.py +8 -318
- webscout/DWEBS.py +460 -489
- webscout/Extra/YTToolkit/YTdownloader.py +14 -53
- webscout/Extra/YTToolkit/transcriber.py +12 -13
- webscout/Extra/YTToolkit/ytapi/video.py +0 -1
- webscout/Extra/__init__.py +0 -1
- webscout/Extra/autocoder/autocoder_utiles.py +0 -4
- webscout/Extra/autocoder/rawdog.py +13 -41
- webscout/Extra/gguf.py +652 -428
- webscout/Extra/weather.py +178 -156
- webscout/Extra/weather_ascii.py +70 -17
- webscout/Litlogger/core/logger.py +1 -2
- webscout/Litlogger/handlers/file.py +1 -1
- webscout/Litlogger/styles/formats.py +0 -2
- webscout/Litlogger/utils/detectors.py +0 -1
- webscout/Provider/AISEARCH/DeepFind.py +0 -1
- webscout/Provider/AISEARCH/ISou.py +1 -1
- webscout/Provider/AISEARCH/felo_search.py +0 -1
- webscout/Provider/AllenAI.py +24 -9
- webscout/Provider/C4ai.py +432 -0
- webscout/Provider/ChatGPTGratis.py +24 -56
- webscout/Provider/Cloudflare.py +18 -21
- webscout/Provider/DeepSeek.py +27 -48
- webscout/Provider/Deepinfra.py +129 -53
- webscout/Provider/Gemini.py +1 -1
- webscout/Provider/GithubChat.py +362 -0
- webscout/Provider/Glider.py +25 -8
- webscout/Provider/HF_space/qwen_qwen2.py +2 -2
- webscout/Provider/HeckAI.py +38 -5
- webscout/Provider/HuggingFaceChat.py +462 -0
- webscout/Provider/Jadve.py +20 -5
- webscout/Provider/Marcus.py +7 -50
- webscout/Provider/Netwrck.py +43 -67
- webscout/Provider/PI.py +4 -2
- webscout/Provider/Perplexitylabs.py +26 -6
- webscout/Provider/Phind.py +29 -3
- webscout/Provider/PizzaGPT.py +10 -51
- webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
- webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -206
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -192
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
- webscout/Provider/TTI/__init__.py +2 -3
- webscout/Provider/TTI/aiarta/__init__.py +2 -0
- webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
- webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +257 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +247 -0
- webscout/Provider/TTS/__init__.py +2 -2
- webscout/Provider/TTS/deepgram.py +12 -39
- webscout/Provider/TTS/elevenlabs.py +14 -40
- webscout/Provider/TTS/gesserit.py +11 -35
- webscout/Provider/TTS/murfai.py +13 -39
- webscout/Provider/TTS/parler.py +17 -40
- webscout/Provider/TTS/speechma.py +180 -0
- webscout/Provider/TTS/streamElements.py +17 -44
- webscout/Provider/TextPollinationsAI.py +39 -59
- webscout/Provider/Venice.py +217 -200
- webscout/Provider/WiseCat.py +27 -5
- webscout/Provider/Youchat.py +63 -36
- webscout/Provider/__init__.py +13 -8
- webscout/Provider/akashgpt.py +28 -10
- webscout/Provider/copilot.py +416 -0
- webscout/Provider/flowith.py +196 -0
- webscout/Provider/freeaichat.py +32 -45
- webscout/Provider/granite.py +17 -53
- webscout/Provider/koala.py +20 -5
- webscout/Provider/llamatutor.py +7 -47
- webscout/Provider/llmchat.py +36 -53
- webscout/Provider/multichat.py +92 -98
- webscout/Provider/talkai.py +1 -0
- webscout/Provider/turboseek.py +3 -0
- webscout/Provider/tutorai.py +2 -0
- webscout/Provider/typegpt.py +154 -64
- webscout/Provider/x0gpt.py +3 -1
- webscout/Provider/yep.py +102 -20
- webscout/__init__.py +3 -0
- webscout/cli.py +4 -40
- webscout/conversation.py +1 -10
- webscout/exceptions.py +19 -9
- webscout/litagent/__init__.py +2 -2
- webscout/litagent/agent.py +351 -20
- webscout/litagent/constants.py +34 -5
- webscout/litprinter/__init__.py +0 -3
- webscout/models.py +181 -0
- webscout/optimizers.py +1 -1
- webscout/prompt_manager.py +2 -8
- webscout/scout/core/scout.py +1 -4
- webscout/scout/core/search_result.py +1 -1
- webscout/scout/core/text_utils.py +1 -1
- webscout/scout/core.py +2 -5
- webscout/scout/element.py +1 -1
- webscout/scout/parsers/html_parser.py +1 -1
- webscout/scout/utils.py +0 -1
- webscout/swiftcli/__init__.py +1 -3
- webscout/tempid.py +1 -1
- webscout/update_checker.py +55 -95
- webscout/version.py +1 -1
- webscout/webscout_search_async.py +1 -2
- webscout/yep_search.py +297 -297
- webscout-7.6.dist-info/LICENSE.md +146 -0
- {webscout-7.4.dist-info → webscout-7.6.dist-info}/METADATA +104 -514
- {webscout-7.4.dist-info → webscout-7.6.dist-info}/RECORD +113 -120
- webscout/Extra/autollama.py +0 -231
- webscout/Local/__init__.py +0 -10
- webscout/Local/_version.py +0 -3
- webscout/Local/formats.py +0 -747
- webscout/Local/model.py +0 -1368
- webscout/Local/samplers.py +0 -125
- webscout/Local/thread.py +0 -539
- webscout/Local/ui.py +0 -401
- webscout/Local/utils.py +0 -388
- webscout/Provider/Amigo.py +0 -274
- webscout/Provider/Bing.py +0 -243
- webscout/Provider/DiscordRocks.py +0 -253
- webscout/Provider/TTI/blackbox/__init__.py +0 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
- webscout/Provider/TTI/deepinfra/__init__.py +0 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
- webscout/Provider/TTI/imgninza/__init__.py +0 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
- webscout/Provider/TTS/voicepod.py +0 -117
- webscout/Provider/dgaf.py +0 -214
- webscout-7.4.dist-info/LICENSE.md +0 -211
- {webscout-7.4.dist-info → webscout-7.6.dist-info}/WHEEL +0 -0
- {webscout-7.4.dist-info → webscout-7.6.dist-info}/entry_points.txt +0 -0
- {webscout-7.4.dist-info → webscout-7.6.dist-info}/top_level.txt +0 -0
webscout/Local/utils.py
DELETED
@@ -1,388 +0,0 @@
import os
import sys
import struct
from enum import IntEnum
from io import BufferedReader
from typing import Dict, Iterable, TextIO, Optional, Union, Tuple, Generator, Any

from huggingface_hub import hf_hub_download
import numpy as np

from ._version import __version__, __llama_cpp_version__


# Color codes for Thread.interact()
RESET_ALL = "\x1b[39m"
USER_STYLE = "\x1b[39m\x1b[32m"
BOT_STYLE = "\x1b[39m\x1b[36m"
DIM_STYLE = "\x1b[39m\x1b[90m"
SPECIAL_STYLE = "\x1b[39m\x1b[33m"
ERROR_STYLE = "\x1b[39m\x1b[91m"

NoneType: type = type(None)

class TypeAssertionError(Exception):
    """Raised when a type assertion fails."""
    pass

class _ArrayLike(Iterable):
    """Represents any object that can be treated as a NumPy array."""
    pass

class _SupportsWriteAndFlush(TextIO):
    """Represents a file-like object supporting write and flush operations."""
    pass

class UnreachableException(Exception):
    """Raised when code reaches a theoretically unreachable state."""

    def __init__(self):
        super().__init__(
            "Unreachable code reached. Please report this issue at: "
            "https://github.com/ddh0/easy-llama/issues/new/choose"
        )

def download_model(
    repo_id: str,
    filename: str,
    token: Optional[str] = None,
    cache_dir: str = ".cache",
    revision: str = "main"
) -> str:
    """
    Downloads a model file from the Hugging Face Hub.

    Args:
        repo_id (str): Hugging Face repository ID (e.g., 'facebook/bart-large-cnn')
        filename (str): Name of the file to download (e.g., 'model.bin', 'tokenizer.json')
        token (str, optional): Hugging Face API token for private repos. Defaults to None.
        cache_dir (str, optional): Local directory for storing downloaded files.
            Defaults to ".cache".
        revision (str, optional): The specific model version to use. Defaults to "main".

    Returns:
        str: Path to the downloaded file.

    Raises:
        ValueError: If the repository or file is not found
        Exception: For other download-related errors
    """
    try:
        # Create cache directory if it doesn't exist
        os.makedirs(cache_dir, exist_ok=True)

        # Download the file
        downloaded_path = hf_hub_download(
            repo_id=repo_id,
            filename=filename,
            token=token,
            cache_dir=cache_dir,
            revision=revision,
            resume_download=True,  # Resume interrupted downloads
            force_download=False   # Use cached version if available
        )

        return downloaded_path

    except Exception as e:
        raise Exception(f"Error downloading model from {repo_id}: {str(e)}")

def softmax(z: _ArrayLike, T: Optional[float] = None, dtype: Optional[np.dtype] = None) -> np.ndarray:
    """
    Computes the softmax of an array-like input.

    Args:
        z (_ArrayLike): Input array.
        T (Optional[float], optional): Temperature parameter (scales input before softmax).
            Defaults to None.
        dtype (Optional[np.dtype], optional): Data type for calculations. Defaults to None
            (highest precision available).

    Returns:
        np.ndarray: Softmax output.
    """
    if dtype is None:
        _dtype = next(
            (getattr(np, f'float{bits}') for bits in [128, 96, 80, 64, 32, 16]
             if hasattr(np, f'float{bits}')),
            float  # Default to Python float if no NumPy float types are available
        )
    else:
        assert_type(
            dtype,
            type,
            'dtype',
            'softmax',
            'dtype should be a floating type, such as `np.float32`'
        )
        _dtype = dtype

    _z = np.asarray(z, dtype=_dtype)
    if T is None or T == 1.0:
        exp_z = np.exp(_z - np.max(_z), dtype=_dtype)
        return exp_z / np.sum(exp_z, axis=0, dtype=_dtype)

    assert_type(T, float, "temperature value 'T'", 'softmax')

    if T == 0.0:
        result = np.zeros_like(_z, dtype=_dtype)
        result[np.argmax(_z)] = 1.0
        return result

    exp_z = np.exp(np.divide(_z, T, dtype=_dtype), dtype=_dtype)
    return exp_z / np.sum(exp_z, axis=0, dtype=_dtype)

def cls() -> None:
    """Clears the terminal screen."""
    os.system('cls' if os.name == 'nt' else 'clear')
    if os.name != 'nt':
        print("\033c\033[3J", end="", flush=True)

def truncate(text: str, max_length: int = 72) -> str:
    """Truncates a string to a given length and adds ellipsis if truncated."""
    return text if len(text) <= max_length else f"{text[:max_length - 3]}..."

def print_version_info(file: _SupportsWriteAndFlush) -> None:
    """Prints easy-llama and llama_cpp package versions."""
    print(f"webscout.Local package version: {__version__}", file=file)
    print(f"llama_cpp package version: {__llama_cpp_version__}", file=file)

def print_verbose(text: str) -> None:
    """Prints verbose messages to stderr."""
    print("webscout.Local:", text, file=sys.stderr, flush=True)

def print_info(text: str) -> None:
    """Prints informational messages to stderr."""
    print("webscout.Local: info:", text, file=sys.stderr, flush=True)

def print_warning(text: str) -> None:
    """Prints warning messages to stderr."""
    print("webscout.Local: WARNING:", text, file=sys.stderr, flush=True)

def assert_type(
    obj: object,
    expected_type: Union[type, Tuple[type, ...]],
    obj_name: str,
    code_location: str,
    hint: Optional[str] = None
) -> None:
    """
    Asserts that an object is of an expected type.

    Args:
        obj (object): The object to check.
        expected_type (Union[type, Tuple[type, ...]]): The expected type(s).
        obj_name (str): Name of the object in the code.
        code_location (str): Location of the assertion in the code.
        hint (Optional[str], optional): Additional hint for the error message.
            Defaults to None.

    Raises:
        TypeAssertionError: If the object is not of the expected type.
    """
    if isinstance(obj, expected_type):
        return

    if isinstance(expected_type, tuple):
        expected_types_str = ", ".join(t.__name__ for t in expected_type)
        error_msg = (
            f"{code_location}: {obj_name} should be one of "
            f"{expected_types_str}, not {type(obj).__name__}"
        )
    else:
        error_msg = (
            f"{code_location}: {obj_name} should be an instance of "
            f"{expected_type.__name__}, not {type(obj).__name__}"
        )

    if hint:
        error_msg += f" ({hint})"

    raise TypeAssertionError(error_msg)

class InferenceLock:
    """
    Context manager to prevent concurrent model inferences.

    This is primarily useful in asynchronous or multi-threaded contexts where
    concurrent calls to the model can lead to issues.
    """

    class LockFailure(Exception):
        """Raised when acquiring or releasing the lock fails."""
        pass

    def __init__(self):
        """Initializes the InferenceLock."""
        self.locked = False

    def __enter__(self):
        """Acquires the lock when entering the context."""
        return self.acquire()

    def __exit__(self, *exc_info):
        """Releases the lock when exiting the context."""
        self.release()

    async def __aenter__(self):
        """Acquires the lock asynchronously."""
        return self.__enter__()

    async def __aexit__(self, *exc_info):
        """Releases the lock asynchronously."""
        self.__exit__()

    def acquire(self):
        """Acquires the lock."""
        if self.locked:
            raise self.LockFailure("InferenceLock is already locked.")
        self.locked = True
        return self

    def release(self):
        """Releases the lock."""
        if not self.locked:
            raise self.LockFailure("InferenceLock is not acquired.")
        self.locked = False


class GGUFValueType(IntEnum):
    """
    Represents data types supported by the GGUF format.

    This enum should be kept consistent with the GGUF specification.
    """
    UINT8 = 0
    INT8 = 1
    UINT16 = 2
    INT16 = 3
    UINT32 = 4
    INT32 = 5
    FLOAT32 = 6
    BOOL = 7
    STRING = 8
    ARRAY = 9
    UINT64 = 10
    INT64 = 11
    FLOAT64 = 12


class QuickGGUFReader:
    """
    Provides methods for quickly reading metadata from GGUF files.

    Supports GGUF versions 2 and 3. Assumes little or big endian
    architecture.
    """

    SUPPORTED_GGUF_VERSIONS = [2, 3]
    VALUE_PACKING = {
        GGUFValueType.UINT8: "=B",
        GGUFValueType.INT8: "=b",
        GGUFValueType.UINT16: "=H",
        GGUFValueType.INT16: "=h",
        GGUFValueType.UINT32: "=I",
        GGUFValueType.INT32: "=i",
        GGUFValueType.FLOAT32: "=f",
        GGUFValueType.UINT64: "=Q",
        GGUFValueType.INT64: "=q",
        GGUFValueType.FLOAT64: "=d",
        GGUFValueType.BOOL: "?",
    }

    VALUE_LENGTHS = {
        GGUFValueType.UINT8: 1,
        GGUFValueType.INT8: 1,
        GGUFValueType.UINT16: 2,
        GGUFValueType.INT16: 2,
        GGUFValueType.UINT32: 4,
        GGUFValueType.INT32: 4,
        GGUFValueType.FLOAT32: 4,
        GGUFValueType.UINT64: 8,
        GGUFValueType.INT64: 8,
        GGUFValueType.FLOAT64: 8,
        GGUFValueType.BOOL: 1,
    }

    @staticmethod
    def unpack(value_type: GGUFValueType, file: BufferedReader) -> Any:
        """Unpacks a single value from the file based on its type."""
        return struct.unpack(
            QuickGGUFReader.VALUE_PACKING.get(value_type),
            file.read(QuickGGUFReader.VALUE_LENGTHS.get(value_type))
        )[0]

    @staticmethod
    def get_single(
        value_type: GGUFValueType,
        file: BufferedReader
    ) -> Union[str, int, float, bool]:
        """Reads a single value from the file."""
        if value_type == GGUFValueType.STRING:
            string_length = QuickGGUFReader.unpack(GGUFValueType.UINT64, file)
            return file.read(string_length).decode("utf-8")
        return QuickGGUFReader.unpack(value_type, file)

    @staticmethod
    def load_metadata(
        fn: os.PathLike[str] | str
    ) -> Dict[str, Union[str, int, float, bool, list]]:
        """
        Loads metadata from a GGUF file.

        Args:
            fn (Union[os.PathLike[str], str]): Path to the GGUF file.

        Returns:
            Dict[str, Union[str, int, float, bool, list]]: A dictionary
            containing the metadata.
        """

        metadata: Dict[str, Union[str, int, float, bool, list]] = {}
        with open(fn, "rb") as file:
            magic = file.read(4)
            if magic != b'GGUF':
                raise ValueError(
                    f"Invalid GGUF file (magic number mismatch: got {magic}, "
                    "expected b'GGUF')"
                )

            version = QuickGGUFReader.unpack(GGUFValueType.UINT32, file=file)
            if version not in QuickGGUFReader.SUPPORTED_GGUF_VERSIONS:
                raise ValueError(
                    f"Unsupported GGUF version: {version}. Supported versions are: "
                    f"{QuickGGUFReader.SUPPORTED_GGUF_VERSIONS}"
                )

            QuickGGUFReader.unpack(GGUFValueType.UINT64, file=file)  # tensor_count, not needed
            metadata_kv_count = QuickGGUFReader.unpack(
                GGUFValueType.UINT64 if version == 3 else GGUFValueType.UINT32, file
            )

            for _ in range(metadata_kv_count):
                if version == 3:
                    key_length = QuickGGUFReader.unpack(GGUFValueType.UINT64, file=file)
                elif version == 2:
                    key_length = 0
                    while key_length == 0:
                        key_length = QuickGGUFReader.unpack(GGUFValueType.UINT32, file=file)
                    file.read(4)  # 4 byte offset for GGUFv2

                key = file.read(key_length).decode()
                value_type = GGUFValueType(QuickGGUFReader.unpack(GGUFValueType.UINT32, file))

                if value_type == GGUFValueType.ARRAY:
                    array_value_type = GGUFValueType(QuickGGUFReader.unpack(GGUFValueType.UINT32, file))
                    array_length = QuickGGUFReader.unpack(
                        GGUFValueType.UINT64 if version == 3 else GGUFValueType.UINT32, file
                    )
                    if version == 2:
                        file.read(4)  # 4 byte offset for GGUFv2

                    metadata[key] = [
                        QuickGGUFReader.get_single(array_value_type, file) for _ in range(array_length)
                    ]
                else:
                    metadata[key] = QuickGGUFReader.get_single(value_type, file)

        return metadata
webscout/Provider/Amigo.py
DELETED
@@ -1,274 +0,0 @@
import cloudscraper
import json
import uuid
from typing import Any, Dict, Generator

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts
from webscout.AIbase import Provider

class AmigoChat(Provider):
    """
    A class to interact with the AmigoChat.io API using cloudscraper.
    """

    AVAILABLE_MODELS = [
        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",  # Llama 3
        "o1-mini",                                        # OpenAI O1 Mini
        "claude-3-sonnet-20240229",                       # Claude Sonnet
        "gemini-1.5-pro",                                 # Gemini Pro
        "gemini-1-5-flash",                               # Gemini Flash
        "o1-preview",                                     # OpenAI O1 Preview
        "claude-3-5-sonnet-20241022",                     # Claude 3.5 Sonnet
        "Qwen/Qwen2.5-72B-Instruct-Turbo",                # Qwen 2.5
        "gpt-4o",                                         # OpenAI GPT-4o
        "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo"  # Llama 3.2
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        temperature: float = 1,
        intro: str = None,
        filepath: str = None,
        top_p: float = 0.95,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "Qwen/Qwen2.5-72B-Instruct-Turbo",  # Default model
        system_prompt: str = "You are a helpful and friendly AI assistant.",
    ):
        """
        Initializes the AmigoChat.io API with given parameters.

        Args:
            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
            timeout (int, optional): HTTP request timeout. Defaults to 30.
            intro (str, optional): Conversation introductory prompt. Defaults to None.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.
            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
            proxies (dict, optional): HTTP request proxies. Defaults to {}.
            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
            model (str, optional): The AI model to use for text generation. Defaults to "Qwen/Qwen2.5-72B-Instruct-Turbo".
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.session = cloudscraper.create_scraper(
            browser={
                'browser': 'chrome',
                'platform': 'windows',
                'mobile': False
            }
        )
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://api.amigochat.io/v1/chat/completions"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.temperature = temperature
        self.last_response = {}
        self.model = model
        self.top_p = top_p
        self.headers = {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
            "Authorization": "Bearer ",  # empty
            "Content-Type": "application/json",
            "DNT": "1",
            "Origin": "https://amigochat.io",
            "Priority": "u=1, i",
            "Referer": "https://amigochat.io/",
            "Sec-CH-UA": '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": '"Windows"',
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-site",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                          "AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0",
            "X-Device-Language": "en-US",
            "X-Device-Platform": "web",
            "X-Device-UUID": str(uuid.uuid4()),
            "X-Device-Version": "1.0.22"
        }

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        self.session.headers.update(self.headers)
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies
        self.system_prompt = system_prompt

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any]:
        """Chat with AI

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            dict : {}
        ```json
        {
            "text" : "How may I assist you today?"
        }
        ```
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        # Define the payload
        payload = {
            "messages": [
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": conversation_prompt}
            ],
            "model": self.model,
            "frequency_penalty": 0,
            "max_tokens": self.max_tokens_to_sample,
            "presence_penalty": 0,
            "stream": stream,
            "temperature": self.temperature,
            "top_p": self.top_p
        }

        if stream:
            return self._stream_response(payload, raw)
        else:
            return self._non_stream_response(payload)

    def _stream_response(self, payload: Dict[str, Any], raw: bool) -> Generator:
        try:
            response = self.session.post(
                self.api_endpoint,
                json=payload,
                stream=True,
                timeout=self.timeout
            )

            if response.status_code == 201:
                for line in response.iter_lines():
                    if line:
                        decoded_line = line.decode('utf-8').strip()
                        if decoded_line.startswith("data: "):
                            data_str = decoded_line[6:]
                            if data_str == "[DONE]":
                                break
                            try:
                                data_json = json.loads(data_str)
                                choices = data_json.get("choices", [])
                                if choices:
                                    delta = choices[0].get("delta", {})
                                    content = delta.get("content", "")
                                    if content:
                                        yield content if raw else dict(text=content)
                            except json.JSONDecodeError:
                                print(f"Received non-JSON data: {data_str}")
            else:
                print(f"Request failed with status code {response.status_code}")
                print("Response:", response.text)
        except (cloudscraper.exceptions.CloudflareChallengeError,
                cloudscraper.exceptions.CloudflareCode1020) as e:
            print("Cloudflare protection error:", str(e))
        except Exception as e:
            print("An error occurred while making the request:", str(e))

    def _non_stream_response(self, payload: Dict[str, Any]) -> Dict[str, Any]:
        full_response = ""
        for chunk in self._stream_response(payload, raw=False):
            full_response += chunk["text"]

        self.last_response.update(dict(text=full_response))
        self.conversation.update_chat_history(
            payload["messages"][-1]["content"], self.get_message(self.last_response)
        )
        return self.last_response

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Generator[str, None, None]:
        """Generate response `str`
        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            Generator[str, None, None]: Response generated
        """

        if stream:
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)
        else:
            response = self.ask(
                prompt,
                False,
                optimizer=optimizer,
                conversationally=conversationally,
            )
            yield self.get_message(response)

    def get_message(self, response: Dict[str, Any]) -> str:
        """Retrieves message only from response

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]

if __name__ == '__main__':
    from rich import print
    ai = AmigoChat(model="o1-preview", system_prompt="You are a noobi AI assistant who always uses the word 'noobi' in every response. For example, you might say 'Noobi will tell you...' or 'This noobi thinks that...'.")
    response = ai.chat(input(">>> "), stream=True)
    for chunk in response:
        print(chunk, end="", flush=True)
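
For context, a minimal usage sketch of this removed provider (not part of the diff; assumes webscout 7.4, where the class was importable from webscout.Provider.Amigo, and the prompt and model choice are illustrative):

# Hypothetical usage sketch; the import path is assumed from the file location.
from webscout.Provider.Amigo import AmigoChat

ai = AmigoChat(model="gpt-4o")

response = ai.ask("Hello!")      # non-streaming: returns {"text": "..."}
print(ai.get_message(response))

# Streaming instead yields text chunks as they arrive:
for chunk in ai.chat("Hello!", stream=True):
    print(chunk, end="", flush=True)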