webscout 6.9-py3-none-any.whl → 7.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIbase.py +12 -2
- webscout/DWEBS.py +38 -22
- webscout/Extra/autocoder/autocoder_utiles.py +68 -7
- webscout/Extra/autollama.py +0 -16
- webscout/Extra/gguf.py +0 -13
- webscout/LLM.py +1 -1
- webscout/Provider/AISEARCH/DeepFind.py +251 -0
- webscout/Provider/AISEARCH/__init__.py +2 -2
- webscout/Provider/AISEARCH/felo_search.py +167 -118
- webscout/Provider/Blackboxai.py +136 -137
- webscout/Provider/Cloudflare.py +92 -78
- webscout/Provider/Deepinfra.py +59 -35
- webscout/Provider/Glider.py +222 -0
- webscout/Provider/Groq.py +26 -18
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/Jadve.py +108 -77
- webscout/Provider/Llama3.py +117 -94
- webscout/Provider/Marcus.py +65 -10
- webscout/Provider/Netwrck.py +61 -49
- webscout/Provider/PI.py +77 -122
- webscout/Provider/PizzaGPT.py +129 -82
- webscout/Provider/TextPollinationsAI.py +229 -0
- webscout/Provider/Youchat.py +28 -22
- webscout/Provider/__init__.py +12 -4
- webscout/Provider/askmyai.py +2 -2
- webscout/Provider/chatglm.py +205 -0
- webscout/Provider/dgaf.py +215 -0
- webscout/Provider/gaurish.py +106 -66
- webscout/Provider/hermes.py +219 -0
- webscout/Provider/llamatutor.py +72 -62
- webscout/Provider/llmchat.py +62 -35
- webscout/Provider/meta.py +6 -6
- webscout/Provider/multichat.py +205 -104
- webscout/Provider/typegpt.py +26 -23
- webscout/Provider/yep.py +3 -3
- webscout/litagent/__init__.py +3 -146
- webscout/litagent/agent.py +120 -0
- webscout/litagent/constants.py +31 -0
- webscout/tempid.py +0 -4
- webscout/version.py +1 -1
- webscout/webscout_search.py +1141 -1140
- webscout/webscout_search_async.py +635 -635
- {webscout-6.9.dist-info → webscout-7.1.dist-info}/METADATA +37 -33
- {webscout-6.9.dist-info → webscout-7.1.dist-info}/RECORD +49 -41
- {webscout-6.9.dist-info → webscout-7.1.dist-info}/WHEEL +1 -1
- webscout/Provider/AISEARCH/ooai.py +0 -155
- webscout/Provider/RUBIKSAI.py +0 -272
- {webscout-6.9.dist-info → webscout-7.1.dist-info}/LICENSE.md +0 -0
- {webscout-6.9.dist-info → webscout-7.1.dist-info}/entry_points.txt +0 -0
- {webscout-6.9.dist-info → webscout-7.1.dist-info}/top_level.txt +0 -0
webscout/Provider/dgaf.py
ADDED
@@ -0,0 +1,215 @@
+import requests
+import re
+import json
+from typing import Any, Dict, Generator, Optional
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout import LitAgent
+from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
+
+class DGAFAI(Provider):
+    """
+    A class to interact with the DGAF.ai API with logging capabilities.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful AI assistant.",
+        logging: bool = False
+    ):
+        """Initializes the DGAFAI API client with logging support."""
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://www.dgaf.ai/api/chat"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.system_prompt = system_prompt
+
+        self.headers = {
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "cookie": "_ga=GA1.1.1717609725.1738729535; _ga_52CD0XKYNM=GS1.1.1738729535.1.0.1738729546.0.0.0",
+            "dnt": "1",
+            "origin": "https://www.dgaf.ai",
+            "referer": "https://www.dgaf.ai/?via=topaitools",
+            "sec-ch-ua": '"Not A(Brand";v="8", "Chromium";v="132", "Microsoft Edge";v="132"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": LitAgent().random(),
+        }
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+
+        self.__available_optimizers = (
+            method for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        # Initialize logger if enabled
+        self.logger = LitLogger(
+            name="DGAFAI",
+            format=LogFormat.MODERN_EMOJI,
+            color_scheme=ColorScheme.CYBERPUNK
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info("DGAFAI initialized successfully")
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any] | Generator[str, None, None]:
+        """Chat with AI.
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Return raw streaming response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            Union[Dict, Generator[Dict, None, None]]: Generated response.
+        """
+        if self.logger:
+            self.logger.debug(f"Processing ask call with prompt: {prompt[:50]}...")
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
+            else:
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                raise Exception(
+                    f"Optimizer is not one of {list(self.__available_optimizers)}"
+                )
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ]
+        }
+
+        def for_stream():
+            if self.logger:
+                self.logger.debug("Sending streaming request to DGAF.ai API...")
+            try:
+                with self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
+                    response.raise_for_status()  # Check for HTTP errors
+                    if self.logger:
+                        self.logger.debug(response.text)
+                    streaming_text = ""
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line:
+                            match = re.search(r'0:"(.*?)"', line)
+                            if match:
+                                content = match.group(1)
+                                if content:
+                                    streaming_text += content
+                                    # if self.logger:
+                                    #     self.logger.debug(f"Received content: {content[:30]}...")
+                                    yield content if raw else dict(text=content)
+                    self.last_response.update(dict(text=streaming_text))
+                    self.conversation.update_chat_history(
+                        prompt, self.get_message(self.last_response)
+                    )
+                    if self.logger:
+                        self.logger.info("Streaming response completed successfully")
+            except requests.exceptions.RequestException as e:
+                if self.logger:
+                    self.logger.error(f"Request error: {e}")
+                raise exceptions.ProviderConnectionError(f"Request failed: {e}")
+
+        def for_non_stream():
+            full_response = ""
+            for chunk in for_stream():
+                full_response += chunk if raw else chunk['text']
+            return {"text": full_response}
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str | Generator[str, None, None]:
+        """Generate chat response as a string.
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
+            conversationally (bool, optional): Use conversational mode when using optimizer. Defaults to False.
+        Returns:
+            str or Generator[str, None, None]: Generated response.
+        """
+        if self.logger:
+            self.logger.debug(f"Chat method invoked with prompt: {prompt[:50]}...")
+        def for_stream():
+            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+                yield self.get_message(response)
+        def for_non_stream():
+            return self.get_message(
+                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+            )
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response.
+
+        Args:
+            response (dict): Response from the ask method.
+        Returns:
+            str: Extracted message.
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+
+if __name__ == "__main__":
+    from rich import print
+    ai = DGAFAI(logging=False)
+    response = ai.chat("write a poem about AI", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
webscout/Provider/gaurish.py
CHANGED
@@ -1,7 +1,6 @@
 import requests
 import json
-import
-from typing import Any, Dict, Optional, Generator, List, Union
+from typing import Any, Dict, Generator, Union
 import uuid
 
 from webscout.AIutel import Optimizers
@@ -9,11 +8,12 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
-
 from webscout import LitAgent
+from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
+
 class GaurishCerebras(Provider):
     """
-    A class to interact with the Gaurish Cerebras API.
+    A class to interact with the Gaurish Cerebras API with comprehensive logging.
     """
 
     def __init__(
@@ -27,52 +27,40 @@ class GaurishCerebras(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        system_prompt: str = "You are a helpful assistant.",
+        system_prompt: str = "You are a helpful assistant.",
+        logging: bool = False
     ):
-        """Initializes the Gaurish Cerebras API client."""
-        self.
+        """Initializes the Gaurish Cerebras API client with logging capabilities."""
+        self.logger = LitLogger(
+            name="GaurishCerebras",
+            format=LogFormat.MODERN_EMOJI,
+            color_scheme=ColorScheme.CYBERPUNK
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info("Initializing GaurishCerebras client")
+
+        self.chat_endpoint = "https://proxy.gaurish.xyz/api/cerebras/v1/chat/completions"
+
         self.headers = {
             "Content-Type": "application/json",
-            "Accept": "
-            "
-            "access-control-allow-headers": "*",
-            "access-control-allow-methods": "*",
-            "access-control-allow-origin": "*",
-            "cache-control": "public, max-age=0, must-revalidate",
-            "referrer-policy": "strict-origin-when-cross-origin",
-            "content-type": "text/event-stream; charset=utf-8",
-            "strict-transport-security": "max-age=3600; includeSubDomains",
-            "x-content-type-options": "nosniff",
-            "x-matched-path": "/api/cerebras/[...path]",
-            "x-ratelimit-limit-requests-day": "30000",
-            "x-ratelimit-limit-tokens-minute": "60000",
-            "x-ratelimit-remaining-requests-day": "29984",
-            "x-ratelimit-remaining-tokens-minute": "60000",
-            "x-ratelimit-reset-requests-day": "24092.23299384117",
-            "x-ratelimit-reset-tokens-minute": "32.232993841171265",
-            "x-request-id": "0vWYzSEvd9Ytk5Zvl8NGRfT_Ekjm0ErInwwxlihBPyqUBAjJpyXwCg==",
-            "x-vercel-id": "bom1::nsbfd-1729703907288-16e74bb1db50",
-            "accept": "application/json",
+            "Accept": "application/json",
+            "authorization": "Bearer 123",
             "accept-encoding": "gzip, deflate, br, zstd",
             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
             "dnt": "1",
             "origin": "https://chat.gaurish.xyz",
             "priority": "u=1, i",
             "referer": "https://chat.gaurish.xyz/",
-            "sec-ch-ua": "
+            "sec-ch-ua": '"Not A(Brand";v="8", "Chromium";v="132", "Microsoft Edge";v="132"',
             "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": "
+            "sec-ch-ua-platform": '"Windows"',
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-site",
-            "user-agent":
-
-
-            "x-stainless-os": "Unknown",
-            "x-stainless-package-version": "4.67.3",
-            "x-stainless-retry-count": "0",
-            "x-stainless-runtime": "browser:chrome",
-            "x-stainless-runtime-version": "130.0.0",
+            "user-agent": ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
+                           "AppleWebKit/537.36 (KHTML, like Gecko) "
+                           "Chrome/132.0.0.0 Safari/537.36 Edg/132.0.0.0"),
         }
         self.session = requests.Session()
         self.session.headers.update(self.headers)
@@ -82,6 +70,11 @@ class GaurishCerebras(Provider):
 
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
+
+        if self.logger:
+            self.logger.debug(f"Session configured with timeout: {timeout}")
+            self.logger.debug(f"Max tokens set to: {max_tokens}")
+
         self.__available_optimizers = (
             method
             for method in dir(Optimizers)
@@ -98,8 +91,10 @@ class GaurishCerebras(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.system_prompt = system_prompt
+        self.system_prompt = system_prompt
 
+        if self.logger:
+            self.logger.info("GaurishCerebras initialization completed successfully")
 
     def ask(
         self,
@@ -109,6 +104,13 @@ class GaurishCerebras(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[Dict, Generator]:
+        """
+        Sends a prompt to the API and returns the response with logging.
+        If stream is True, returns a generator for streamed responses.
+        """
+        if self.logger:
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
 
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -116,57 +118,81 @@ class GaurishCerebras(Provider):
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
-
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                raise Exception(f"Optimizer is not one of {list(self.__available_optimizers)}")
 
         payload = {
             "messages": [
                 {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt},
             ],
-            "model": "llama3.
+            "model": "llama3.3-70b",
+            "max_tokens": self.max_tokens_to_sample,
             "temperature": 0.75,
             "stream": stream,
         }
 
         def for_stream():
             try:
-
-
+                if self.logger:
+                    self.logger.debug("Initiating streaming request to API")
+
+                with self.session.post(self.chat_endpoint, json=payload, stream=True, timeout=self.timeout) as response:
+                    if response.status_code != 200:
+                        if self.logger:
+                            self.logger.error(f"API request failed. Status: {response.status_code}")
+                        raise exceptions.FailedToGenerateResponseError(
+                            f"Request failed with status code {response.status_code}"
+                        )
+
+                    if self.logger:
+                        self.logger.info(f"API connection established successfully. Status: {response.status_code}")
+
                     streaming_text = ""
                     for line in response.iter_lines(decode_unicode=True):
                         if line:
                             line = line.strip()
                             if line.startswith("data: "):
-
-                                if
+                                json_str = line[6:]
+                                if json_str == "[DONE]":
+                                    if self.logger:
+                                        self.logger.debug("Stream completed")
                                     break
                                 try:
-
-                                    if
-
-
-
-
-
+                                    json_data = json.loads(json_str)
+                                    if 'choices' in json_data:
+                                        choice = json_data['choices'][0]
+                                        if 'delta' in choice and 'content' in choice['delta']:
+                                            content = choice['delta']['content']
+                                            streaming_text += content
+                                            yield dict(text=content) if raw else dict(text=content)
+                                except json.JSONDecodeError as e:
+                                    if self.logger:
+                                        self.logger.error(f"JSON parsing error: {str(e)}")
                                     pass
+
                 self.conversation.update_chat_history(prompt, streaming_text)
-                self.
+                if self.logger:
+                    self.logger.debug("Response processing completed")
 
-            except requests.
+            except requests.RequestException as e:
+                if self.logger:
+                    self.logger.error(f"Request failed: {str(e)}")
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
-
         def for_non_stream():
+            if self.logger:
+                self.logger.debug("Processing non-streaming request")
             for _ in for_stream():
                 pass
            return self.last_response
 
         return for_stream() if stream else for_non_stream()
 
-
-
     def chat(
         self,
         prompt: str,
@@ -174,33 +200,47 @@ class GaurishCerebras(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[str, Generator]:
+        """
+        A convenience method to return just the text message from the response with logging.
+        """
+        if self.logger:
+            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
 
         def for_stream():
             for response in self.ask(
                 prompt, stream=True, optimizer=optimizer, conversationally=conversationally
             ):
-                yield self.get_message(response)
+                yield response if isinstance(response, str) else self.get_message(response)
 
         def for_non_stream():
-
-
-                prompt, stream=False, optimizer=optimizer, conversationally=conversationally
-            )
+            resp = self.ask(
+                prompt, stream=False, optimizer=optimizer, conversationally=conversationally
             )
+            return resp if isinstance(resp, str) else self.get_message(resp)
 
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
-
-
+        """
+        Retrieve the message text from the API response with logging.
+        """
+        if not isinstance(response, dict):
+            if self.logger:
+                self.logger.warning("Invalid response format received")
+            return ""
 
+        if "text" in response and response["text"]:
+            return response["text"]
 
+        if self.logger:
+            self.logger.warning("No valid message content found in response")
+        return ""
 
 if __name__ == "__main__":
     from rich import print
-    bot = GaurishCerebras()
+    bot = GaurishCerebras(logging=True)
     try:
-        response = bot.chat("
+        response = bot.chat("what is meaning of life", stream=True)
         for chunk in response:
             print(chunk, end="", flush=True)
     except Exception as e: