webscout 7.8-py3-none-any.whl → 7.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (41)
  1. webscout/Bard.py +5 -25
  2. webscout/DWEBS.py +476 -476
  3. webscout/Extra/__init__.py +2 -0
  4. webscout/Extra/autocoder/__init__.py +1 -1
  5. webscout/Extra/autocoder/{rawdog.py → autocoder.py} +849 -849
  6. webscout/Extra/tempmail/__init__.py +26 -0
  7. webscout/Extra/tempmail/async_utils.py +141 -0
  8. webscout/Extra/tempmail/base.py +156 -0
  9. webscout/Extra/tempmail/cli.py +187 -0
  10. webscout/Extra/tempmail/mail_tm.py +361 -0
  11. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  12. webscout/Provider/Deepinfra.py +288 -286
  13. webscout/Provider/ElectronHub.py +709 -716
  14. webscout/Provider/ExaChat.py +20 -5
  15. webscout/Provider/Gemini.py +167 -165
  16. webscout/Provider/Groq.py +38 -24
  17. webscout/Provider/LambdaChat.py +2 -1
  18. webscout/Provider/TextPollinationsAI.py +232 -230
  19. webscout/Provider/__init__.py +0 -4
  20. webscout/Provider/copilot.py +427 -427
  21. webscout/Provider/freeaichat.py +8 -1
  22. webscout/Provider/uncovr.py +312 -299
  23. webscout/Provider/yep.py +64 -12
  24. webscout/__init__.py +38 -36
  25. webscout/cli.py +293 -293
  26. webscout/conversation.py +350 -17
  27. webscout/litprinter/__init__.py +59 -667
  28. webscout/optimizers.py +419 -419
  29. webscout/update_checker.py +14 -12
  30. webscout/version.py +1 -1
  31. webscout/webscout_search.py +1282 -1282
  32. webscout/webscout_search_async.py +813 -813
  33. {webscout-7.8.dist-info → webscout-7.9.dist-info}/METADATA +44 -39
  34. {webscout-7.8.dist-info → webscout-7.9.dist-info}/RECORD +38 -35
  35. webscout/Provider/DARKAI.py +0 -225
  36. webscout/Provider/EDITEE.py +0 -192
  37. webscout/litprinter/colors.py +0 -54
  38. {webscout-7.8.dist-info → webscout-7.9.dist-info}/LICENSE.md +0 -0
  39. {webscout-7.8.dist-info → webscout-7.9.dist-info}/WHEEL +0 -0
  40. {webscout-7.8.dist-info → webscout-7.9.dist-info}/entry_points.txt +0 -0
  41. {webscout-7.8.dist-info → webscout-7.9.dist-info}/top_level.txt +0 -0
@@ -1,287 +1,289 @@
1
- import requests
2
- import json
3
- import os
4
- from typing import Any, Dict, Optional, Generator, Union
5
-
6
- from webscout.AIutel import Optimizers
7
- from webscout.AIutel import Conversation
8
- from webscout.AIutel import AwesomePrompts, sanitize_stream
9
- from webscout.AIbase import Provider, AsyncProvider
10
- from webscout import exceptions
11
- from webscout.litagent import LitAgent
12
-
13
- class DeepInfra(Provider):
14
- """
15
- A class to interact with the DeepInfra API with LitAgent user-agent.
16
- """
17
-
18
- AVAILABLE_MODELS = [
19
- # "anthropic/claude-3-7-sonnet-latest", # >>>> NOT WORKING
20
- "deepseek-ai/DeepSeek-R1",
21
- "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
22
- "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
23
- "deepseek-ai/DeepSeek-R1-Turbo",
24
- "deepseek-ai/DeepSeek-V3",
25
- # "google/gemma-2-27b-it", # >>>> NOT WORKING
26
- # "google/gemma-2-9b-it", # >>>> NOT WORKING
27
- "google/gemma-3-27b-it",
28
- # "google/gemini-1.5-flash", # >>>> NOT WORKING
29
- # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
30
- # "google/gemini-2.0-flash-001", # >>>> NOT WORKING
31
- # "Gryphe/MythoMax-L2-13b", # >>>> NOT WORKING
32
- # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
33
- # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
34
- "meta-llama/Llama-3.2-90B-Vision-Instruct",
35
- "meta-llama/Llama-3.2-11B-Vision-Instruct",
36
- # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
37
- # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
38
- # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
39
- "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
40
- "meta-llama/Meta-Llama-3.1-8B-Instruct",
41
- "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
42
- # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
43
- "microsoft/phi-4",
44
- "microsoft/Phi-4-multimodal-instruct",
45
- "microsoft/WizardLM-2-8x22B",
46
- # "mistralai/Mixtral-8x7B-Instruct-v0.1", # >>>> NOT WORKING
47
- # "mistralai/Mistral-7B-Instruct-v0.3", # >>>> NOT WORKING
48
- # "mistralai/Mistral-Nemo-Instruct-2407", # >>>> NOT WORKING
49
- "mistralai/Mistral-Small-24B-Instruct-2501",
50
- "nvidia/Llama-3.1-Nemotron-70B-Instruct",
51
- # "NousResearch/Hermes-3-Llama-3.1-405B", # >>>> NOT WORKING
52
- # "NovaSky-AI/Sky-T1-32B-Preview", # >>>> NOT WORKING
53
- "Qwen/QwQ-32B",
54
- # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
55
- "Qwen/Qwen2.5-72B-Instruct",
56
- "Qwen/Qwen2.5-Coder-32B-Instruct",
57
- # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
58
- # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
59
- "meta-llama/Llama-3.3-70B-Instruct",
60
- "meta-llama/Llama-3.3-70B-Instruct-Turbo",
61
- ]
62
-
63
- def __init__(
64
- self,
65
- is_conversation: bool = True,
66
- max_tokens: int = 2049, # Set a reasonable default
67
- timeout: int = 30,
68
- intro: str = None,
69
- filepath: str = None,
70
- update_file: bool = True,
71
- proxies: dict = {},
72
- history_offset: int = 10250,
73
- act: str = None,
74
- model: str = "meta-llama/Llama-3.3-70B-Instruct-Turbo",
75
- browser: str = "chrome"
76
- ):
77
- """Initializes the DeepInfra API client."""
78
- if model not in self.AVAILABLE_MODELS:
79
- raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
80
-
81
- self.url = "https://api.deepinfra.com/v1/openai/chat/completions"
82
-
83
- # Initialize LitAgent for user agent generation
84
- self.agent = LitAgent()
85
- # Use fingerprinting to create a consistent browser identity
86
- self.fingerprint = self.agent.generate_fingerprint(browser)
87
-
88
- # Use the fingerprint for headers
89
- self.headers = {
90
- "Accept": self.fingerprint["accept"],
91
- "Accept-Encoding": "gzip, deflate, br, zstd",
92
- "Accept-Language": self.fingerprint["accept_language"],
93
- "Content-Type": "application/json",
94
- "Cache-Control": "no-cache",
95
- "Connection": "keep-alive",
96
- "Origin": "https://deepinfra.com",
97
- "Pragma": "no-cache",
98
- "Referer": "https://deepinfra.com/",
99
- "Sec-Fetch-Dest": "empty",
100
- "Sec-Fetch-Mode": "cors",
101
- "Sec-Fetch-Site": "same-site",
102
- "X-Deepinfra-Source": "web-embed",
103
- "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
104
- "Sec-CH-UA-Mobile": "?0",
105
- "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
106
- "User-Agent": self.fingerprint["user_agent"],
107
- }
108
-
109
- self.session = requests.Session()
110
- self.session.headers.update(self.headers)
111
- self.session.proxies.update(proxies)
112
-
113
- self.is_conversation = is_conversation
114
- self.max_tokens_to_sample = max_tokens
115
- self.timeout = timeout
116
- self.last_response = {}
117
- self.model = model
118
-
119
- self.__available_optimizers = (
120
- method
121
- for method in dir(Optimizers)
122
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
123
- )
124
- Conversation.intro = (
125
- AwesomePrompts().get_act(
126
- act, raise_not_found=True, default=None, case_insensitive=True
127
- )
128
- if act
129
- else intro or Conversation.intro
130
- )
131
-
132
- self.conversation = Conversation(
133
- is_conversation, self.max_tokens_to_sample, filepath, update_file
134
- )
135
- self.conversation.history_offset = history_offset
136
-
137
- def refresh_identity(self, browser: str = None):
138
- """
139
- Refreshes the browser identity fingerprint.
140
-
141
- Args:
142
- browser: Specific browser to use for the new fingerprint
143
- """
144
- browser = browser or self.fingerprint.get("browser_type", "chrome")
145
- self.fingerprint = self.agent.generate_fingerprint(browser)
146
-
147
- # Update headers with new fingerprint
148
- self.headers.update({
149
- "Accept": self.fingerprint["accept"],
150
- "Accept-Language": self.fingerprint["accept_language"],
151
- "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
152
- "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
153
- "User-Agent": self.fingerprint["user_agent"],
154
- })
155
-
156
- # Update session headers
157
- for header, value in self.headers.items():
158
- self.session.headers[header] = value
159
-
160
- return self.fingerprint
161
-
162
- def ask(
163
- self,
164
- prompt: str,
165
- stream: bool = False,
166
- raw: bool = False,
167
- optimizer: str = None,
168
- conversationally: bool = False,
169
- ) -> Union[Dict[str, Any], Generator]:
170
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
171
- if optimizer:
172
- if optimizer in self.__available_optimizers:
173
- conversation_prompt = getattr(Optimizers, optimizer)(
174
- conversation_prompt if conversationally else prompt
175
- )
176
- else:
177
- raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
178
-
179
- # Payload construction
180
- payload = {
181
- "model": self.model,
182
- "messages": [
183
- {"role": "system", "content": "You are a helpful assistant."},
184
- {"role": "user", "content": conversation_prompt},
185
- ],
186
- "stream": stream
187
- }
188
-
189
- def for_stream():
190
- try:
191
- with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
192
- if response.status_code != 200:
193
- raise exceptions.FailedToGenerateResponseError(
194
- f"Request failed with status code {response.status_code}"
195
- )
196
-
197
- streaming_text = ""
198
- for line in response.iter_lines(decode_unicode=True):
199
- if line:
200
- line = line.strip()
201
- if line.startswith("data: "):
202
- json_str = line[6:]
203
- if json_str == "[DONE]":
204
- break
205
- try:
206
- json_data = json.loads(json_str)
207
- if 'choices' in json_data:
208
- choice = json_data['choices'][0]
209
- if 'delta' in choice and 'content' in choice['delta']:
210
- content = choice['delta']['content']
211
- streaming_text += content
212
- resp = dict(text=content)
213
- yield resp if raw else resp
214
- except json.JSONDecodeError:
215
- continue
216
-
217
- self.last_response = {"text": streaming_text}
218
- self.conversation.update_chat_history(prompt, streaming_text)
219
-
220
- except requests.RequestException as e:
221
- raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
222
-
223
- def for_non_stream():
224
- try:
225
- response = requests.post(self.url, headers=self.headers, data=json.dumps(payload), timeout=self.timeout)
226
- if response.status_code != 200:
227
- raise exceptions.FailedToGenerateResponseError(
228
- f"Request failed with status code {response.status_code}"
229
- )
230
-
231
- response_data = response.json()
232
- if 'choices' in response_data and len(response_data['choices']) > 0:
233
- content = response_data['choices'][0].get('message', {}).get('content', '')
234
- self.last_response = {"text": content}
235
- self.conversation.update_chat_history(prompt, content)
236
- return {"text": content}
237
- else:
238
- raise exceptions.FailedToGenerateResponseError("No response content found")
239
- except Exception as e:
240
- raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
241
-
242
- return for_stream() if stream else for_non_stream()
243
-
244
- def chat(
245
- self,
246
- prompt: str,
247
- stream: bool = False,
248
- optimizer: str = None,
249
- conversationally: bool = False,
250
- ) -> Union[str, Generator[str, None, None]]:
251
- def for_stream():
252
- for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
253
- yield self.get_message(response)
254
- def for_non_stream():
255
- return self.get_message(
256
- self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
257
- )
258
- return for_stream() if stream else for_non_stream()
259
-
260
- def get_message(self, response: dict) -> str:
261
- assert isinstance(response, dict), "Response should be of dict data-type only"
262
- return response["text"]
263
-
264
- if __name__ == "__main__":
265
- print("-" * 80)
266
- print(f"{'Model':<50} {'Status':<10} {'Response'}")
267
- print("-" * 80)
268
-
269
- for model in DeepInfra.AVAILABLE_MODELS:
270
- try:
271
- test_ai = DeepInfra(model=model, timeout=60)
272
- response = test_ai.chat("Say 'Hello' in one word", stream=True)
273
- response_text = ""
274
- for chunk in response:
275
- response_text += chunk
276
-
277
- if response_text and len(response_text.strip()) > 0:
278
- status = "✓"
279
- # Clean and truncate response
280
- clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
281
- display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
282
- else:
283
- status = ""
284
- display_text = "Empty or invalid response"
285
- print(f"\r{model:<50} {status:<10} {display_text}")
286
- except Exception as e:
1
+ import requests
2
+ import json
3
+ import os
4
+ from typing import Any, Dict, Optional, Generator, Union
5
+
6
+ from webscout.AIutel import Optimizers
7
+ from webscout.AIutel import Conversation
8
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
9
+ from webscout.AIbase import Provider, AsyncProvider
10
+ from webscout import exceptions
11
+ from webscout.litagent import LitAgent
12
+
13
class DeepInfra(Provider):
    """
    Client for the DeepInfra OpenAI-compatible chat-completions API.

    A LitAgent browser fingerprint supplies realistic request headers, and
    the shared webscout ``Conversation`` machinery maintains chat history.
    """

    AVAILABLE_MODELS = [
        # "anthropic/claude-3-7-sonnet-latest", # >>>> NOT WORKING
        "deepseek-ai/DeepSeek-R1",
        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        "deepseek-ai/DeepSeek-R1-Turbo",
        "deepseek-ai/DeepSeek-V3",
        "google/gemma-2-27b-it",
        "google/gemma-2-9b-it",
        "google/gemma-3-27b-it",
        # "google/gemini-1.5-flash", # >>>> NOT WORKING
        # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
        # "google/gemini-2.0-flash-001", # >>>> NOT WORKING
        # "Gryphe/MythoMax-L2-13b", # >>>> NOT WORKING
        # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
        # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
        "meta-llama/Llama-3.2-90B-Vision-Instruct",
        "meta-llama/Llama-3.2-11B-Vision-Instruct",
        # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
        # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
        # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
        "meta-llama/Meta-Llama-3.1-8B-Instruct",
        "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
        # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
        "microsoft/phi-4",
        "microsoft/Phi-4-multimodal-instruct",
        "microsoft/WizardLM-2-8x22B",
        # "mistralai/Mixtral-8x7B-Instruct-v0.1", # >>>> NOT WORKING
        # "mistralai/Mistral-7B-Instruct-v0.3", # >>>> NOT WORKING
        # "mistralai/Mistral-Nemo-Instruct-2407", # >>>> NOT WORKING
        "mistralai/Mistral-Small-24B-Instruct-2501",
        "nvidia/Llama-3.1-Nemotron-70B-Instruct",
        # "NousResearch/Hermes-3-Llama-3.1-405B", # >>>> NOT WORKING
        # "NovaSky-AI/Sky-T1-32B-Preview", # >>>> NOT WORKING
        "Qwen/QwQ-32B",
        # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
        "Qwen/Qwen2.5-72B-Instruct",
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
        # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
        "meta-llama/Llama-3.3-70B-Instruct",
        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,  # Set a reasonable default
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "meta-llama/Llama-3.3-70B-Instruct-Turbo",
        browser: str = "chrome"
    ):
        """Initializes the DeepInfra API client.

        Args:
            is_conversation: Maintain chat history across calls.
            max_tokens: Token budget passed to the Conversation history trimmer.
            timeout: HTTP timeout in seconds for each request.
            intro: Optional intro prompt (ignored when ``act`` is given).
            filepath: Optional path for persisting conversation history.
            update_file: Whether to write history updates back to ``filepath``.
            proxies: Optional requests-style proxy mapping (default: none).
            history_offset: Maximum history length kept in the prompt.
            act: AwesomePrompts persona key used to select the intro prompt.
            model: Model identifier; must be in ``AVAILABLE_MODELS``.
            browser: Browser name used to generate the LitAgent fingerprint.

        Raises:
            ValueError: If ``model`` is not in ``AVAILABLE_MODELS``.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.url = "https://api.deepinfra.com/v1/openai/chat/completions"

        # Initialize LitAgent for user agent generation and use fingerprinting
        # to create a consistent browser identity across requests.
        self.agent = LitAgent()
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Use the fingerprint for headers
        self.headers = {
            "Accept": self.fingerprint["accept"],
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": self.fingerprint["accept_language"],
            "Content-Type": "application/json",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Origin": "https://deepinfra.com",
            "Pragma": "no-cache",
            "Referer": "https://deepinfra.com/",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-site",
            "X-Deepinfra-Source": "web-embed",
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
        }

        self.session = requests.Session()
        self.session.headers.update(self.headers)
        # `proxies=None` default avoids the shared mutable-default pitfall.
        self.session.proxies.update(proxies or {})

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model

        # Materialize as a tuple: a bare generator would be exhausted after the
        # first membership test in ask(), silently disabling optimizers on
        # every subsequent call.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def refresh_identity(self, browser: str = None):
        """
        Refreshes the browser identity fingerprint.

        Args:
            browser: Specific browser to use for the new fingerprint;
                defaults to the browser type of the current fingerprint.

        Returns:
            The newly generated fingerprint mapping.
        """
        browser = browser or self.fingerprint.get("browser_type", "chrome")
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Update headers with new fingerprint; keep the previous Sec-CH-UA
        # if the new fingerprint does not provide one.
        self.headers.update({
            "Accept": self.fingerprint["accept"],
            "Accept-Language": self.fingerprint["accept_language"],
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
        })

        # Propagate the refreshed headers to the live session.
        for header, value in self.headers.items():
            self.session.headers[header] = value

        return self.fingerprint

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """Send ``prompt`` to the DeepInfra chat-completions endpoint.

        Args:
            prompt: The user message.
            stream: Yield chunks as they arrive instead of one final dict.
            raw: When streaming, yield plain text chunks instead of
                ``{"text": ...}`` dicts.
            optimizer: Name of an ``Optimizers`` method applied to the prompt.
            conversationally: Apply the optimizer to the full conversation
                prompt rather than the bare prompt.

        Returns:
            ``{"text": ...}`` for non-streaming calls, otherwise a generator.

        Raises:
            exceptions.FailedToGenerateResponseError: On HTTP or network failure.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Payload construction
        payload = {
            "model": self.model,
            "messages": [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": conversation_prompt},
            ],
            "stream": stream
        }

        def for_stream():
            try:
                # Use the configured session so proxies and fingerprint
                # headers are actually honored (requests.post would bypass
                # self.session.proxies).
                with self.session.post(self.url, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
                    if response.status_code != 200:
                        raise exceptions.FailedToGenerateResponseError(
                            f"Request failed with status code {response.status_code}"
                        )

                    streaming_text = ""
                    for line in response.iter_lines(decode_unicode=True):
                        if not line:
                            continue
                        line = line.strip()
                        if not line.startswith("data: "):
                            continue
                        json_str = line[6:]
                        if json_str == "[DONE]":
                            break
                        try:
                            json_data = json.loads(json_str)
                        except json.JSONDecodeError:
                            continue
                        if 'choices' in json_data:
                            choice = json_data['choices'][0]
                            if 'delta' in choice and 'content' in choice['delta']:
                                content = choice['delta']['content']
                                streaming_text += content
                                # Honor `raw`: plain text chunks vs dict chunks.
                                yield content if raw else dict(text=content)

                    self.last_response = {"text": streaming_text}
                    self.conversation.update_chat_history(prompt, streaming_text)

            except requests.RequestException as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")

        def for_non_stream():
            try:
                response = self.session.post(self.url, data=json.dumps(payload), timeout=self.timeout)
                if response.status_code != 200:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Request failed with status code {response.status_code}"
                    )

                response_data = response.json()
                if 'choices' in response_data and len(response_data['choices']) > 0:
                    content = response_data['choices'][0].get('message', {}).get('content', '')
                    self.last_response = {"text": content}
                    self.conversation.update_chat_history(prompt, content)
                    return {"text": content}
                else:
                    raise exceptions.FailedToGenerateResponseError("No response content found")
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """Return the model's reply as a string (or a generator of strings).

        Thin wrapper over :meth:`ask` that extracts the text field.
        """
        def for_stream():
            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Extract the text payload from an :meth:`ask` response dict."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
266
if __name__ == "__main__":
    # Smoke-test every advertised model and print a one-line status report.
    divider = "-" * 80
    print(divider)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print(divider)

    for model in DeepInfra.AVAILABLE_MODELS:
        try:
            client = DeepInfra(model=model, timeout=60)
            chunks = client.chat("Say 'Hello' in one word", stream=True)
            reply = "".join(chunks)

            if reply.strip():
                status = "✓"
                # Strip anything that does not survive a UTF-8 round-trip,
                # then truncate long replies for the table.
                cleaned = reply.strip().encode('utf-8', errors='ignore').decode('utf-8')
                if len(cleaned) > 50:
                    shown = cleaned[:50] + "..."
                else:
                    shown = cleaned
            else:
                status = "✗"
                shown = "Empty or invalid response"
            print(f"\r{model:<50} {status:<10} {shown}")
        except Exception as e:
            print(f"\r{model:<50} {'✗':<10} {str(e)}")