webscout-7.8-py3-none-any.whl → webscout-8.0-py3-none-any.whl

This diff shows the changes between publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (66)
  1. webscout/Bard.py +5 -25
  2. webscout/DWEBS.py +476 -476
  3. webscout/Extra/GitToolkit/__init__.py +10 -0
  4. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  5. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  6. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  7. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  8. webscout/Extra/YTToolkit/ytapi/video.py +232 -103
  9. webscout/Extra/__init__.py +2 -0
  10. webscout/Extra/autocoder/__init__.py +1 -1
  11. webscout/Extra/autocoder/{rawdog.py → autocoder.py} +849 -849
  12. webscout/Extra/tempmail/__init__.py +26 -0
  13. webscout/Extra/tempmail/async_utils.py +141 -0
  14. webscout/Extra/tempmail/base.py +156 -0
  15. webscout/Extra/tempmail/cli.py +187 -0
  16. webscout/Extra/tempmail/mail_tm.py +361 -0
  17. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  18. webscout/Provider/AISEARCH/__init__.py +5 -1
  19. webscout/Provider/AISEARCH/hika_search.py +194 -0
  20. webscout/Provider/AISEARCH/monica_search.py +246 -0
  21. webscout/Provider/AISEARCH/scira_search.py +320 -0
  22. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  23. webscout/Provider/AllenAI.py +255 -122
  24. webscout/Provider/DeepSeek.py +1 -2
  25. webscout/Provider/Deepinfra.py +296 -286
  26. webscout/Provider/ElectronHub.py +709 -716
  27. webscout/Provider/ExaAI.py +261 -0
  28. webscout/Provider/ExaChat.py +28 -6
  29. webscout/Provider/Gemini.py +167 -165
  30. webscout/Provider/GithubChat.py +2 -1
  31. webscout/Provider/Groq.py +38 -24
  32. webscout/Provider/LambdaChat.py +2 -1
  33. webscout/Provider/Netwrck.py +3 -2
  34. webscout/Provider/OpenGPT.py +199 -0
  35. webscout/Provider/PI.py +39 -24
  36. webscout/Provider/TextPollinationsAI.py +232 -230
  37. webscout/Provider/Youchat.py +326 -296
  38. webscout/Provider/__init__.py +10 -4
  39. webscout/Provider/ai4chat.py +58 -56
  40. webscout/Provider/akashgpt.py +34 -22
  41. webscout/Provider/copilot.py +427 -427
  42. webscout/Provider/freeaichat.py +9 -2
  43. webscout/Provider/labyrinth.py +121 -20
  44. webscout/Provider/llmchatco.py +306 -0
  45. webscout/Provider/scira_chat.py +271 -0
  46. webscout/Provider/typefully.py +280 -0
  47. webscout/Provider/uncovr.py +312 -299
  48. webscout/Provider/yep.py +64 -12
  49. webscout/__init__.py +38 -36
  50. webscout/cli.py +293 -293
  51. webscout/conversation.py +350 -17
  52. webscout/litprinter/__init__.py +59 -667
  53. webscout/optimizers.py +419 -419
  54. webscout/update_checker.py +14 -12
  55. webscout/version.py +1 -1
  56. webscout/webscout_search.py +1346 -1282
  57. webscout/webscout_search_async.py +877 -813
  58. {webscout-7.8.dist-info → webscout-8.0.dist-info}/METADATA +44 -39
  59. {webscout-7.8.dist-info → webscout-8.0.dist-info}/RECORD +63 -46
  60. webscout/Provider/DARKAI.py +0 -225
  61. webscout/Provider/EDITEE.py +0 -192
  62. webscout/litprinter/colors.py +0 -54
  63. {webscout-7.8.dist-info → webscout-8.0.dist-info}/LICENSE.md +0 -0
  64. {webscout-7.8.dist-info → webscout-8.0.dist-info}/WHEEL +0 -0
  65. {webscout-7.8.dist-info → webscout-8.0.dist-info}/entry_points.txt +0 -0
  66. {webscout-7.8.dist-info → webscout-8.0.dist-info}/top_level.txt +0 -0
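Entries 60 and 61 above show that the DARKAI and EDITEE provider modules are deleted in 8.0. Below is a minimal compatibility sketch for code that imported them under 7.8 — only the module paths come from the list above; the probe-before-import style and the fallback are assumptions, not webscout's documented migration path:

    import importlib.util

    # webscout 8.0 removes webscout/Provider/DARKAI.py (entry 60 above); probe for
    # the module before importing so the same code runs against 7.8 and 8.0.
    if importlib.util.find_spec("webscout.Provider.DARKAI") is not None:
        from webscout.Provider import DARKAI  # present in webscout 7.8
    else:
        DARKAI = None  # removed in 8.0; callers must switch to a surviving provider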
webscout/Provider/Deepinfra.py
@@ -1,287 +1,297 @@
- import requests
- import json
- import os
- from typing import Any, Dict, Optional, Generator, Union
-
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts, sanitize_stream
- from webscout.AIbase import Provider, AsyncProvider
- from webscout import exceptions
- from webscout.litagent import LitAgent
-
- class DeepInfra(Provider):
-     """
-     A class to interact with the DeepInfra API with LitAgent user-agent.
-     """
-
-     AVAILABLE_MODELS = [
-         # "anthropic/claude-3-7-sonnet-latest", # >>>> NOT WORKING
-         "deepseek-ai/DeepSeek-R1",
-         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
-         "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
-         "deepseek-ai/DeepSeek-R1-Turbo",
-         "deepseek-ai/DeepSeek-V3",
-         # "google/gemma-2-27b-it", # >>>> NOT WORKING
-         # "google/gemma-2-9b-it", # >>>> NOT WORKING
-         "google/gemma-3-27b-it",
-         # "google/gemini-1.5-flash", # >>>> NOT WORKING
-         # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
-         # "google/gemini-2.0-flash-001", # >>>> NOT WORKING
-         # "Gryphe/MythoMax-L2-13b", # >>>> NOT WORKING
-         # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
-         # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
-         "meta-llama/Llama-3.2-90B-Vision-Instruct",
-         "meta-llama/Llama-3.2-11B-Vision-Instruct",
-         # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
-         # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
-         # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
-         "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
-         "meta-llama/Meta-Llama-3.1-8B-Instruct",
-         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-         # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
-         "microsoft/phi-4",
-         "microsoft/Phi-4-multimodal-instruct",
-         "microsoft/WizardLM-2-8x22B",
-         # "mistralai/Mixtral-8x7B-Instruct-v0.1", # >>>> NOT WORKING
-         # "mistralai/Mistral-7B-Instruct-v0.3", # >>>> NOT WORKING
-         # "mistralai/Mistral-Nemo-Instruct-2407", # >>>> NOT WORKING
-         "mistralai/Mistral-Small-24B-Instruct-2501",
-         "nvidia/Llama-3.1-Nemotron-70B-Instruct",
-         # "NousResearch/Hermes-3-Llama-3.1-405B", # >>>> NOT WORKING
-         # "NovaSky-AI/Sky-T1-32B-Preview", # >>>> NOT WORKING
-         "Qwen/QwQ-32B",
-         # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
-         "Qwen/Qwen2.5-72B-Instruct",
-         "Qwen/Qwen2.5-Coder-32B-Instruct",
-         # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
-         # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
-         "meta-llama/Llama-3.3-70B-Instruct",
-         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-     ]
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 2049, # Set a reasonable default
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-         browser: str = "chrome"
-     ):
-         """Initializes the DeepInfra API client."""
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-         self.url = "https://api.deepinfra.com/v1/openai/chat/completions"
-
-         # Initialize LitAgent for user agent generation
-         self.agent = LitAgent()
-         # Use fingerprinting to create a consistent browser identity
-         self.fingerprint = self.agent.generate_fingerprint(browser)
-
-         # Use the fingerprint for headers
-         self.headers = {
-             "Accept": self.fingerprint["accept"],
-             "Accept-Encoding": "gzip, deflate, br, zstd",
-             "Accept-Language": self.fingerprint["accept_language"],
-             "Content-Type": "application/json",
-             "Cache-Control": "no-cache",
-             "Connection": "keep-alive",
-             "Origin": "https://deepinfra.com",
-             "Pragma": "no-cache",
-             "Referer": "https://deepinfra.com/",
-             "Sec-Fetch-Dest": "empty",
-             "Sec-Fetch-Mode": "cors",
-             "Sec-Fetch-Site": "same-site",
-             "X-Deepinfra-Source": "web-embed",
-             "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-             "Sec-CH-UA-Mobile": "?0",
-             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
-             "User-Agent": self.fingerprint["user_agent"],
-         }
-
-         self.session = requests.Session()
-         self.session.headers.update(self.headers)
-         self.session.proxies.update(proxies)
-
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-
-     def refresh_identity(self, browser: str = None):
-         """
-         Refreshes the browser identity fingerprint.
-
-         Args:
-             browser: Specific browser to use for the new fingerprint
-         """
-         browser = browser or self.fingerprint.get("browser_type", "chrome")
-         self.fingerprint = self.agent.generate_fingerprint(browser)
-
-         # Update headers with new fingerprint
-         self.headers.update({
-             "Accept": self.fingerprint["accept"],
-             "Accept-Language": self.fingerprint["accept_language"],
-             "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
-             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
-             "User-Agent": self.fingerprint["user_agent"],
-         })
-
-         # Update session headers
-         for header, value in self.headers.items():
-             self.session.headers[header] = value
-
-         return self.fingerprint
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[Dict[str, Any], Generator]:
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-         # Payload construction
-         payload = {
-             "model": self.model,
-             "messages": [
-                 {"role": "system", "content": "You are a helpful assistant."},
-                 {"role": "user", "content": conversation_prompt},
-             ],
-             "stream": stream
-         }
-
-         def for_stream():
-             try:
-                 with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
-                     if response.status_code != 200:
-                         raise exceptions.FailedToGenerateResponseError(
-                             f"Request failed with status code {response.status_code}"
-                         )
-
-                     streaming_text = ""
-                     for line in response.iter_lines(decode_unicode=True):
-                         if line:
-                             line = line.strip()
-                             if line.startswith("data: "):
-                                 json_str = line[6:]
-                                 if json_str == "[DONE]":
-                                     break
-                                 try:
-                                     json_data = json.loads(json_str)
-                                     if 'choices' in json_data:
-                                         choice = json_data['choices'][0]
-                                         if 'delta' in choice and 'content' in choice['delta']:
-                                             content = choice['delta']['content']
-                                             streaming_text += content
-                                             resp = dict(text=content)
-                                             yield resp if raw else resp
-                                 except json.JSONDecodeError:
-                                     continue
-
-                     self.last_response = {"text": streaming_text}
-                     self.conversation.update_chat_history(prompt, streaming_text)
-
-             except requests.RequestException as e:
-                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
-
-         def for_non_stream():
-             try:
-                 response = requests.post(self.url, headers=self.headers, data=json.dumps(payload), timeout=self.timeout)
-                 if response.status_code != 200:
-                     raise exceptions.FailedToGenerateResponseError(
-                         f"Request failed with status code {response.status_code}"
-                     )
-
-                 response_data = response.json()
-                 if 'choices' in response_data and len(response_data['choices']) > 0:
-                     content = response_data['choices'][0].get('message', {}).get('content', '')
-                     self.last_response = {"text": content}
-                     self.conversation.update_chat_history(prompt, content)
-                     return {"text": content}
-                 else:
-                     raise exceptions.FailedToGenerateResponseError("No response content found")
-             except Exception as e:
-                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[str, Generator[str, None, None]]:
-         def for_stream():
-             for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
-                 yield self.get_message(response)
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
-             )
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
-
- if __name__ == "__main__":
-     print("-" * 80)
-     print(f"{'Model':<50} {'Status':<10} {'Response'}")
-     print("-" * 80)
-
-     for model in DeepInfra.AVAILABLE_MODELS:
-         try:
-             test_ai = DeepInfra(model=model, timeout=60)
-             response = test_ai.chat("Say 'Hello' in one word", stream=True)
-             response_text = ""
-             for chunk in response:
-                 response_text += chunk
-
-             if response_text and len(response_text.strip()) > 0:
-                 status = "✓"
-                 # Clean and truncate response
-                 clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
-                 display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
-             else:
-                 status = ""
-                 display_text = "Empty or invalid response"
-             print(f"\r{model:<50} {status:<10} {display_text}")
-         except Exception as e:
+ import requests
+ import json
+ import os
+ from typing import Any, Dict, Optional, Generator, Union
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ class DeepInfra(Provider):
+     """
+     A class to interact with the DeepInfra API with LitAgent user-agent.
+     """
+
+     AVAILABLE_MODELS = [
+         # "anthropic/claude-3-7-sonnet-latest", # >>>> NOT WORKING
+
+         "deepseek-ai/DeepSeek-R1",
+         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+         "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+         "deepseek-ai/DeepSeek-R1-Turbo",
+         "deepseek-ai/DeepSeek-V3",
+
+         "google/gemma-2-27b-it",
+         "google/gemma-2-9b-it",
+         "google/gemma-3-27b-it",
+         "google/gemma-3-12b-it",
+         "google/gemma-3-4b-it",
+         # "google/gemini-1.5-flash", # >>>> NOT WORKING
+         # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
+         # "google/gemini-2.0-flash-001", # >>>> NOT WORKING
+
+         # "Gryphe/MythoMax-L2-13b", # >>>> NOT WORKING
+
+         # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
+         # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
+         "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+         "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+         # "meta-llama/Llama-3.2-90B-Vision-Instruct", # >>>> NOT WORKING
+         # "meta-llama/Llama-3.2-11B-Vision-Instruct", # >>>> NOT WORKING
+         "meta-llama/Llama-3.3-70B-Instruct",
+         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+         # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
+         # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
+         # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
+         # "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", # >>>> NOT WORKING
+         "meta-llama/Meta-Llama-3.1-8B-Instruct",
+         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
+         # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
+
+         "microsoft/phi-4",
+         "microsoft/Phi-4-multimodal-instruct",
+         "microsoft/WizardLM-2-8x22B",
+         # "mistralai/Mixtral-8x7B-Instruct-v0.1", # >>>> NOT WORKING
+         # "mistralai/Mistral-7B-Instruct-v0.3", # >>>> NOT WORKING
+         # "mistralai/Mistral-Nemo-Instruct-2407", # >>>> NOT WORKING
+         "mistralai/Mistral-Small-24B-Instruct-2501",
+         "nvidia/Llama-3.1-Nemotron-70B-Instruct",
+         # "NousResearch/Hermes-3-Llama-3.1-405B", # >>>> NOT WORKING
+         # "NovaSky-AI/Sky-T1-32B-Preview", # >>>> NOT WORKING
+         "Qwen/QwQ-32B",
+         # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
+         "Qwen/Qwen2.5-72B-Instruct",
+         "Qwen/Qwen2.5-Coder-32B-Instruct",
+         # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
+         # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2049, # Set a reasonable default
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+         system_prompt: str = "You are a helpful assistant.",
+         browser: str = "chrome"
+     ):
+         """Initializes the DeepInfra API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.url = "https://api.deepinfra.com/v1/openai/chat/completions"
+
+         # Initialize LitAgent for user agent generation
+         self.agent = LitAgent()
+         # Use fingerprinting to create a consistent browser identity
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Use the fingerprint for headers
+         self.headers = {
+             "Accept": self.fingerprint["accept"],
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": self.fingerprint["accept_language"],
+             "Content-Type": "application/json",
+             "Cache-Control": "no-cache",
+             "Connection": "keep-alive",
+             "Origin": "https://deepinfra.com",
+             "Pragma": "no-cache",
+             "Referer": "https://deepinfra.com/",
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-site",
+             "X-Deepinfra-Source": "web-embed",
+             "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             "Sec-CH-UA-Mobile": "?0",
+             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+             "User-Agent": self.fingerprint["user_agent"],
+         }
+
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+         self.system_prompt = system_prompt
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def refresh_identity(self, browser: str = None):
+         """
+         Refreshes the browser identity fingerprint.
+
+         Args:
+             browser: Specific browser to use for the new fingerprint
+         """
+         browser = browser or self.fingerprint.get("browser_type", "chrome")
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Update headers with new fingerprint
+         self.headers.update({
+             "Accept": self.fingerprint["accept"],
+             "Accept-Language": self.fingerprint["accept_language"],
+             "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
+             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+             "User-Agent": self.fingerprint["user_agent"],
+         })
+
+         # Update session headers
+         for header, value in self.headers.items():
+             self.session.headers[header] = value
+
+         return self.fingerprint
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Payload construction
+         payload = {
+             "model": self.model,
+             "messages": [
+                 {"role": "system", "content": self.system_prompt},
+                 {"role": "user", "content": conversation_prompt},
+             ],
+             "stream": stream
+         }
+
+         def for_stream():
+             try:
+                 with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
+                     if response.status_code != 200:
+                         raise exceptions.FailedToGenerateResponseError(
+                             f"Request failed with status code {response.status_code}"
+                         )
+
+                     streaming_text = ""
+                     for line in response.iter_lines(decode_unicode=True):
+                         if line:
+                             line = line.strip()
+                             if line.startswith("data: "):
+                                 json_str = line[6:]
+                                 if json_str == "[DONE]":
+                                     break
+                                 try:
+                                     json_data = json.loads(json_str)
+                                     if 'choices' in json_data:
+                                         choice = json_data['choices'][0]
+                                         if 'delta' in choice and 'content' in choice['delta']:
+                                             content = choice['delta']['content']
+                                             streaming_text += content
+                                             resp = dict(text=content)
+                                             yield resp if raw else resp
+                                 except json.JSONDecodeError:
+                                     continue
+
+                     self.last_response = {"text": streaming_text}
+                     self.conversation.update_chat_history(prompt, streaming_text)
+
+             except requests.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+         def for_non_stream():
+             try:
+                 response = requests.post(self.url, headers=self.headers, data=json.dumps(payload), timeout=self.timeout)
+                 if response.status_code != 200:
+                     raise exceptions.FailedToGenerateResponseError(
+                         f"Request failed with status code {response.status_code}"
+                     )
+
+                 response_data = response.json()
+                 if 'choices' in response_data and len(response_data['choices']) > 0:
+                     content = response_data['choices'][0].get('message', {}).get('content', '')
+                     self.last_response = {"text": content}
+                     self.conversation.update_chat_history(prompt, content)
+                     return {"text": content}
+                 else:
+                     raise exceptions.FailedToGenerateResponseError("No response content found")
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         def for_stream():
+             for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+                 yield self.get_message(response)
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+             )
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     for model in DeepInfra.AVAILABLE_MODELS:
+         try:
+             test_ai = DeepInfra(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             for chunk in response:
+                 response_text += chunk
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Clean and truncate response
+                 clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                 display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
              print(f"\r{model:<50} {'✗':<10} {str(e)}")