webscout-7.5-py3-none-any.whl → webscout-7.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (118)
  1. webscout/AIauto.py +5 -53
  2. webscout/AIutel.py +8 -318
  3. webscout/DWEBS.py +460 -489
  4. webscout/Extra/YTToolkit/YTdownloader.py +14 -53
  5. webscout/Extra/YTToolkit/transcriber.py +12 -13
  6. webscout/Extra/YTToolkit/ytapi/video.py +0 -1
  7. webscout/Extra/__init__.py +0 -1
  8. webscout/Extra/autocoder/autocoder_utiles.py +0 -4
  9. webscout/Extra/autocoder/rawdog.py +13 -41
  10. webscout/Extra/gguf.py +652 -428
  11. webscout/Extra/weather.py +178 -156
  12. webscout/Extra/weather_ascii.py +70 -17
  13. webscout/Litlogger/core/logger.py +1 -2
  14. webscout/Litlogger/handlers/file.py +1 -1
  15. webscout/Litlogger/styles/formats.py +0 -2
  16. webscout/Litlogger/utils/detectors.py +0 -1
  17. webscout/Provider/AISEARCH/DeepFind.py +0 -1
  18. webscout/Provider/AISEARCH/ISou.py +1 -1
  19. webscout/Provider/AISEARCH/felo_search.py +0 -1
  20. webscout/Provider/AllenAI.py +24 -9
  21. webscout/Provider/C4ai.py +29 -11
  22. webscout/Provider/ChatGPTGratis.py +24 -56
  23. webscout/Provider/DeepSeek.py +25 -17
  24. webscout/Provider/Deepinfra.py +115 -48
  25. webscout/Provider/Gemini.py +1 -1
  26. webscout/Provider/Glider.py +25 -8
  27. webscout/Provider/HF_space/qwen_qwen2.py +2 -2
  28. webscout/Provider/HeckAI.py +23 -7
  29. webscout/Provider/Jadve.py +20 -5
  30. webscout/Provider/Netwrck.py +42 -19
  31. webscout/Provider/PI.py +4 -2
  32. webscout/Provider/Perplexitylabs.py +26 -6
  33. webscout/Provider/PizzaGPT.py +10 -51
  34. webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
  35. webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
  36. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
  37. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -206
  38. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -192
  39. webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
  40. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
  41. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
  42. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
  43. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
  44. webscout/Provider/TTI/__init__.py +2 -3
  45. webscout/Provider/TTI/aiarta/async_aiarta.py +14 -14
  46. webscout/Provider/TTI/aiarta/sync_aiarta.py +52 -21
  47. webscout/Provider/TTI/fastflux/__init__.py +22 -0
  48. webscout/Provider/TTI/fastflux/async_fastflux.py +257 -0
  49. webscout/Provider/TTI/fastflux/sync_fastflux.py +247 -0
  50. webscout/Provider/TTS/__init__.py +2 -2
  51. webscout/Provider/TTS/deepgram.py +12 -39
  52. webscout/Provider/TTS/elevenlabs.py +14 -40
  53. webscout/Provider/TTS/gesserit.py +11 -35
  54. webscout/Provider/TTS/murfai.py +13 -39
  55. webscout/Provider/TTS/parler.py +17 -40
  56. webscout/Provider/TTS/speechma.py +180 -0
  57. webscout/Provider/TTS/streamElements.py +17 -44
  58. webscout/Provider/TextPollinationsAI.py +39 -59
  59. webscout/Provider/Venice.py +25 -8
  60. webscout/Provider/WiseCat.py +27 -5
  61. webscout/Provider/Youchat.py +64 -37
  62. webscout/Provider/__init__.py +0 -6
  63. webscout/Provider/akashgpt.py +20 -5
  64. webscout/Provider/flowith.py +20 -5
  65. webscout/Provider/freeaichat.py +32 -45
  66. webscout/Provider/koala.py +20 -5
  67. webscout/Provider/llamatutor.py +1 -1
  68. webscout/Provider/llmchat.py +30 -8
  69. webscout/Provider/multichat.py +65 -9
  70. webscout/Provider/talkai.py +1 -0
  71. webscout/Provider/turboseek.py +3 -0
  72. webscout/Provider/tutorai.py +2 -0
  73. webscout/Provider/typegpt.py +154 -64
  74. webscout/Provider/x0gpt.py +3 -1
  75. webscout/Provider/yep.py +102 -20
  76. webscout/__init__.py +3 -0
  77. webscout/cli.py +4 -40
  78. webscout/conversation.py +1 -10
  79. webscout/litagent/__init__.py +2 -2
  80. webscout/litagent/agent.py +351 -20
  81. webscout/litagent/constants.py +34 -5
  82. webscout/litprinter/__init__.py +0 -3
  83. webscout/models.py +181 -0
  84. webscout/optimizers.py +1 -1
  85. webscout/prompt_manager.py +2 -8
  86. webscout/scout/core/scout.py +1 -4
  87. webscout/scout/core/search_result.py +1 -1
  88. webscout/scout/core/text_utils.py +1 -1
  89. webscout/scout/core.py +2 -5
  90. webscout/scout/element.py +1 -1
  91. webscout/scout/parsers/html_parser.py +1 -1
  92. webscout/scout/utils.py +0 -1
  93. webscout/swiftcli/__init__.py +1 -3
  94. webscout/tempid.py +1 -1
  95. webscout/update_checker.py +1 -3
  96. webscout/version.py +1 -1
  97. webscout/webscout_search_async.py +1 -2
  98. webscout/yep_search.py +297 -297
  99. {webscout-7.5.dist-info → webscout-7.6.dist-info}/LICENSE.md +4 -4
  100. {webscout-7.5.dist-info → webscout-7.6.dist-info}/METADATA +101 -390
  101. {webscout-7.5.dist-info → webscout-7.6.dist-info}/RECORD +104 -110
  102. webscout/Extra/autollama.py +0 -231
  103. webscout/Provider/Amigo.py +0 -274
  104. webscout/Provider/Bing.py +0 -243
  105. webscout/Provider/DiscordRocks.py +0 -253
  106. webscout/Provider/TTI/blackbox/__init__.py +0 -4
  107. webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
  108. webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
  109. webscout/Provider/TTI/deepinfra/__init__.py +0 -4
  110. webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
  111. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
  112. webscout/Provider/TTI/imgninza/__init__.py +0 -4
  113. webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
  114. webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
  115. webscout/Provider/TTS/voicepod.py +0 -117
  116. {webscout-7.5.dist-info → webscout-7.6.dist-info}/WHEEL +0 -0
  117. {webscout-7.5.dist-info → webscout-7.6.dist-info}/entry_points.txt +0 -0
  118. {webscout-7.5.dist-info → webscout-7.6.dist-info}/top_level.txt +0 -0
webscout/Provider/ChatGPTGratis.py

@@ -5,13 +5,12 @@ import json
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.Litlogger import Logger, LogFormat
 from webscout import LitAgent as Lit


 class ChatGPTGratis(Provider):
     """
-    A class to interact with the chatgptgratis.eu backend API with logging and real-time streaming.
+    A class to interact with the chatgptgratis.eu backend API with real-time streaming.
     """
     AVAILABLE_MODELS = [
         "Meta-Llama-3.2-1B-Instruct",
@@ -20,14 +19,12 @@ class ChatGPTGratis(Provider):
         "Meta-Llama-3.1-70B-Instruct",
         "Meta-Llama-3.1-405B-Instruct",
         "gpt4o"
-
     ]

     def __init__(
         self,
-        model: str = "gpt4o",
+        model: str = "Meta-Llama-3.2-1B-Instruct",
         timeout: int = 30,
-        logging: bool = False,
         proxies: Optional[Dict[str, str]] = None,
         intro: Optional[str] = None,
         filepath: Optional[str] = None,
@@ -41,14 +38,6 @@ class ChatGPTGratis(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

-        self.logger = Logger(
-            name="ChatGPTGratis",
-            format=LogFormat.MODERN_EMOJI,
-        ) if logging else None
-
-        if self.logger:
-            self.logger.info(f"Initializing ChatGPTGratis with model: {model}")
-
         self.session = requests.Session()
         self.timeout = timeout
         self.api_endpoint = "https://chatgptgratis.eu/backend/chat.php"
@@ -78,9 +67,6 @@ class ChatGPTGratis(Provider):
         )
         self.conversation.history_offset = history_offset

-        if self.logger:
-            self.logger.info("ChatGPTGratis initialized successfully.")
-
     def ask(
         self,
         prompt: str,
@@ -93,10 +79,6 @@ class ChatGPTGratis(Provider):
         Sends a request to the API and returns the response.
        If stream is True, yields response chunks as they are received.
         """
-        if self.logger:
-            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
-            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
-
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             available_opts = (
@@ -107,22 +89,15 @@ class ChatGPTGratis(Provider):
             conversation_prompt = getattr(Optimizers, optimizer)(
                 conversation_prompt if conversationally else prompt
             )
-            if self.logger:
-                self.logger.debug(f"Applied optimizer: {optimizer}")
         else:
-            if self.logger:
-                self.logger.error(f"Invalid optimizer requested: {optimizer}")
             raise Exception(f"Optimizer is not one of {list(available_opts)}")

         payload = {
             "message": conversation_prompt,
             "model": self.model,
-
         }

         def for_stream() -> Generator[Dict[str, Any], None, None]:
-            if self.logger:
-                self.logger.debug("Initiating streaming request to API")
             response = self.session.post(
                 self.api_endpoint,
                 json=payload,
@@ -130,23 +105,15 @@ class ChatGPTGratis(Provider):
                 timeout=self.timeout
             )
             if not response.ok:
-                if self.logger:
-                    self.logger.error(
-                        f"API request failed. Status: {response.status_code}, Reason: {response.reason}"
-                    )
                 raise exceptions.FailedToGenerateResponseError(
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
-            if self.logger:
-                self.logger.info(f"API connection established. Status: {response.status_code}")

             full_response = ""
             for line in response.iter_lines():
                 if line:
                     line_decoded = line.decode('utf-8').strip()
                     if line_decoded == "data: [DONE]":
-                        if self.logger:
-                            self.logger.debug("Stream completed.")
                         break
                     if line_decoded.startswith("data: "):
                         try:
@@ -158,18 +125,12 @@ class ChatGPTGratis(Provider):
                                 content = ""
                             full_response += content
                             yield content if raw else {"text": content}
-                        except json.JSONDecodeError as e:
-                            if self.logger:
-                                self.logger.error(f"JSON parsing error: {str(e)}")
+                        except json.JSONDecodeError:
                             continue
             # Update last response and conversation history.
             self.conversation.update_chat_history(prompt, self.get_message({"text": full_response}))
-            if self.logger:
-                self.logger.debug("Response processing completed.")

         def for_non_stream() -> Dict[str, Any]:
-            if self.logger:
-                self.logger.debug("Processing non-streaming request")
             collected = ""
             for chunk in for_stream():
                 collected += chunk["text"] if isinstance(chunk, dict) else chunk
@@ -188,9 +149,6 @@ class ChatGPTGratis(Provider):
         Returns the response as a string.
         For streaming requests, yields each response chunk as a string.
         """
-        if self.logger:
-            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
-
         def stream_response() -> Generator[str, None, None]:
             for response in self.ask(
                 prompt, stream=True, optimizer=optimizer, conversationally=conversationally
@@ -213,14 +171,24 @@ class ChatGPTGratis(Provider):


 if __name__ == "__main__":
-    from rich import print
-
-    # Create an instance of the ChatGPTGratis with logging enabled for testing.
-    client = ChatGPTGratis(
-        model="Meta-Llama-3.2-1B-Instruct",
-        logging=False
-    )
-    prompt_input = input(">>> ")
-    response = client.chat(prompt_input, stream=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in ChatGPTGratis.AVAILABLE_MODELS:
+        try:
+            test_ai = ChatGPTGratis(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Clean and truncate response
+                clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/DeepSeek.py

@@ -13,11 +13,11 @@ class DeepSeek(Provider):
     A class to interact with the DeepSeek AI API.
     """

-    AVAILABLE_MODELS = {
-        "deepseek-v3": "deepseek-v3",
-        "deepseek-r1": "deepseek-r1",
-        "deepseek-llm-67b-chat": "deepseek-llm-67b-chat"
-    }
+    AVAILABLE_MODELS = [
+        "deepseek-v3",
+        "deepseek-r1",
+        "deepseek-llm-67b-chat"
+    ]

     def __init__(
         self,
@@ -175,15 +175,23 @@ class DeepSeek(Provider):
         return response["text"]

 if __name__ == "__main__":
-    from rich import print
-
-    # Example usage
-    ai = DeepSeek(system_prompt="You are an expert AI assistant.")
-
-    try:
-        # Send a prompt and stream the response
-        response = ai.chat("Write me a short poem about AI.", stream=True)
-        for chunk in response:
-            print(chunk, end="", flush=True)
-    except Exception as e:
-        print(f"Error: {e}")
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in DeepSeek.AVAILABLE_MODELS:
+        try:
+            test_ai = DeepSeek(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/Deepinfra.py

@@ -16,46 +16,46 @@ class DeepInfra(Provider):
     """

     AVAILABLE_MODELS = [
-        "anthropic/claude-3-7-sonnet-latest",
+        # "anthropic/claude-3-7-sonnet-latest",  # >>>> NOT WORKING
         "deepseek-ai/DeepSeek-R1",
         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
         "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
         "deepseek-ai/DeepSeek-R1-Turbo",
         "deepseek-ai/DeepSeek-V3",
-        "google/gemma-2-27b-it",
-        "google/gemma-2-9b-it",
+        # "google/gemma-2-27b-it",  # >>>> NOT WORKING
+        # "google/gemma-2-9b-it",  # >>>> NOT WORKING
         "google/gemma-3-27b-it",
-        "google/gemini-1.5-flash",
-        "google/gemini-1.5-flash-8b",
-        "google/gemini-2.0-flash-001",
-        "Gryphe/MythoMax-L2-13b",
-        "meta-llama/Llama-3.2-1B-Instruct",
-        "meta-llama/Llama-3.2-3B-Instruct",
+        # "google/gemini-1.5-flash",  # >>>> NOT WORKING
+        # "google/gemini-1.5-flash-8b",  # >>>> NOT WORKING
+        # "google/gemini-2.0-flash-001",  # >>>> NOT WORKING
+        # "Gryphe/MythoMax-L2-13b",  # >>>> NOT WORKING
+        # "meta-llama/Llama-3.2-1B-Instruct",  # >>>> NOT WORKING
+        # "meta-llama/Llama-3.2-3B-Instruct",  # >>>> NOT WORKING
         "meta-llama/Llama-3.2-90B-Vision-Instruct",
         "meta-llama/Llama-3.2-11B-Vision-Instruct",
-        "meta-llama/Meta-Llama-3-70B-Instruct",
-        "meta-llama/Meta-Llama-3-8B-Instruct",
-        "meta-llama/Meta-Llama-3.1-70B-Instruct",
+        # "meta-llama/Meta-Llama-3-70B-Instruct",  # >>>> NOT WORKING
+        # "meta-llama/Meta-Llama-3-8B-Instruct",  # >>>> NOT WORKING
+        # "meta-llama/Meta-Llama-3.1-70B-Instruct",  # >>>> NOT WORKING
         "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
         "meta-llama/Meta-Llama-3.1-8B-Instruct",
         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-        "meta-llama/Meta-Llama-3.1-405B-Instruct",
+        # "meta-llama/Meta-Llama-3.1-405B-Instruct",  # >>>> NOT WORKING
         "microsoft/phi-4",
         "microsoft/Phi-4-multimodal-instruct",
         "microsoft/WizardLM-2-8x22B",
-        "mistralai/Mixtral-8x7B-Instruct-v0.1",
-        "mistralai/Mistral-7B-Instruct-v0.3",
-        "mistralai/Mistral-Nemo-Instruct-2407",
+        # "mistralai/Mixtral-8x7B-Instruct-v0.1",  # >>>> NOT WORKING
+        # "mistralai/Mistral-7B-Instruct-v0.3",  # >>>> NOT WORKING
+        # "mistralai/Mistral-Nemo-Instruct-2407",  # >>>> NOT WORKING
         "mistralai/Mistral-Small-24B-Instruct-2501",
         "nvidia/Llama-3.1-Nemotron-70B-Instruct",
-        "NousResearch/Hermes-3-Llama-3.1-405B",
-        "NovaSky-AI/Sky-T1-32B-Preview",
+        # "NousResearch/Hermes-3-Llama-3.1-405B",  # >>>> NOT WORKING
+        # "NovaSky-AI/Sky-T1-32B-Preview",  # >>>> NOT WORKING
         "Qwen/QwQ-32B",
-        "Qwen/Qwen2.5-7B-Instruct",
+        # "Qwen/Qwen2.5-7B-Instruct",  # >>>> NOT WORKING
         "Qwen/Qwen2.5-72B-Instruct",
         "Qwen/Qwen2.5-Coder-32B-Instruct",
-        "Sao10K/L3.1-70B-Euryale-v2.2",
-        "Sao10K/L3.3-70B-Euryale-v2.3",
+        # "Sao10K/L3.1-70B-Euryale-v2.2",  # >>>> NOT WORKING
+        # "Sao10K/L3.3-70B-Euryale-v2.3",  # >>>> NOT WORKING
         "meta-llama/Llama-3.3-70B-Instruct",
         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
     ]
@@ -71,32 +71,41 @@ class DeepInfra(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "meta-llama/Llama-3.3-70B-Instruct-Turbo"  # Updated default model
+        model: str = "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+        browser: str = "chrome"
     ):
         """Initializes the DeepInfra API client."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

         self.url = "https://api.deepinfra.com/v1/openai/chat/completions"
-        # Use LitAgent for user-agent instead of hardcoded string.
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+        # Use fingerprinting to create a consistent browser identity
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Use the fingerprint for headers
         self.headers = {
-            'User-Agent': LitAgent().random(),
-            'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-            'Cache-Control': 'no-cache',
-            'Connection': 'keep-alive',
-            'Content-Type': 'application/json',
-            'Origin': 'https://deepinfra.com',
-            'Pragma': 'no-cache',
-            'Referer': 'https://deepinfra.com/',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-site',
-            'X-Deepinfra-Source': 'web-embed',
-            'accept': 'text/event-stream',
-            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"macOS"'
+            "Accept": self.fingerprint["accept"],
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Content-Type": "application/json",
+            "Cache-Control": "no-cache",
+            "Connection": "keep-alive",
+            "Origin": "https://deepinfra.com",
+            "Pragma": "no-cache",
+            "Referer": "https://deepinfra.com/",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-site",
+            "X-Deepinfra-Source": "web-embed",
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
         }
+
         self.session = requests.Session()
         self.session.headers.update(self.headers)
         self.session.proxies.update(proxies)
@@ -125,6 +134,31 @@ class DeepInfra(Provider):
         )
         self.conversation.history_offset = history_offset

+    def refresh_identity(self, browser: str = None):
+        """
+        Refreshes the browser identity fingerprint.
+
+        Args:
+            browser: Specific browser to use for the new fingerprint
+        """
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Update headers with new fingerprint
+        self.headers.update({
+            "Accept": self.fingerprint["accept"],
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
+        })
+
+        # Update session headers
+        for header, value in self.headers.items():
+            self.session.headers[header] = value
+
+        return self.fingerprint
+
     def ask(
         self,
         prompt: str,
@@ -180,15 +214,30 @@
                     except json.JSONDecodeError:
                         continue

+                self.last_response = {"text": streaming_text}
                 self.conversation.update_chat_history(prompt, streaming_text)

             except requests.RequestException as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")

         def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
+            try:
+                response = requests.post(self.url, headers=self.headers, data=json.dumps(payload), timeout=self.timeout)
+                if response.status_code != 200:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Request failed with status code {response.status_code}"
+                    )
+
+                response_data = response.json()
+                if 'choices' in response_data and len(response_data['choices']) > 0:
+                    content = response_data['choices'][0].get('message', {}).get('content', '')
+                    self.last_response = {"text": content}
+                    self.conversation.update_chat_history(prompt, content)
+                    return {"text": content}
+                else:
+                    raise exceptions.FailedToGenerateResponseError("No response content found")
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

         return for_stream() if stream else for_non_stream()

@@ -198,7 +247,7 @@ class DeepInfra(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str:
+    ) -> Union[str, Generator[str, None, None]]:
         def for_stream():
             for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                 yield self.get_message(response)
@@ -213,8 +262,26 @@ class DeepInfra(Provider):
         return response["text"]

 if __name__ == "__main__":
-    from rich import print
-    ai = DeepInfra(timeout=5000)
-    response = ai.chat("write a poem about AI", stream=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in DeepInfra.AVAILABLE_MODELS:
+        try:
+            test_ai = DeepInfra(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Clean and truncate response
+                clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/Gemini.py

@@ -10,7 +10,7 @@ from ..AIbase import Provider, AsyncProvider
 from ..Bard import Chatbot, Model

 # Import Logger and related classes (assumed similar to what is in yep.py)
-from webscout import Logger, LogFormat
+from webscout.Litlogger import Logger, LogFormat

 warnings.simplefilter("ignore", category=UserWarning)

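Note: this one-line change appears to fix a previously broken import — Logger and LogFormat live in the webscout.Litlogger subpackage (see webscout/Litlogger/core/logger.py in the files-changed list), not at the package root.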
webscout/Provider/Glider.py

@@ -12,12 +12,12 @@ class GliderAI(Provider):
     A class to interact with the Glider.so API.
     """

-    AVAILABLE_MODELS = {
+    AVAILABLE_MODELS = [
         "chat-llama-3-1-70b",
         "chat-llama-3-1-8b",
         "chat-llama-3-2-3b",
         "deepseek-ai/DeepSeek-R1",
-    }
+    ]

     def __init__(
         self,
@@ -180,9 +180,26 @@ class GliderAI(Provider):
         return response["text"]

 if __name__ == "__main__":
-    from rich import print
-    # For testing
-    ai = GliderAI(model="chat-llama-3-1-70b")
-    response = ai.chat("Meaning of Life", stream=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in GliderAI.AVAILABLE_MODELS:
+        try:
+            test_ai = GliderAI(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/HeckAI.py

@@ -1,10 +1,10 @@
 from dataclasses import dataclass
-from enum import Enum, auto
+from enum import Enum
 import requests
 import json
 import re
 import uuid
-from typing import List, Dict, Generator, Optional, Any, TypedDict, Literal, Union, Final
+from typing import List, Dict, Generator, Optional, Any, TypedDict, Final

 # Type definitions
 class Role(Enum):
@@ -208,10 +208,26 @@ class HeckAI(Provider):
         return response["text"]

 if __name__ == "__main__":
-    from rich import print
-    ai = HeckAI(timeout=120)
-    response = ai.chat("Write a short poem about artificial intelligence", stream=False)
-    print(response)
-    # for chunk in response:
-    #     chunk = ai.fix_encoding(chunk)
-    #     print(chunk, end="", flush=True)
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in HeckAI.AVAILABLE_MODELS:
+        try:
+            test_ai = HeckAI(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/Jadve.py

@@ -244,8 +244,23 @@ class JadveOpenAI(Provider):
         return response["text"]

 if __name__ == "__main__":
-    from rich import print
-    ai = JadveOpenAI(timeout=5000)
-    response = ai.chat("Who made u?", stream=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in JadveOpenAI.AVAILABLE_MODELS:
+        try:
+            test_ai = JadveOpenAI(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")