webscout-8.2.9-py3-none-any.whl → webscout-8.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (63)
  1. webscout/AIauto.py +2 -2
  2. webscout/Provider/Blackboxai.py +2 -0
  3. webscout/Provider/ChatSandbox.py +2 -1
  4. webscout/Provider/Deepinfra.py +1 -1
  5. webscout/Provider/HeckAI.py +1 -1
  6. webscout/Provider/LambdaChat.py +1 -0
  7. webscout/Provider/MCPCore.py +7 -3
  8. webscout/Provider/OPENAI/BLACKBOXAI.py +1017 -766
  9. webscout/Provider/OPENAI/Cloudflare.py +31 -14
  10. webscout/Provider/OPENAI/FalconH1.py +457 -0
  11. webscout/Provider/OPENAI/FreeGemini.py +29 -13
  12. webscout/Provider/OPENAI/NEMOTRON.py +26 -14
  13. webscout/Provider/OPENAI/PI.py +427 -0
  14. webscout/Provider/OPENAI/Qwen3.py +303 -282
  15. webscout/Provider/OPENAI/TwoAI.py +29 -12
  16. webscout/Provider/OPENAI/__init__.py +3 -1
  17. webscout/Provider/OPENAI/ai4chat.py +33 -23
  18. webscout/Provider/OPENAI/api.py +78 -12
  19. webscout/Provider/OPENAI/base.py +2 -0
  20. webscout/Provider/OPENAI/c4ai.py +31 -10
  21. webscout/Provider/OPENAI/chatgpt.py +41 -22
  22. webscout/Provider/OPENAI/chatgptclone.py +32 -13
  23. webscout/Provider/OPENAI/chatsandbox.py +7 -3
  24. webscout/Provider/OPENAI/copilot.py +26 -10
  25. webscout/Provider/OPENAI/deepinfra.py +327 -321
  26. webscout/Provider/OPENAI/e2b.py +77 -99
  27. webscout/Provider/OPENAI/exaai.py +13 -10
  28. webscout/Provider/OPENAI/exachat.py +10 -6
  29. webscout/Provider/OPENAI/flowith.py +7 -3
  30. webscout/Provider/OPENAI/freeaichat.py +10 -6
  31. webscout/Provider/OPENAI/glider.py +10 -6
  32. webscout/Provider/OPENAI/heckai.py +11 -8
  33. webscout/Provider/OPENAI/llmchatco.py +9 -7
  34. webscout/Provider/OPENAI/mcpcore.py +10 -7
  35. webscout/Provider/OPENAI/multichat.py +3 -1
  36. webscout/Provider/OPENAI/netwrck.py +10 -6
  37. webscout/Provider/OPENAI/oivscode.py +12 -9
  38. webscout/Provider/OPENAI/opkfc.py +14 -3
  39. webscout/Provider/OPENAI/scirachat.py +14 -8
  40. webscout/Provider/OPENAI/sonus.py +10 -6
  41. webscout/Provider/OPENAI/standardinput.py +18 -9
  42. webscout/Provider/OPENAI/textpollinations.py +14 -7
  43. webscout/Provider/OPENAI/toolbaz.py +16 -10
  44. webscout/Provider/OPENAI/typefully.py +14 -7
  45. webscout/Provider/OPENAI/typegpt.py +10 -6
  46. webscout/Provider/OPENAI/uncovrAI.py +22 -8
  47. webscout/Provider/OPENAI/venice.py +10 -6
  48. webscout/Provider/OPENAI/writecream.py +166 -163
  49. webscout/Provider/OPENAI/x0gpt.py +367 -365
  50. webscout/Provider/OPENAI/yep.py +384 -382
  51. webscout/Provider/PI.py +2 -1
  52. webscout/Provider/__init__.py +0 -2
  53. webscout/Provider/granite.py +41 -6
  54. webscout/Provider/oivscode.py +37 -37
  55. webscout/Provider/scnet.py +1 -0
  56. webscout/version.py +1 -1
  57. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/METADATA +2 -1
  58. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/RECORD +62 -61
  59. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  60. webscout/Provider/ChatGPTGratis.py +0 -194
  61. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/entry_points.txt +0 -0
  62. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  63. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/ChatGPTGratis.py (deleted)
@@ -1,194 +0,0 @@
- from typing import Union, Any, Dict, Generator, Optional
- import requests
- import json
-
- from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
- from webscout.AIbase import Provider
- from webscout import exceptions
- from webscout.litagent import LitAgent as Lit
-
-
- class ChatGPTGratis(Provider):
-     """
-     A class to interact with the chatgptgratis.eu backend API with real-time streaming.
-     """
-     AVAILABLE_MODELS = [
-         "Meta-Llama-3.2-1B-Instruct",
-         "Meta-Llama-3.2-3B-Instruct",
-         "Meta-Llama-3.1-8B-Instruct",
-         "Meta-Llama-3.1-70B-Instruct",
-         "Meta-Llama-3.1-405B-Instruct",
-         "gpt4o"
-     ]
-
-     def __init__(
-         self,
-         model: str = "Meta-Llama-3.2-1B-Instruct",
-         timeout: int = 30,
-         proxies: Optional[Dict[str, str]] = None,
-         intro: Optional[str] = None,
-         filepath: Optional[str] = None,
-         update_file: bool = True,
-         history_offset: int = 10250,
-         act: Optional[str] = None,
-     ) -> None:
-         """
-         Initializes the ChatGPTGratis.
-         """
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-         self.session = requests.Session()
-         self.timeout = timeout
-         self.api_endpoint = "https://chatgptgratis.eu/backend/chat.php"
-         self.model = model
-
-         # Set up headers similar to a browser request with dynamic User-Agent
-         self.headers = {
-             "Accept": "*/*",
-             "Content-Type": "application/json",
-             "Origin": "https://chatgptgratis.eu",
-             "Referer": "https://chatgptgratis.eu/chat.html",
-             "User-Agent": Lit().random(),
-         }
-         self.session.headers.update(self.headers)
-         self.session.proxies = proxies or {}
-
-         # Set up conversation history and prompts
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             True, 8096, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: Optional[str] = None,
-         conversationally: bool = False,
-     ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
-         """
-         Sends a request to the API and returns the response.
-         If stream is True, yields response chunks as they are received.
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             available_opts = (
-                 method for method in dir(Optimizers)
-                 if callable(getattr(Optimizers, method)) and not method.startswith("__")
-             )
-             if optimizer in available_opts:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(f"Optimizer is not one of {list(available_opts)}")
-
-         payload = {
-             "message": conversation_prompt,
-             "model": self.model,
-         }
-
-         def for_stream() -> Generator[Dict[str, Any], None, None]:
-             response = self.session.post(
-                 self.api_endpoint,
-                 json=payload,
-                 stream=True,
-                 timeout=self.timeout
-             )
-             if not response.ok:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             full_response = ""
-             for line in response.iter_lines():
-                 if line:
-                     line_decoded = line.decode('utf-8').strip()
-                     if line_decoded == "data: [DONE]":
-                         break
-                     if line_decoded.startswith("data: "):
-                         try:
-                             json_data = json.loads(line_decoded[6:])
-                             choices = json_data.get("choices", [])
-                             if choices and "delta" in choices[0]:
-                                 content = choices[0]["delta"].get("content", "")
-                             else:
-                                 content = ""
-                             full_response += content
-                             yield content if raw else {"text": content}
-                         except json.JSONDecodeError:
-                             continue
-             # Update last response and conversation history.
-             self.conversation.update_chat_history(prompt, self.get_message({"text": full_response}))
-
-         def for_non_stream() -> Dict[str, Any]:
-             collected = ""
-             for chunk in for_stream():
-                 collected += chunk["text"] if isinstance(chunk, dict) else chunk
-             return {"text": collected}
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: Optional[str] = None,
-         conversationally: bool = False,
-     ) -> Union[str, Generator[str, None, None]]:
-         """
-         Returns the response as a string.
-         For streaming requests, yields each response chunk as a string.
-         """
-         def stream_response() -> Generator[str, None, None]:
-             for response in self.ask(
-                 prompt, stream=True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def non_stream_response() -> str:
-             return self.get_message(self.ask(
-                 prompt, stream=False, optimizer=optimizer, conversationally=conversationally
-             ))
-
-         return stream_response() if stream else non_stream_response()
-
-     def get_message(self, response: dict) -> str:
-         """
-         Extracts and returns the text message from the response dictionary.
-         """
-         assert isinstance(response, dict), "Response must be a dictionary."
-         return response.get("text", "")
-
-
- if __name__ == "__main__":
-     print("-" * 80)
-     print(f"{'Model':<50} {'Status':<10} {'Response'}")
-     print("-" * 80)
-
-     for model in ChatGPTGratis.AVAILABLE_MODELS:
-         try:
-             test_ai = ChatGPTGratis(model=model, timeout=60)
-             response = test_ai.chat("Say 'Hello' in one word")
-             response_text = response
-
-             if response_text and len(response_text.strip()) > 0:
-                 status = "✓"
-                 # Clean and truncate response
-                 clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
-                 display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
-             else:
-                 status = "✗"
-                 display_text = "Empty or invalid response"
-             print(f"{model:<50} {status:<10} {display_text}")
-         except Exception as e:
-             print(f"{model:<50} {'✗':<10} {str(e)}")
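The hunk above removes webscout/Provider/ChatGPTGratis.py entirely (the -2 in webscout/Provider/__init__.py is consistent with its export being dropped as well), so downstream code that imports this provider will raise ImportError after upgrading to 8.3. A minimal defensive sketch follows; the import path and constructor/chat usage are taken from the 8.2.9 code shown in this diff, while the guard and exit message are illustrative assumptions, not anything webscout prescribes.

# Guard for downstream code that used the provider removed in webscout 8.3.
# The import path is the 8.2.9 location from this diff; the fallback
# behavior below is an assumption for illustration only.
try:
    from webscout.Provider.ChatGPTGratis import ChatGPTGratis
except ImportError:  # removed in webscout 8.3
    ChatGPTGratis = None

if ChatGPTGratis is None:
    raise SystemExit(
        "ChatGPTGratis was removed in webscout 8.3; "
        "pin webscout==8.2.9 or switch to another provider."
    )

# Usage mirrors the deleted file's __main__ self-test.
bot = ChatGPTGratis(model="Meta-Llama-3.2-1B-Instruct", timeout=60)
print(bot.chat("Say 'Hello' in one word"))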