webscout 8.2.9__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; consult the registry's advisory page for more details.

Files changed (63):
  1. webscout/AIauto.py +2 -2
  2. webscout/Provider/Blackboxai.py +2 -0
  3. webscout/Provider/ChatSandbox.py +2 -1
  4. webscout/Provider/Deepinfra.py +1 -1
  5. webscout/Provider/HeckAI.py +1 -1
  6. webscout/Provider/LambdaChat.py +1 -0
  7. webscout/Provider/MCPCore.py +7 -3
  8. webscout/Provider/OPENAI/BLACKBOXAI.py +1017 -766
  9. webscout/Provider/OPENAI/Cloudflare.py +31 -14
  10. webscout/Provider/OPENAI/FalconH1.py +457 -0
  11. webscout/Provider/OPENAI/FreeGemini.py +29 -13
  12. webscout/Provider/OPENAI/NEMOTRON.py +26 -14
  13. webscout/Provider/OPENAI/PI.py +427 -0
  14. webscout/Provider/OPENAI/Qwen3.py +303 -282
  15. webscout/Provider/OPENAI/TwoAI.py +29 -12
  16. webscout/Provider/OPENAI/__init__.py +3 -1
  17. webscout/Provider/OPENAI/ai4chat.py +33 -23
  18. webscout/Provider/OPENAI/api.py +78 -12
  19. webscout/Provider/OPENAI/base.py +2 -0
  20. webscout/Provider/OPENAI/c4ai.py +31 -10
  21. webscout/Provider/OPENAI/chatgpt.py +41 -22
  22. webscout/Provider/OPENAI/chatgptclone.py +32 -13
  23. webscout/Provider/OPENAI/chatsandbox.py +7 -3
  24. webscout/Provider/OPENAI/copilot.py +26 -10
  25. webscout/Provider/OPENAI/deepinfra.py +327 -321
  26. webscout/Provider/OPENAI/e2b.py +77 -99
  27. webscout/Provider/OPENAI/exaai.py +13 -10
  28. webscout/Provider/OPENAI/exachat.py +10 -6
  29. webscout/Provider/OPENAI/flowith.py +7 -3
  30. webscout/Provider/OPENAI/freeaichat.py +10 -6
  31. webscout/Provider/OPENAI/glider.py +10 -6
  32. webscout/Provider/OPENAI/heckai.py +11 -8
  33. webscout/Provider/OPENAI/llmchatco.py +9 -7
  34. webscout/Provider/OPENAI/mcpcore.py +10 -7
  35. webscout/Provider/OPENAI/multichat.py +3 -1
  36. webscout/Provider/OPENAI/netwrck.py +10 -6
  37. webscout/Provider/OPENAI/oivscode.py +12 -9
  38. webscout/Provider/OPENAI/opkfc.py +14 -3
  39. webscout/Provider/OPENAI/scirachat.py +14 -8
  40. webscout/Provider/OPENAI/sonus.py +10 -6
  41. webscout/Provider/OPENAI/standardinput.py +18 -9
  42. webscout/Provider/OPENAI/textpollinations.py +14 -7
  43. webscout/Provider/OPENAI/toolbaz.py +16 -10
  44. webscout/Provider/OPENAI/typefully.py +14 -7
  45. webscout/Provider/OPENAI/typegpt.py +10 -6
  46. webscout/Provider/OPENAI/uncovrAI.py +22 -8
  47. webscout/Provider/OPENAI/venice.py +10 -6
  48. webscout/Provider/OPENAI/writecream.py +166 -163
  49. webscout/Provider/OPENAI/x0gpt.py +367 -365
  50. webscout/Provider/OPENAI/yep.py +384 -382
  51. webscout/Provider/PI.py +2 -1
  52. webscout/Provider/__init__.py +0 -2
  53. webscout/Provider/granite.py +41 -6
  54. webscout/Provider/oivscode.py +37 -37
  55. webscout/Provider/scnet.py +1 -0
  56. webscout/version.py +1 -1
  57. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/METADATA +2 -1
  58. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/RECORD +62 -61
  59. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  60. webscout/Provider/ChatGPTGratis.py +0 -194
  61. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/entry_points.txt +0 -0
  62. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  63. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
@@ -1,163 +1,166 @@
1
- import time
2
- import uuid
3
- import requests
4
- import json
5
- from typing import List, Dict, Optional, Union, Generator, Any
6
-
7
- # Import base classes and utility structures
8
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
9
- from .utils import (
10
- ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
11
- ChatCompletionMessage, CompletionUsage, count_tokens
12
- )
13
-
14
- # Attempt to import LitAgent, fallback if not available
15
- try:
16
- from webscout.litagent import LitAgent
17
- except ImportError:
18
- print("Warning: LitAgent not found. Using default user agent.")
19
-
20
- class Completions(BaseCompletions):
21
- def __init__(self, client: 'Writecream'):
22
- self._client = client
23
-
24
- def create(
25
- *,
26
- self,
27
- model: str = None, # Not used by Writecream, for compatibility
28
- messages: List[Dict[str, str]],
29
- max_tokens: Optional[int] = None, # Not used by Writecream
30
- stream: bool = False,
31
- temperature: Optional[float] = None, # Not used by Writecream
32
- top_p: Optional[float] = None, # Not used by Writecream
33
- **kwargs: Any
34
- ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
35
- """
36
- Creates a model response for the given chat conversation.
37
- Mimics openai.chat.completions.create
38
- """
39
- payload = messages
40
- request_id = f"chatcmpl-{uuid.uuid4()}"
41
- created_time = int(time.time())
42
- if stream:
43
- return self._create_stream(request_id, created_time, payload)
44
- else:
45
- return self._create_non_stream(request_id, created_time, payload)
46
-
47
- def _create_stream(
48
- self, request_id: str, created_time: int, payload: List[Dict[str, str]]
49
- ) -> Generator[ChatCompletionChunk, None, None]:
50
- # Writecream does not support streaming, so yield the full response as a single chunk
51
- completion = self._create_non_stream(request_id, created_time, payload)
52
- content = completion.choices[0].message.content
53
- # Yield as a single chunk
54
- delta = ChoiceDelta(content=content)
55
- choice = Choice(index=0, delta=delta, finish_reason=None)
56
- chunk = ChatCompletionChunk(
57
- id=request_id,
58
- choices=[choice],
59
- created=created_time,
60
- model="writecream",
61
- )
62
- yield chunk
63
- # Final chunk with finish_reason
64
- delta = ChoiceDelta(content=None)
65
- choice = Choice(index=0, delta=delta, finish_reason="stop")
66
- chunk = ChatCompletionChunk(
67
- id=request_id,
68
- choices=[choice],
69
- created=created_time,
70
- model="writecream",
71
- )
72
- yield chunk
73
-
74
- def _create_non_stream(
75
- self, request_id: str, created_time: int, payload: List[Dict[str, str]]
76
- ) -> ChatCompletion:
77
- try:
78
- params = {
79
- "query": json.dumps(payload),
80
- "link": "writecream.com"
81
- }
82
- response = self._client.session.get(
83
- self._client.base_url,
84
- params=params,
85
- headers=self._client.headers,
86
- timeout=self._client.timeout
87
- )
88
- response.raise_for_status()
89
- data = response.json()
90
- # Extract the response content according to the new API format
91
- content = data.get("response_content", "")
92
- # Estimate tokens
93
- prompt_tokens = sum(count_tokens(m.get("content", "")) for m in payload)
94
- completion_tokens = count_tokens(content)
95
- usage = CompletionUsage(
96
- prompt_tokens=prompt_tokens,
97
- completion_tokens=completion_tokens,
98
- total_tokens=prompt_tokens + completion_tokens
99
- )
100
- message = ChatCompletionMessage(role="assistant", content=content)
101
- choice = Choice(index=0, message=message, finish_reason="stop")
102
- completion = ChatCompletion(
103
- id=request_id,
104
- choices=[choice],
105
- created=created_time,
106
- model="writecream",
107
- usage=usage
108
- )
109
- return completion
110
- except Exception as e:
111
- print(f"Error during Writecream request: {e}")
112
- raise IOError(f"Writecream request failed: {e}") from e
113
-
114
- class Chat(BaseChat):
115
- def __init__(self, client: 'Writecream'):
116
- self.completions = Completions(client)
117
-
118
- class Writecream(OpenAICompatibleProvider):
119
- """
120
- OpenAI-compatible client for Writecream API.
121
-
122
- Usage:
123
- client = Writecream()
124
- response = client.chat.completions.create(
125
- messages=[{"role": "system", "content": "You are a helpful assistant."},
126
- {"role": "user", "content": "What is the capital of France?"}]
127
- )
128
- print(response.choices[0].message.content)
129
- """
130
- AVAILABLE_MODELS = ["writecream"]
131
-
132
- def __init__(self, timeout: Optional[int] = 30, browser: str = "chrome"):
133
- self.timeout = timeout
134
- self.base_url = "https://8pe3nv3qha.execute-api.us-east-1.amazonaws.com/default/llm_chat"
135
- self.session = requests.Session()
136
- agent = LitAgent()
137
- self.headers = {
138
- "User-Agent": agent.random(),
139
- "Referer": "https://www.writecream.com/chatgpt-chat/"
140
- }
141
- self.session.headers.update(self.headers)
142
- self.chat = Chat(self)
143
-
144
- def convert_model_name(self, model: str) -> str:
145
- return "writecream"
146
-
147
- @property
148
- def models(self):
149
- class _ModelList:
150
- def list(inner_self):
151
- return Writecream.AVAILABLE_MODELS
152
- return _ModelList()
153
-
154
- # Simple test if run directly
155
- if __name__ == "__main__":
156
- client = Writecream()
157
- response = client.chat.completions.create(
158
- messages=[
159
- {"role": "system", "content": "You are a helpful assistant."},
160
- {"role": "user", "content": "What is the capital of France?"}
161
- ]
162
- )
163
- print(response.choices[0].message.content)
1
+ import time
2
+ import uuid
3
+ import requests
4
+ import json
5
+ from typing import List, Dict, Optional, Union, Generator, Any
6
+
7
+ # Import base classes and utility structures
8
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
9
+ from .utils import (
10
+ ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
11
+ ChatCompletionMessage, CompletionUsage, count_tokens
12
+ )
13
+
14
+ # Attempt to import LitAgent, fallback if not available
15
+ try:
16
+ from webscout.litagent import LitAgent
17
+ except ImportError:
18
+ print("Warning: LitAgent not found. Using default user agent.")
19
+
20
+ class Completions(BaseCompletions):
21
+ def __init__(self, client: 'Writecream'):
22
+ self._client = client
23
+
24
+ def create(
25
+ self,
26
+ *,
27
+ model: str = None, # Not used by Writecream, for compatibility
28
+ messages: List[Dict[str, str]],
29
+ max_tokens: Optional[int] = None, # Not used by Writecream
30
+ stream: bool = False,
31
+ temperature: Optional[float] = None, # Not used by Writecream
32
+ top_p: Optional[float] = None, # Not used by Writecream
33
+ timeout: Optional[int] = None,
34
+ proxies: Optional[Dict[str, str]] = None,
35
+ **kwargs: Any
36
+ ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
37
+ """
38
+ Creates a model response for the given chat conversation.
39
+ Mimics openai.chat.completions.create
40
+ """
41
+ payload = messages
42
+ request_id = f"chatcmpl-{uuid.uuid4()}"
43
+ created_time = int(time.time())
44
+ if stream:
45
+ return self._create_stream(request_id, created_time, payload, timeout, proxies)
46
+ else:
47
+ return self._create_non_stream(request_id, created_time, payload, timeout, proxies)
48
+
49
+ def _create_stream(
50
+ self, request_id: str, created_time: int, payload: List[Dict[str, str]], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
51
+ ) -> Generator[ChatCompletionChunk, None, None]:
52
+ # Writecream does not support streaming, so yield the full response as a single chunk
53
+ completion = self._create_non_stream(request_id, created_time, payload, timeout, proxies)
54
+ content = completion.choices[0].message.content
55
+ # Yield as a single chunk
56
+ delta = ChoiceDelta(content=content)
57
+ choice = Choice(index=0, delta=delta, finish_reason=None)
58
+ chunk = ChatCompletionChunk(
59
+ id=request_id,
60
+ choices=[choice],
61
+ created=created_time,
62
+ model="writecream",
63
+ )
64
+ yield chunk
65
+ # Final chunk with finish_reason
66
+ delta = ChoiceDelta(content=None)
67
+ choice = Choice(index=0, delta=delta, finish_reason="stop")
68
+ chunk = ChatCompletionChunk(
69
+ id=request_id,
70
+ choices=[choice],
71
+ created=created_time,
72
+ model="writecream",
73
+ )
74
+ yield chunk
75
+
76
+ def _create_non_stream(
77
+ self, request_id: str, created_time: int, payload: List[Dict[str, str]], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
78
+ ) -> ChatCompletion:
79
+ try:
80
+ params = {
81
+ "query": json.dumps(payload),
82
+ "link": "writecream.com"
83
+ }
84
+ response = self._client.session.get(
85
+ self._client.base_url,
86
+ params=params,
87
+ headers=self._client.headers,
88
+ timeout=timeout or self._client.timeout,
89
+ proxies=proxies or getattr(self._client, "proxies", None)
90
+ )
91
+ response.raise_for_status()
92
+ data = response.json()
93
+ # Extract the response content according to the new API format
94
+ content = data.get("response_content", "")
95
+ # Estimate tokens
96
+ prompt_tokens = sum(count_tokens(m.get("content", "")) for m in payload)
97
+ completion_tokens = count_tokens(content)
98
+ usage = CompletionUsage(
99
+ prompt_tokens=prompt_tokens,
100
+ completion_tokens=completion_tokens,
101
+ total_tokens=prompt_tokens + completion_tokens
102
+ )
103
+ message = ChatCompletionMessage(role="assistant", content=content)
104
+ choice = Choice(index=0, message=message, finish_reason="stop")
105
+ completion = ChatCompletion(
106
+ id=request_id,
107
+ choices=[choice],
108
+ created=created_time,
109
+ model="writecream",
110
+ usage=usage
111
+ )
112
+ return completion
113
+ except Exception as e:
114
+ print(f"Error during Writecream request: {e}")
115
+ raise IOError(f"Writecream request failed: {e}") from e
116
+
117
+ class Chat(BaseChat):
118
+ def __init__(self, client: 'Writecream'):
119
+ self.completions = Completions(client)
120
+
121
+ class Writecream(OpenAICompatibleProvider):
122
+ """
123
+ OpenAI-compatible client for Writecream API.
124
+
125
+ Usage:
126
+ client = Writecream()
127
+ response = client.chat.completions.create(
128
+ messages=[{"role": "system", "content": "You are a helpful assistant."},
129
+ {"role": "user", "content": "What is the capital of France?"}]
130
+ )
131
+ print(response.choices[0].message.content)
132
+ """
133
+ AVAILABLE_MODELS = ["writecream"]
134
+
135
+ def __init__(self, browser: str = "chrome"):
136
+ self.timeout = None
137
+ self.base_url = "https://8pe3nv3qha.execute-api.us-east-1.amazonaws.com/default/llm_chat"
138
+ self.session = requests.Session()
139
+ agent = LitAgent()
140
+ self.headers = {
141
+ "User-Agent": agent.random(),
142
+ "Referer": "https://www.writecream.com/chatgpt-chat/"
143
+ }
144
+ self.session.headers.update(self.headers)
145
+ self.chat = Chat(self)
146
+
147
+ def convert_model_name(self, model: str) -> str:
148
+ return "writecream"
149
+
150
+ @property
151
+ def models(self):
152
+ class _ModelList:
153
+ def list(inner_self):
154
+ return Writecream.AVAILABLE_MODELS
155
+ return _ModelList()
156
+
157
+ # Simple test if run directly
158
+ if __name__ == "__main__":
159
+ client = Writecream()
160
+ response = client.chat.completions.create(
161
+ messages=[
162
+ {"role": "system", "content": "You are a helpful assistant."},
163
+ {"role": "user", "content": "What is the capital of France?"}
164
+ ]
165
+ )
166
+ print(response.choices[0].message.content)