webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; further details are available in the registry's advisory for this release.

Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
@@ -1,163 +1,166 @@
1
- import time
2
- import uuid
3
- import requests
4
- import json
5
- from typing import List, Dict, Optional, Union, Generator, Any
6
-
7
- # Import base classes and utility structures
8
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
9
- from .utils import (
10
- ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
11
- ChatCompletionMessage, CompletionUsage
12
- )
13
-
14
- # Attempt to import LitAgent, fallback if not available
15
- try:
16
- from webscout.litagent import LitAgent
17
- except ImportError:
18
- print("Warning: LitAgent not found. Using default user agent.")
19
-
20
- class Completions(BaseCompletions):
21
- def __init__(self, client: 'Writecream'):
22
- self._client = client
23
-
24
- def create(
25
- *,
26
- self,
27
- model: str = None, # Not used by Writecream, for compatibility
28
- messages: List[Dict[str, str]],
29
- max_tokens: Optional[int] = None, # Not used by Writecream
30
- stream: bool = False,
31
- temperature: Optional[float] = None, # Not used by Writecream
32
- top_p: Optional[float] = None, # Not used by Writecream
33
- **kwargs: Any
34
- ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
35
- """
36
- Creates a model response for the given chat conversation.
37
- Mimics openai.chat.completions.create
38
- """
39
- payload = messages
40
- request_id = f"chatcmpl-{uuid.uuid4()}"
41
- created_time = int(time.time())
42
- if stream:
43
- return self._create_stream(request_id, created_time, payload)
44
- else:
45
- return self._create_non_stream(request_id, created_time, payload)
46
-
47
- def _create_stream(
48
- self, request_id: str, created_time: int, payload: List[Dict[str, str]]
49
- ) -> Generator[ChatCompletionChunk, None, None]:
50
- # Writecream does not support streaming, so yield the full response as a single chunk
51
- completion = self._create_non_stream(request_id, created_time, payload)
52
- content = completion.choices[0].message.content
53
- # Yield as a single chunk
54
- delta = ChoiceDelta(content=content)
55
- choice = Choice(index=0, delta=delta, finish_reason=None)
56
- chunk = ChatCompletionChunk(
57
- id=request_id,
58
- choices=[choice],
59
- created=created_time,
60
- model="writecream",
61
- )
62
- yield chunk
63
- # Final chunk with finish_reason
64
- delta = ChoiceDelta(content=None)
65
- choice = Choice(index=0, delta=delta, finish_reason="stop")
66
- chunk = ChatCompletionChunk(
67
- id=request_id,
68
- choices=[choice],
69
- created=created_time,
70
- model="writecream",
71
- )
72
- yield chunk
73
-
74
- def _create_non_stream(
75
- self, request_id: str, created_time: int, payload: List[Dict[str, str]]
76
- ) -> ChatCompletion:
77
- try:
78
- params = {
79
- "query": json.dumps(payload),
80
- "link": "writecream.com"
81
- }
82
- response = self._client.session.get(
83
- self._client.base_url,
84
- params=params,
85
- headers=self._client.headers,
86
- timeout=self._client.timeout
87
- )
88
- response.raise_for_status()
89
- data = response.json()
90
- # Extract the response content according to the new API format
91
- content = data.get("response_content", "")
92
- # Estimate tokens
93
- prompt_tokens = sum(len(m.get("content", "").split()) for m in payload)
94
- completion_tokens = len(content.split())
95
- usage = CompletionUsage(
96
- prompt_tokens=prompt_tokens,
97
- completion_tokens=completion_tokens,
98
- total_tokens=prompt_tokens + completion_tokens
99
- )
100
- message = ChatCompletionMessage(role="assistant", content=content)
101
- choice = Choice(index=0, message=message, finish_reason="stop")
102
- completion = ChatCompletion(
103
- id=request_id,
104
- choices=[choice],
105
- created=created_time,
106
- model="writecream",
107
- usage=usage
108
- )
109
- return completion
110
- except Exception as e:
111
- print(f"Error during Writecream request: {e}")
112
- raise IOError(f"Writecream request failed: {e}") from e
113
-
114
- class Chat(BaseChat):
115
- def __init__(self, client: 'Writecream'):
116
- self.completions = Completions(client)
117
-
118
- class Writecream(OpenAICompatibleProvider):
119
- """
120
- OpenAI-compatible client for Writecream API.
121
-
122
- Usage:
123
- client = Writecream()
124
- response = client.chat.completions.create(
125
- messages=[{"role": "system", "content": "You are a helpful assistant."},
126
- {"role": "user", "content": "What is the capital of France?"}]
127
- )
128
- print(response.choices[0].message.content)
129
- """
130
- AVAILABLE_MODELS = ["writecream"]
131
-
132
- def __init__(self, timeout: Optional[int] = 30, browser: str = "chrome"):
133
- self.timeout = timeout
134
- self.base_url = "https://8pe3nv3qha.execute-api.us-east-1.amazonaws.com/default/llm_chat"
135
- self.session = requests.Session()
136
- agent = LitAgent()
137
- self.headers = {
138
- "User-Agent": agent.random(),
139
- "Referer": "https://www.writecream.com/chatgpt-chat/"
140
- }
141
- self.session.headers.update(self.headers)
142
- self.chat = Chat(self)
143
-
144
- def convert_model_name(self, model: str) -> str:
145
- return "writecream"
146
-
147
- @property
148
- def models(self):
149
- class _ModelList:
150
- def list(inner_self):
151
- return Writecream.AVAILABLE_MODELS
152
- return _ModelList()
153
-
154
- # Simple test if run directly
155
- if __name__ == "__main__":
156
- client = Writecream()
157
- response = client.chat.completions.create(
158
- messages=[
159
- {"role": "system", "content": "You are a helpful assistant."},
160
- {"role": "user", "content": "What is the capital of France?"}
161
- ]
162
- )
163
- print(response.choices[0].message.content)
1
+ import time
2
+ import uuid
3
+ import requests
4
+ import json
5
+ from typing import List, Dict, Optional, Union, Generator, Any
6
+
7
+ # Import base classes and utility structures
8
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
9
+ from .utils import (
10
+ ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
11
+ ChatCompletionMessage, CompletionUsage, count_tokens
12
+ )
13
+
14
+ # Attempt to import LitAgent, fallback if not available
15
+ try:
16
+ from webscout.litagent import LitAgent
17
+ except ImportError:
18
+ print("Warning: LitAgent not found. Using default user agent.")
19
+
20
+ class Completions(BaseCompletions):
21
+ def __init__(self, client: 'Writecream'):
22
+ self._client = client
23
+
24
+ def create(
25
+ self,
26
+ *,
27
+ model: str = None, # Not used by Writecream, for compatibility
28
+ messages: List[Dict[str, str]],
29
+ max_tokens: Optional[int] = None, # Not used by Writecream
30
+ stream: bool = False,
31
+ temperature: Optional[float] = None, # Not used by Writecream
32
+ top_p: Optional[float] = None, # Not used by Writecream
33
+ timeout: Optional[int] = None,
34
+ proxies: Optional[Dict[str, str]] = None,
35
+ **kwargs: Any
36
+ ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
37
+ """
38
+ Creates a model response for the given chat conversation.
39
+ Mimics openai.chat.completions.create
40
+ """
41
+ payload = messages
42
+ request_id = f"chatcmpl-{uuid.uuid4()}"
43
+ created_time = int(time.time())
44
+ if stream:
45
+ return self._create_stream(request_id, created_time, payload, timeout, proxies)
46
+ else:
47
+ return self._create_non_stream(request_id, created_time, payload, timeout, proxies)
48
+
49
+ def _create_stream(
50
+ self, request_id: str, created_time: int, payload: List[Dict[str, str]], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
51
+ ) -> Generator[ChatCompletionChunk, None, None]:
52
+ # Writecream does not support streaming, so yield the full response as a single chunk
53
+ completion = self._create_non_stream(request_id, created_time, payload, timeout, proxies)
54
+ content = completion.choices[0].message.content
55
+ # Yield as a single chunk
56
+ delta = ChoiceDelta(content=content)
57
+ choice = Choice(index=0, delta=delta, finish_reason=None)
58
+ chunk = ChatCompletionChunk(
59
+ id=request_id,
60
+ choices=[choice],
61
+ created=created_time,
62
+ model="writecream",
63
+ )
64
+ yield chunk
65
+ # Final chunk with finish_reason
66
+ delta = ChoiceDelta(content=None)
67
+ choice = Choice(index=0, delta=delta, finish_reason="stop")
68
+ chunk = ChatCompletionChunk(
69
+ id=request_id,
70
+ choices=[choice],
71
+ created=created_time,
72
+ model="writecream",
73
+ )
74
+ yield chunk
75
+
76
+ def _create_non_stream(
77
+ self, request_id: str, created_time: int, payload: List[Dict[str, str]], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
78
+ ) -> ChatCompletion:
79
+ try:
80
+ params = {
81
+ "query": json.dumps(payload),
82
+ "link": "writecream.com"
83
+ }
84
+ response = self._client.session.get(
85
+ self._client.base_url,
86
+ params=params,
87
+ headers=self._client.headers,
88
+ timeout=timeout or self._client.timeout,
89
+ proxies=proxies or getattr(self._client, "proxies", None)
90
+ )
91
+ response.raise_for_status()
92
+ data = response.json()
93
+ # Extract the response content according to the new API format
94
+ content = data.get("response_content", "")
95
+ # Estimate tokens
96
+ prompt_tokens = sum(count_tokens(m.get("content", "")) for m in payload)
97
+ completion_tokens = count_tokens(content)
98
+ usage = CompletionUsage(
99
+ prompt_tokens=prompt_tokens,
100
+ completion_tokens=completion_tokens,
101
+ total_tokens=prompt_tokens + completion_tokens
102
+ )
103
+ message = ChatCompletionMessage(role="assistant", content=content)
104
+ choice = Choice(index=0, message=message, finish_reason="stop")
105
+ completion = ChatCompletion(
106
+ id=request_id,
107
+ choices=[choice],
108
+ created=created_time,
109
+ model="writecream",
110
+ usage=usage
111
+ )
112
+ return completion
113
+ except Exception as e:
114
+ print(f"Error during Writecream request: {e}")
115
+ raise IOError(f"Writecream request failed: {e}") from e
116
+
117
+ class Chat(BaseChat):
118
+ def __init__(self, client: 'Writecream'):
119
+ self.completions = Completions(client)
120
+
121
+ class Writecream(OpenAICompatibleProvider):
122
+ """
123
+ OpenAI-compatible client for Writecream API.
124
+
125
+ Usage:
126
+ client = Writecream()
127
+ response = client.chat.completions.create(
128
+ messages=[{"role": "system", "content": "You are a helpful assistant."},
129
+ {"role": "user", "content": "What is the capital of France?"}]
130
+ )
131
+ print(response.choices[0].message.content)
132
+ """
133
+ AVAILABLE_MODELS = ["writecream"]
134
+
135
+ def __init__(self, browser: str = "chrome"):
136
+ self.timeout = None
137
+ self.base_url = "https://8pe3nv3qha.execute-api.us-east-1.amazonaws.com/default/llm_chat"
138
+ self.session = requests.Session()
139
+ agent = LitAgent()
140
+ self.headers = {
141
+ "User-Agent": agent.random(),
142
+ "Referer": "https://www.writecream.com/chatgpt-chat/"
143
+ }
144
+ self.session.headers.update(self.headers)
145
+ self.chat = Chat(self)
146
+
147
+ def convert_model_name(self, model: str) -> str:
148
+ return "writecream"
149
+
150
+ @property
151
+ def models(self):
152
+ class _ModelList:
153
+ def list(inner_self):
154
+ return Writecream.AVAILABLE_MODELS
155
+ return _ModelList()
156
+
157
+ # Simple test if run directly
158
+ if __name__ == "__main__":
159
+ client = Writecream()
160
+ response = client.chat.completions.create(
161
+ messages=[
162
+ {"role": "system", "content": "You are a helpful assistant."},
163
+ {"role": "user", "content": "What is the capital of France?"}
164
+ ]
165
+ )
166
+ print(response.choices[0].message.content)
@@ -9,7 +9,7 @@ from typing import List, Dict, Optional, Union, Generator, Any
9
9
  from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
10
10
  from .utils import (
11
11
  ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
12
- ChatCompletionMessage, CompletionUsage
12
+ ChatCompletionMessage, CompletionUsage, count_tokens
13
13
  )
14
14
 
15
15
  # Import LitAgent
@@ -30,6 +30,8 @@ class Completions(BaseCompletions):
30
30
  stream: bool = False,
31
31
  temperature: Optional[float] = None,
32
32
  top_p: Optional[float] = None,
33
+ timeout: Optional[int] = None,
34
+ proxies: Optional[Dict[str, str]] = None,
33
35
  **kwargs: Any
34
36
  ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
35
37
  """
@@ -60,12 +62,12 @@ class Completions(BaseCompletions):
60
62
  created_time = int(time.time())
61
63
 
62
64
  if stream:
63
- return self._create_stream(request_id, created_time, model, payload)
65
+ return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
64
66
  else:
65
- return self._create_non_stream(request_id, created_time, model, payload)
67
+ return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
66
68
 
67
69
  def _create_stream(
68
- self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
70
+ self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
69
71
  ) -> Generator[ChatCompletionChunk, None, None]:
70
72
  try:
71
73
  response = self._client.session.post(
@@ -73,7 +75,8 @@ class Completions(BaseCompletions):
73
75
  headers=self._client.headers,
74
76
  json=payload,
75
77
  stream=True,
76
- timeout=self._client.timeout
78
+ timeout=timeout or self._client.timeout,
79
+ proxies=proxies or getattr(self._client, "proxies", None)
77
80
  )
78
81
 
79
82
  # Handle non-200 responses
@@ -82,15 +85,11 @@ class Completions(BaseCompletions):
82
85
  f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
83
86
  )
84
87
 
85
- # Track token usage across chunks
86
- prompt_tokens = 0
88
+ # Use count_tokens for prompt tokens
89
+ prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
87
90
  completion_tokens = 0
88
91
  total_tokens = 0
89
92
 
90
- # Estimate prompt tokens based on message length
91
- for msg in payload.get("messages", []):
92
- prompt_tokens += len(msg.get("content", "").split())
93
-
94
93
  for line in response.iter_lines():
95
94
  if line:
96
95
  decoded_line = line.decode('utf-8').strip()
@@ -103,8 +102,8 @@ class Completions(BaseCompletions):
103
102
  # Format the content (replace escaped newlines)
104
103
  content = self._client.format_text(content)
105
104
 
106
- # Update token counts
107
- completion_tokens += 1
105
+ # Update token counts using count_tokens
106
+ completion_tokens += count_tokens(content)
108
107
  total_tokens = prompt_tokens + completion_tokens
109
108
 
110
109
  # Create the delta object
@@ -131,20 +130,15 @@ class Completions(BaseCompletions):
131
130
  system_fingerprint=None
132
131
  )
133
132
 
134
- # Convert to dict for proper formatting
135
- chunk_dict = chunk.to_dict()
136
-
137
- # Add usage information to match OpenAI format
138
- usage_dict = {
133
+ # Set usage directly on the chunk object
134
+ chunk.usage = {
139
135
  "prompt_tokens": prompt_tokens,
140
136
  "completion_tokens": completion_tokens,
141
137
  "total_tokens": total_tokens,
142
138
  "estimated_cost": None
143
139
  }
144
140
 
145
- chunk_dict["usage"] = usage_dict
146
-
147
- # Return the chunk object for internal processing
141
+ # Return the chunk object with usage information
148
142
  yield chunk
149
143
 
150
144
  # Final chunk with finish_reason="stop"
@@ -169,8 +163,8 @@ class Completions(BaseCompletions):
169
163
  system_fingerprint=None
170
164
  )
171
165
 
172
- chunk_dict = chunk.to_dict()
173
- chunk_dict["usage"] = {
166
+ # Set usage directly on the chunk object
167
+ chunk.usage = {
174
168
  "prompt_tokens": prompt_tokens,
175
169
  "completion_tokens": completion_tokens,
176
170
  "total_tokens": total_tokens,
@@ -184,16 +178,16 @@ class Completions(BaseCompletions):
184
178
  raise IOError(f"X0GPT request failed: {e}") from e
185
179
 
186
180
  def _create_non_stream(
187
- self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
181
+ self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
188
182
  ) -> ChatCompletion:
189
183
  try:
190
- # For non-streaming, we still use streaming internally to collect the full response
191
184
  response = self._client.session.post(
192
185
  self._client.api_endpoint,
193
186
  headers=self._client.headers,
194
187
  json=payload,
195
188
  stream=True,
196
- timeout=self._client.timeout
189
+ timeout=timeout or self._client.timeout,
190
+ proxies=proxies or getattr(self._client, "proxies", None)
197
191
  )
198
192
 
199
193
  # Handle non-200 responses
@@ -214,12 +208,9 @@ class Completions(BaseCompletions):
214
208
  # Format the text (replace escaped newlines)
215
209
  full_text = self._client.format_text(full_text)
216
210
 
217
- # Estimate token counts
218
- prompt_tokens = 0
219
- for msg in payload.get("messages", []):
220
- prompt_tokens += len(msg.get("content", "").split())
221
-
222
- completion_tokens = len(full_text.split())
211
+ # Use count_tokens for accurate token counts
212
+ prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
213
+ completion_tokens = count_tokens(full_text)
223
214
  total_tokens = prompt_tokens + completion_tokens
224
215
 
225
216
  # Create the message object
@@ -268,26 +259,24 @@ class X0GPT(OpenAICompatibleProvider):
268
259
  Usage:
269
260
  client = X0GPT()
270
261
  response = client.chat.completions.create(
271
- model="gpt-4",
262
+ model="X0GPT",
272
263
  messages=[{"role": "user", "content": "Hello!"}]
273
264
  )
274
265
  """
275
266
 
276
- AVAILABLE_MODELS = ["gpt-4", "gpt-3.5-turbo"]
267
+ AVAILABLE_MODELS = ["X0GPT"]
277
268
 
278
269
  def __init__(
279
270
  self,
280
- timeout: Optional[int] = None,
281
271
  browser: str = "chrome"
282
272
  ):
283
273
  """
284
274
  Initialize the X0GPT client.
285
275
 
286
276
  Args:
287
- timeout: Request timeout in seconds (None for no timeout)
288
277
  browser: Browser to emulate in user agent
289
278
  """
290
- self.timeout = timeout
279
+ self.timeout = None
291
280
  self.api_endpoint = "https://x0-gpt.devwtf.in/api/stream/reply"
292
281
  self.session = requests.Session()
293
282