webscout 8.2.7__py3-none-any.whl → 8.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (281) hide show
  1. webscout/AIauto.py +33 -15
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +703 -250
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/Act.md +309 -0
  6. webscout/Extra/GitToolkit/__init__.py +10 -0
  7. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  8. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  10. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  11. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  12. webscout/Extra/YTToolkit/README.md +375 -0
  13. webscout/Extra/YTToolkit/YTdownloader.py +957 -0
  14. webscout/Extra/YTToolkit/__init__.py +3 -0
  15. webscout/Extra/YTToolkit/transcriber.py +476 -0
  16. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  17. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  18. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  19. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  20. webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
  21. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  22. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  23. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  24. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  25. webscout/Extra/YTToolkit/ytapi/query.py +40 -0
  26. webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
  27. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  28. webscout/Extra/YTToolkit/ytapi/video.py +232 -0
  29. webscout/Extra/__init__.py +7 -0
  30. webscout/Extra/autocoder/__init__.py +9 -0
  31. webscout/Extra/autocoder/autocoder.py +1105 -0
  32. webscout/Extra/autocoder/autocoder_utiles.py +332 -0
  33. webscout/Extra/gguf.md +430 -0
  34. webscout/Extra/gguf.py +684 -0
  35. webscout/Extra/tempmail/README.md +488 -0
  36. webscout/Extra/tempmail/__init__.py +28 -0
  37. webscout/Extra/tempmail/async_utils.py +141 -0
  38. webscout/Extra/tempmail/base.py +161 -0
  39. webscout/Extra/tempmail/cli.py +187 -0
  40. webscout/Extra/tempmail/emailnator.py +84 -0
  41. webscout/Extra/tempmail/mail_tm.py +361 -0
  42. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  43. webscout/Extra/weather.md +281 -0
  44. webscout/Extra/weather.py +194 -0
  45. webscout/Extra/weather_ascii.py +76 -0
  46. webscout/Litlogger/README.md +10 -0
  47. webscout/Litlogger/__init__.py +15 -0
  48. webscout/Litlogger/formats.py +4 -0
  49. webscout/Litlogger/handlers.py +103 -0
  50. webscout/Litlogger/levels.py +13 -0
  51. webscout/Litlogger/logger.py +92 -0
  52. webscout/Provider/AI21.py +177 -0
  53. webscout/Provider/AISEARCH/DeepFind.py +254 -0
  54. webscout/Provider/AISEARCH/Perplexity.py +333 -0
  55. webscout/Provider/AISEARCH/README.md +279 -0
  56. webscout/Provider/AISEARCH/__init__.py +9 -0
  57. webscout/Provider/AISEARCH/felo_search.py +202 -0
  58. webscout/Provider/AISEARCH/genspark_search.py +324 -0
  59. webscout/Provider/AISEARCH/hika_search.py +186 -0
  60. webscout/Provider/AISEARCH/iask_search.py +410 -0
  61. webscout/Provider/AISEARCH/monica_search.py +220 -0
  62. webscout/Provider/AISEARCH/scira_search.py +298 -0
  63. webscout/Provider/AISEARCH/webpilotai_search.py +255 -0
  64. webscout/Provider/Aitopia.py +316 -0
  65. webscout/Provider/AllenAI.py +440 -0
  66. webscout/Provider/Andi.py +228 -0
  67. webscout/Provider/Blackboxai.py +791 -0
  68. webscout/Provider/ChatGPTClone.py +237 -0
  69. webscout/Provider/ChatGPTGratis.py +194 -0
  70. webscout/Provider/ChatSandbox.py +342 -0
  71. webscout/Provider/Cloudflare.py +324 -0
  72. webscout/Provider/Cohere.py +208 -0
  73. webscout/Provider/Deepinfra.py +340 -0
  74. webscout/Provider/ExaAI.py +261 -0
  75. webscout/Provider/ExaChat.py +358 -0
  76. webscout/Provider/Flowith.py +217 -0
  77. webscout/Provider/FreeGemini.py +250 -0
  78. webscout/Provider/Gemini.py +169 -0
  79. webscout/Provider/GithubChat.py +369 -0
  80. webscout/Provider/GizAI.py +295 -0
  81. webscout/Provider/Glider.py +225 -0
  82. webscout/Provider/Groq.py +801 -0
  83. webscout/Provider/HF_space/__init__.py +0 -0
  84. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  85. webscout/Provider/HeckAI.py +375 -0
  86. webscout/Provider/HuggingFaceChat.py +469 -0
  87. webscout/Provider/Hunyuan.py +283 -0
  88. webscout/Provider/Jadve.py +291 -0
  89. webscout/Provider/Koboldai.py +384 -0
  90. webscout/Provider/LambdaChat.py +411 -0
  91. webscout/Provider/Llama3.py +259 -0
  92. webscout/Provider/MCPCore.py +315 -0
  93. webscout/Provider/Marcus.py +198 -0
  94. webscout/Provider/Nemotron.py +218 -0
  95. webscout/Provider/Netwrck.py +270 -0
  96. webscout/Provider/OLLAMA.py +396 -0
  97. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -0
  98. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  99. webscout/Provider/OPENAI/FreeGemini.py +283 -0
  100. webscout/Provider/OPENAI/NEMOTRON.py +232 -0
  101. webscout/Provider/OPENAI/Qwen3.py +283 -0
  102. webscout/Provider/OPENAI/README.md +952 -0
  103. webscout/Provider/OPENAI/TwoAI.py +357 -0
  104. webscout/Provider/OPENAI/__init__.py +40 -0
  105. webscout/Provider/OPENAI/ai4chat.py +293 -0
  106. webscout/Provider/OPENAI/api.py +969 -0
  107. webscout/Provider/OPENAI/base.py +249 -0
  108. webscout/Provider/OPENAI/c4ai.py +373 -0
  109. webscout/Provider/OPENAI/chatgpt.py +556 -0
  110. webscout/Provider/OPENAI/chatgptclone.py +494 -0
  111. webscout/Provider/OPENAI/chatsandbox.py +173 -0
  112. webscout/Provider/OPENAI/copilot.py +242 -0
  113. webscout/Provider/OPENAI/deepinfra.py +322 -0
  114. webscout/Provider/OPENAI/e2b.py +1414 -0
  115. webscout/Provider/OPENAI/exaai.py +417 -0
  116. webscout/Provider/OPENAI/exachat.py +444 -0
  117. webscout/Provider/OPENAI/flowith.py +162 -0
  118. webscout/Provider/OPENAI/freeaichat.py +359 -0
  119. webscout/Provider/OPENAI/glider.py +326 -0
  120. webscout/Provider/OPENAI/groq.py +364 -0
  121. webscout/Provider/OPENAI/heckai.py +308 -0
  122. webscout/Provider/OPENAI/llmchatco.py +335 -0
  123. webscout/Provider/OPENAI/mcpcore.py +389 -0
  124. webscout/Provider/OPENAI/multichat.py +376 -0
  125. webscout/Provider/OPENAI/netwrck.py +357 -0
  126. webscout/Provider/OPENAI/oivscode.py +287 -0
  127. webscout/Provider/OPENAI/opkfc.py +496 -0
  128. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  129. webscout/Provider/OPENAI/scirachat.py +477 -0
  130. webscout/Provider/OPENAI/sonus.py +304 -0
  131. webscout/Provider/OPENAI/standardinput.py +433 -0
  132. webscout/Provider/OPENAI/textpollinations.py +339 -0
  133. webscout/Provider/OPENAI/toolbaz.py +413 -0
  134. webscout/Provider/OPENAI/typefully.py +355 -0
  135. webscout/Provider/OPENAI/typegpt.py +364 -0
  136. webscout/Provider/OPENAI/uncovrAI.py +463 -0
  137. webscout/Provider/OPENAI/utils.py +318 -0
  138. webscout/Provider/OPENAI/venice.py +431 -0
  139. webscout/Provider/OPENAI/wisecat.py +387 -0
  140. webscout/Provider/OPENAI/writecream.py +163 -0
  141. webscout/Provider/OPENAI/x0gpt.py +365 -0
  142. webscout/Provider/OPENAI/yep.py +382 -0
  143. webscout/Provider/OpenGPT.py +209 -0
  144. webscout/Provider/Openai.py +496 -0
  145. webscout/Provider/PI.py +429 -0
  146. webscout/Provider/Perplexitylabs.py +415 -0
  147. webscout/Provider/QwenLM.py +254 -0
  148. webscout/Provider/Reka.py +214 -0
  149. webscout/Provider/StandardInput.py +290 -0
  150. webscout/Provider/TTI/README.md +82 -0
  151. webscout/Provider/TTI/__init__.py +7 -0
  152. webscout/Provider/TTI/aiarta.py +365 -0
  153. webscout/Provider/TTI/artbit.py +0 -0
  154. webscout/Provider/TTI/base.py +64 -0
  155. webscout/Provider/TTI/fastflux.py +200 -0
  156. webscout/Provider/TTI/magicstudio.py +201 -0
  157. webscout/Provider/TTI/piclumen.py +203 -0
  158. webscout/Provider/TTI/pixelmuse.py +225 -0
  159. webscout/Provider/TTI/pollinations.py +221 -0
  160. webscout/Provider/TTI/utils.py +11 -0
  161. webscout/Provider/TTS/README.md +192 -0
  162. webscout/Provider/TTS/__init__.py +10 -0
  163. webscout/Provider/TTS/base.py +159 -0
  164. webscout/Provider/TTS/deepgram.py +156 -0
  165. webscout/Provider/TTS/elevenlabs.py +111 -0
  166. webscout/Provider/TTS/gesserit.py +128 -0
  167. webscout/Provider/TTS/murfai.py +113 -0
  168. webscout/Provider/TTS/openai_fm.py +129 -0
  169. webscout/Provider/TTS/parler.py +111 -0
  170. webscout/Provider/TTS/speechma.py +580 -0
  171. webscout/Provider/TTS/sthir.py +94 -0
  172. webscout/Provider/TTS/streamElements.py +333 -0
  173. webscout/Provider/TTS/utils.py +280 -0
  174. webscout/Provider/TeachAnything.py +229 -0
  175. webscout/Provider/TextPollinationsAI.py +308 -0
  176. webscout/Provider/TwoAI.py +475 -0
  177. webscout/Provider/TypliAI.py +305 -0
  178. webscout/Provider/UNFINISHED/ChatHub.py +209 -0
  179. webscout/Provider/UNFINISHED/Youchat.py +330 -0
  180. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  181. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  182. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  183. webscout/Provider/Venice.py +258 -0
  184. webscout/Provider/VercelAI.py +253 -0
  185. webscout/Provider/WiseCat.py +233 -0
  186. webscout/Provider/WrDoChat.py +370 -0
  187. webscout/Provider/Writecream.py +246 -0
  188. webscout/Provider/WritingMate.py +269 -0
  189. webscout/Provider/__init__.py +174 -0
  190. webscout/Provider/ai4chat.py +174 -0
  191. webscout/Provider/akashgpt.py +335 -0
  192. webscout/Provider/asksteve.py +220 -0
  193. webscout/Provider/cerebras.py +290 -0
  194. webscout/Provider/chatglm.py +215 -0
  195. webscout/Provider/cleeai.py +213 -0
  196. webscout/Provider/copilot.py +425 -0
  197. webscout/Provider/elmo.py +283 -0
  198. webscout/Provider/freeaichat.py +285 -0
  199. webscout/Provider/geminiapi.py +208 -0
  200. webscout/Provider/granite.py +235 -0
  201. webscout/Provider/hermes.py +266 -0
  202. webscout/Provider/julius.py +223 -0
  203. webscout/Provider/koala.py +170 -0
  204. webscout/Provider/learnfastai.py +325 -0
  205. webscout/Provider/llama3mitril.py +215 -0
  206. webscout/Provider/llmchat.py +258 -0
  207. webscout/Provider/llmchatco.py +306 -0
  208. webscout/Provider/lmarena.py +198 -0
  209. webscout/Provider/meta.py +801 -0
  210. webscout/Provider/multichat.py +364 -0
  211. webscout/Provider/oivscode.py +309 -0
  212. webscout/Provider/samurai.py +224 -0
  213. webscout/Provider/scira_chat.py +299 -0
  214. webscout/Provider/scnet.py +243 -0
  215. webscout/Provider/searchchat.py +292 -0
  216. webscout/Provider/sonus.py +258 -0
  217. webscout/Provider/talkai.py +194 -0
  218. webscout/Provider/toolbaz.py +353 -0
  219. webscout/Provider/turboseek.py +266 -0
  220. webscout/Provider/typefully.py +202 -0
  221. webscout/Provider/typegpt.py +289 -0
  222. webscout/Provider/uncovr.py +368 -0
  223. webscout/Provider/x0gpt.py +299 -0
  224. webscout/Provider/yep.py +389 -0
  225. webscout/__init__.py +4 -2
  226. webscout/cli.py +3 -28
  227. webscout/client.py +70 -0
  228. webscout/conversation.py +35 -35
  229. webscout/litagent/Readme.md +276 -0
  230. webscout/litagent/__init__.py +29 -0
  231. webscout/litagent/agent.py +455 -0
  232. webscout/litagent/constants.py +60 -0
  233. webscout/litprinter/__init__.py +59 -0
  234. webscout/optimizers.py +419 -419
  235. webscout/scout/README.md +404 -0
  236. webscout/scout/__init__.py +8 -0
  237. webscout/scout/core/__init__.py +7 -0
  238. webscout/scout/core/crawler.py +210 -0
  239. webscout/scout/core/scout.py +607 -0
  240. webscout/scout/core/search_result.py +96 -0
  241. webscout/scout/core/text_analyzer.py +63 -0
  242. webscout/scout/core/text_utils.py +277 -0
  243. webscout/scout/core/web_analyzer.py +52 -0
  244. webscout/scout/element.py +478 -0
  245. webscout/scout/parsers/__init__.py +69 -0
  246. webscout/scout/parsers/html5lib_parser.py +172 -0
  247. webscout/scout/parsers/html_parser.py +236 -0
  248. webscout/scout/parsers/lxml_parser.py +178 -0
  249. webscout/scout/utils.py +37 -0
  250. webscout/swiftcli/Readme.md +323 -0
  251. webscout/swiftcli/__init__.py +95 -0
  252. webscout/swiftcli/core/__init__.py +7 -0
  253. webscout/swiftcli/core/cli.py +297 -0
  254. webscout/swiftcli/core/context.py +104 -0
  255. webscout/swiftcli/core/group.py +241 -0
  256. webscout/swiftcli/decorators/__init__.py +28 -0
  257. webscout/swiftcli/decorators/command.py +221 -0
  258. webscout/swiftcli/decorators/options.py +220 -0
  259. webscout/swiftcli/decorators/output.py +252 -0
  260. webscout/swiftcli/exceptions.py +21 -0
  261. webscout/swiftcli/plugins/__init__.py +9 -0
  262. webscout/swiftcli/plugins/base.py +135 -0
  263. webscout/swiftcli/plugins/manager.py +269 -0
  264. webscout/swiftcli/utils/__init__.py +59 -0
  265. webscout/swiftcli/utils/formatting.py +252 -0
  266. webscout/swiftcli/utils/parsing.py +267 -0
  267. webscout/version.py +1 -1
  268. webscout/webscout_search.py +2 -182
  269. webscout/webscout_search_async.py +1 -179
  270. webscout/zeroart/README.md +89 -0
  271. webscout/zeroart/__init__.py +135 -0
  272. webscout/zeroart/base.py +66 -0
  273. webscout/zeroart/effects.py +101 -0
  274. webscout/zeroart/fonts.py +1239 -0
  275. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/METADATA +262 -83
  276. webscout-8.2.9.dist-info/RECORD +289 -0
  277. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  278. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  279. webscout-8.2.7.dist-info/RECORD +0 -26
  280. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  281. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,378 @@
1
+ import json
2
+ import time
3
+ import uuid
4
+ import re
5
+ from typing import List, Dict, Optional, Union, Generator, Any
6
+
7
+ from curl_cffi import CurlError
8
+ from curl_cffi.requests import Session
9
+ from uuid import uuid4
10
+
11
+ # Import base classes and utility structures
12
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
13
+ from .utils import (
14
+ ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
15
+ ChatCompletionMessage, CompletionUsage, count_tokens
16
+ )
17
+
18
+ from webscout.AIutel import sanitize_stream
19
+ from webscout.litagent import LitAgent
20
+
21
class Completions(BaseCompletions):
    """OpenAI-style ``chat.completions`` interface for the Cloudflare playground API."""

    def __init__(self, client: 'Cloudflare'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Create a chat completion with the Cloudflare API.

        Args:
            model: The model to use (from AVAILABLE_MODELS).
            messages: List of message dictionaries with 'role' and 'content'.
            max_tokens: Maximum number of tokens to generate (defaults to 600).
            stream: Whether to stream the response.
            temperature: Accepted for OpenAI interface compatibility but NOT
                forwarded to the upstream API (the payload has no such field).
            top_p: Accepted for interface compatibility but not forwarded.
            **kwargs: Additional parameters (currently ignored).

        Returns:
            If stream=False, a ChatCompletion object.
            If stream=True, a generator yielding ChatCompletionChunk objects.

        Raises:
            IOError: If the underlying HTTP request fails.
        """
        payload = {
            "messages": messages,
            "lora": None,
            "model": model,
            "max_tokens": max_tokens or 600,
            "stream": True  # The upstream endpoint is always consumed as a stream.
        }

        request_id = str(uuid.uuid4())
        created_time = int(time.time())

        if stream:
            return self._create_streaming(
                request_id=request_id,
                created_time=created_time,
                model=model,
                payload=payload
            )

        return self._create_non_streaming(
            request_id=request_id,
            created_time=created_time,
            model=model,
            payload=payload
        )

    def _create_streaming(
        self,
        *,
        request_id: str,
        created_time: int,
        model: str,
        payload: Dict[str, Any]
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Yield ChatCompletionChunk objects as content arrives from the API.

        NOTE: chunks carry no usage information, so no token accounting is
        performed here (the previous per-chunk recount was unused and
        quadratic in the response length).
        """
        try:
            response = self._client.session.post(
                self._client.chat_endpoint,
                headers=self._client.headers,
                cookies=self._client.cookies,
                data=json.dumps(payload),
                stream=True,
                timeout=self._client.timeout,
                impersonate="chrome120"
            )
            response.raise_for_status()

            # sanitize_stream pulls the text out of Cloudflare's `0:"..."`
            # stream framing via _cloudflare_extractor.
            processed_stream = sanitize_stream(
                data=response.iter_content(chunk_size=None),
                intro_value=None,
                to_json=False,
                skip_markers=None,
                content_extractor=self._cloudflare_extractor,
                yield_raw_on_error=False
            )

            for content_chunk in processed_stream:
                if content_chunk and isinstance(content_chunk, str):
                    delta = ChoiceDelta(content=content_chunk)
                    choice = Choice(index=0, delta=delta, finish_reason=None)
                    yield ChatCompletionChunk(
                        id=request_id,
                        choices=[choice],
                        created=created_time,
                        model=model
                    )

            # Terminal chunk signalling the end of the stream.
            delta = ChoiceDelta(content=None)
            choice = Choice(index=0, delta=delta, finish_reason="stop")
            yield ChatCompletionChunk(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model
            )

        except CurlError as e:
            raise IOError(f"Cloudflare streaming request failed (CurlError): {e}") from e
        except Exception as e:
            raise IOError(f"Cloudflare streaming request failed: {e}") from e

    def _create_non_streaming(
        self,
        *,
        request_id: str,
        created_time: int,
        model: str,
        payload: Dict[str, Any]
    ) -> ChatCompletion:
        """Collect the whole streamed response and return one ChatCompletion.

        Raises:
            IOError: If the underlying HTTP request fails.
        """
        try:
            response = self._client.session.post(
                self._client.chat_endpoint,
                headers=self._client.headers,
                cookies=self._client.cookies,
                data=json.dumps(payload),
                stream=True,  # Still use the streaming API but collect all chunks.
                timeout=self._client.timeout,
                impersonate="chrome120"
            )
            response.raise_for_status()

            processed_stream = sanitize_stream(
                data=response.iter_content(chunk_size=None),
                intro_value=None,
                to_json=False,
                skip_markers=None,
                content_extractor=self._cloudflare_extractor,
                yield_raw_on_error=False
            )

            # Concatenate every extracted text fragment into the final answer.
            full_content = ""
            for content_chunk in processed_stream:
                if content_chunk and isinstance(content_chunk, str):
                    full_content += content_chunk

            message = ChatCompletionMessage(
                role="assistant",
                content=full_content
            )

            choice = Choice(
                index=0,
                message=message,
                finish_reason="stop"
            )

            # Token usage is an estimate via count_tokens; the upstream API
            # reports no usage of its own.
            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload["messages"]])
            completion_tokens = count_tokens(full_content)
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens
            )

            completion = ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )

            return completion

        except CurlError as e:
            raise IOError(f"Cloudflare request failed (CurlError): {e}") from e
        except Exception as e:
            raise IOError(f"Cloudflare request failed: {e}") from e

    @staticmethod
    def _cloudflare_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """
        Extract text content from one Cloudflare stream fragment.

        Fragments look like ``0:"<content>"``; only string chunks are handled.

        Args:
            chunk: The raw fragment to extract content from.

        Returns:
            The extracted text, or None if the fragment carries no content.
        """
        if isinstance(chunk, str):
            match = re.search(r'0:"(.*?)"(?=,|$)', chunk)
            if match:
                # Decode potential unicode escapes and handle escaped
                # quotes/backslashes left over from the JSON-ish framing.
                content = match.group(1).encode().decode('unicode_escape')
                return content.replace('\\\\', '\\').replace('\\"', '"')
        return None
class Chat(BaseChat):
    """Namespace object exposing ``completions``, mirroring the OpenAI SDK layout."""

    def __init__(self, client: 'Cloudflare'):
        # Every request is delegated to the Completions helper bound to this client.
        self.completions = Completions(client)
class Cloudflare(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for Cloudflare API.

    Usage:
        client = Cloudflare()
        response = client.chat.completions.create(
            model="@cf/meta/llama-3-8b-instruct",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    AVAILABLE_MODELS = [
        "@hf/thebloke/deepseek-coder-6.7b-base-awq",
        "@hf/thebloke/deepseek-coder-6.7b-instruct-awq",
        "@cf/deepseek-ai/deepseek-math-7b-instruct",
        "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
        "@cf/thebloke/discolm-german-7b-v1-awq",
        "@cf/tiiuae/falcon-7b-instruct",
        "@cf/google/gemma-3-12b-it",
        "@hf/google/gemma-7b-it",
        "@hf/nousresearch/hermes-2-pro-mistral-7b",
        "@hf/thebloke/llama-2-13b-chat-awq",
        "@cf/meta/llama-2-7b-chat-fp16",
        "@cf/meta/llama-2-7b-chat-int8",
        "@cf/meta/llama-3-8b-instruct",
        "@cf/meta/llama-3-8b-instruct-awq",
        "@cf/meta/llama-3.1-8b-instruct-awq",
        "@cf/meta/llama-3.1-8b-instruct-fp8",
        "@cf/meta/llama-3.2-11b-vision-instruct",
        "@cf/meta/llama-3.2-1b-instruct",
        "@cf/meta/llama-3.2-3b-instruct",
        "@cf/meta/llama-3.3-70b-instruct-fp8-fast",
        "@cf/meta/llama-4-scout-17b-16e-instruct",
        "@cf/meta/llama-guard-3-8b",
        "@hf/thebloke/llamaguard-7b-awq",
        "@hf/meta-llama/meta-llama-3-8b-instruct",
        "@cf/mistral/mistral-7b-instruct-v0.1",
        "@hf/thebloke/mistral-7b-instruct-v0.1-awq",
        "@hf/mistral/mistral-7b-instruct-v0.2",
        "@cf/mistralai/mistral-small-3.1-24b-instruct",
        "@hf/thebloke/neural-chat-7b-v3-1-awq",
        "@cf/openchat/openchat-3.5-0106",
        "@hf/thebloke/openhermes-2.5-mistral-7b-awq",
        "@cf/microsoft/phi-2",
        "@cf/qwen/qwen1.5-0.5b-chat",
        "@cf/qwen/qwen1.5-1.8b-chat",
        "@cf/qwen/qwen1.5-14b-chat-awq",
        "@cf/qwen/qwen1.5-7b-chat-awq",
        "@cf/qwen/qwen2.5-coder-32b-instruct",
        "@cf/qwen/qwq-32b",
        "@cf/defog/sqlcoder-7b-2",
        "@hf/nexusflow/starling-lm-7b-beta",
        "@cf/tinyllama/tinyllama-1.1b-chat-v1.0",
        "@cf/fblgit/una-cybertron-7b-v2-bf16",
        "@hf/thebloke/zephyr-7b-beta-awq"
    ]

    def __init__(
        self,
        api_key: Optional[str] = None,  # Not used but included for compatibility
        timeout: int = 30,
        proxies: Optional[dict] = None,
    ):
        """
        Initialize the Cloudflare client.

        Args:
            api_key: Not used but included for compatibility with OpenAI interface.
            timeout: Request timeout in seconds.
            proxies: Optional proxy configuration. Defaults to no proxies.
                (A ``None`` sentinel is used instead of a mutable ``{}``
                default so instances never share one dict.)
        """
        self.timeout = timeout
        self.proxies = proxies if proxies is not None else {}
        self.chat_endpoint = "https://playground.ai.cloudflare.com/api/inference"

        # curl_cffi session (supports browser impersonation).
        self.session = Session()

        # Browser-like headers; User-Agent is randomized per client.
        self.headers = {
            'Accept': 'text/event-stream',
            'Accept-Encoding': 'gzip, deflate, br, zstd',
            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
            'Content-Type': 'application/json',
            'DNT': '1',
            'Origin': 'https://playground.ai.cloudflare.com',
            'Referer': 'https://playground.ai.cloudflare.com/',
            'Sec-CH-UA': '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            'Sec-CH-UA-Mobile': '?0',
            'Sec-CH-UA-Platform': '"Windows"',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'User-Agent': LitAgent().random()
        }

        # Random cookie values; presumably to look like a fresh browser
        # session — the endpoint is not authenticated with these.
        self.cookies = {
            'cfzs_amplitude': uuid4().hex,
            'cfz_amplitude': uuid4().hex,
            '__cf_bm': uuid4().hex,
        }

        # Apply headers and proxies to the session.
        self.session.headers.update(self.headers)
        self.session.proxies = self.proxies

        # OpenAI-style entry point: client.chat.completions.create(...)
        self.chat = Chat(self)

    @property
    def models(self):
        """Return an object whose ``.list()`` yields the available model names."""
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()
@@ -0,0 +1,283 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ OpenAI-compatible client for the FreeGemini provider,
4
+ which uses the free-gemini.vercel.app service.
5
+ """
6
+
7
+ import time
8
+ import uuid
9
+ import json
10
+ from typing import List, Dict, Optional, Union, Generator, Any
11
+
12
+ from curl_cffi.requests import Session
13
+
14
+ from webscout.litagent import LitAgent
15
+ from webscout.AIutel import sanitize_stream
16
+ from webscout.Provider.OPENAI.base import BaseChat, BaseCompletions, OpenAICompatibleProvider
17
+ from webscout.Provider.OPENAI.utils import (
18
+ ChatCompletion,
19
+ ChatCompletionChunk,
20
+ Choice,
21
+ ChatCompletionMessage,
22
+ ChoiceDelta,
23
+ CompletionUsage,
24
+ format_prompt,
25
+ get_system_prompt,
26
+ count_tokens
27
+ )
28
+
29
# ANSI escape codes for formatting console output.
BOLD = "\033[1m"   # bold text
RED = "\033[91m"   # bright red, used for error messages below
RESET = "\033[0m"  # reset all terminal attributes
+
34
class Completions(BaseCompletions):
    """OpenAI-style ``chat.completions`` interface for the FreeGemini service.

    NOTE: ``messages`` are forwarded verbatim as the Gemini ``contents``
    field, so callers supply Gemini-format entries (``role`` plus
    ``parts: [{"text": ...}]``), as the module's ``__main__`` example does.
    """

    def __init__(self, client: 'FreeGemini'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create.

        Args:
            model: Model name (used only for labelling the response objects).
            messages: Gemini-format conversation entries (see class note).
            max_tokens: Forwarded as ``generationConfig.maxOutputTokens``.
            stream: Whether to yield chunks instead of one completion.
            temperature: Forwarded as ``generationConfig.temperature``.
            top_p: Forwarded as ``generationConfig.topP``.
            **kwargs: Ignored.

        Raises:
            IOError: If the HTTP request fails.
        """
        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        api_payload = {
            "contents": messages,
            "generationConfig": {
                "temperature": temperature,
                "maxOutputTokens": max_tokens,
                "topP": top_p
            },
            "safetySettings": [
                {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"},
                {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_ONLY_HIGH"},
                {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_ONLY_HIGH"},
                {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_ONLY_HIGH"}
            ]
        }

        if stream:
            return self._create_stream(request_id, created_time, model, api_payload)
        return self._create_non_stream(request_id, created_time, model, api_payload)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Yield ChatCompletionChunk objects from the SSE stream.

        The chunks carry no usage field, so no token accounting is done
        here (the previous running ``completion_tokens`` tally was never
        attached to anything and has been removed).
        """
        try:
            response = self._client.session.post(
                self._client.api_endpoint,
                json=payload,
                stream=True,
                timeout=self._client.timeout,
                impersonate="chrome120"
            )
            response.raise_for_status()

            # sanitize_stream strips the SSE "data:" framing and decodes JSON,
            # then _gemini_extractor pulls out the text part.
            processed_stream = sanitize_stream(
                data=response.iter_content(chunk_size=None),
                intro_value="data:",
                to_json=True,
                content_extractor=self._gemini_extractor,
                yield_raw_on_error=False
            )

            for text_chunk in processed_stream:
                if text_chunk and isinstance(text_chunk, str):
                    delta = ChoiceDelta(content=text_chunk, role="assistant")
                    choice = Choice(index=0, delta=delta, finish_reason=None)
                    yield ChatCompletionChunk(
                        id=request_id,
                        choices=[choice],
                        created=created_time,
                        model=model
                    )

            # Terminal chunk signalling the end of the stream.
            delta = ChoiceDelta(content=None)
            choice = Choice(index=0, delta=delta, finish_reason="stop")
            yield ChatCompletionChunk(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model
            )

        except Exception as e:
            print(f"{RED}Error during FreeGemini stream request: {e}{RESET}")
            raise IOError(f"FreeGemini stream request failed: {e}") from e

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> ChatCompletion:
        """Collect the whole SSE stream and return one ChatCompletion."""
        try:
            # The API only speaks SSE, so stream anyway and concatenate.
            response = self._client.session.post(
                self._client.api_endpoint,
                json=payload,
                stream=True,
                timeout=self._client.timeout,
                impersonate="chrome120"
            )
            response.raise_for_status()

            full_text_response = ""

            # Parse each "data: {...}" line and accumulate the text parts.
            for line in response.iter_lines():
                if line and line.startswith(b"data:"):
                    json_str = line[5:].strip().decode('utf-8')
                    if json_str != "[DONE]":
                        try:
                            data = json.loads(json_str)
                            text_chunk = self._gemini_extractor(data)
                            if text_chunk:
                                full_text_response += text_chunk
                        except json.JSONDecodeError:
                            # Skip malformed/partial JSON lines.
                            pass

            # Rough usage estimate: the upstream API reports none, so the
            # prompt side is approximated from the serialized payload.
            prompt_tokens = count_tokens(str(payload))
            completion_tokens = count_tokens(full_text_response)
            total_tokens = prompt_tokens + completion_tokens

            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=total_tokens
            )

            message = ChatCompletionMessage(
                role="assistant",
                content=full_text_response
            )
            choice = Choice(
                index=0,
                message=message,
                finish_reason="stop"
            )

            completion = ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage
            )

            return completion

        except Exception as e:
            print(f"{RED}Error during FreeGemini non-stream request: {e}{RESET}")
            raise IOError(f"FreeGemini request failed: {e}") from e

    @staticmethod
    def _gemini_extractor(data: Dict) -> Optional[str]:
        """Extract the first candidate's first text part from one stream event.

        Returns None when the event has no candidates/parts/text.
        """
        try:
            if "candidates" in data and data["candidates"]:
                candidate = data["candidates"][0]
                if "content" in candidate and "parts" in candidate["content"]:
                    parts = candidate["content"]["parts"]
                    if parts and "text" in parts[0]:
                        return parts[0]["text"]
        except (KeyError, IndexError, TypeError):
            pass
        return None
class Chat(BaseChat):
    """Namespace object exposing ``completions``, mirroring the OpenAI SDK layout."""

    def __init__(self, client: 'FreeGemini'):
        # Every request is delegated to the Completions helper bound to this client.
        self.completions = Completions(client)
class FreeGemini(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for FreeGemini API.

    Usage:
        client = FreeGemini()
        response = client.chat.completions.create(
            model="gemini-2.0-flash",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    AVAILABLE_MODELS = ["gemini-2.0-flash"]

    def __init__(
        self,
        timeout: int = 30,
    ):
        """
        Initialize the FreeGemini client.

        Args:
            timeout: Request timeout in seconds.
        """
        self.timeout = timeout

        # SSE variant of the Gemini streamGenerateContent endpoint.
        self.api_endpoint = (
            "https://free-gemini.vercel.app/api/google/v1beta/models/"
            "gemini-2.0-flash:streamGenerateContent?alt=sse"
        )

        # curl_cffi session for better Cloudflare handling; LitAgent
        # supplies a randomized browser fingerprint.
        self.session = Session()
        self.agent = LitAgent()

        self.headers = {
            "Content-Type": "application/json",
            "Accept": "application/json, text/event-stream",
            "User-Agent": self.agent.random(),
            "Origin": "https://free-gemini.vercel.app",
            "Referer": "https://free-gemini.vercel.app/",
        }
        self.session.headers.update(self.headers)

        # OpenAI-style entry point: client.chat.completions.create(...)
        self.chat = Chat(self)

    @property
    def models(self):
        """Return an object whose ``.list()`` yields the available model names."""
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()
if __name__ == "__main__":
    # Smoke test: send one Gemini-format message and print the reply.
    client = FreeGemini()
    response = client.chat.completions.create(
        model="gemini-2.0-flash",
        messages=[{"role": "user", "parts": [{"text": "Hello!"}]}]
    )
    print(response.choices[0].message.content)