webscout-8.3.7-py3-none-any.whl → webscout-2025.10.11-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -60
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +16 -1
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -316
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -314
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +236 -236
- webscout/Provider/ChatSandbox.py +343 -343
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -208
- webscout/Provider/Deepinfra.py +370 -366
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -308
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -164
- webscout/Provider/GeminiProxy.py +167 -167
- webscout/Provider/GithubChat.py +371 -372
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -383
- webscout/Provider/Jadve.py +282 -282
- webscout/Provider/K2Think.py +307 -307
- webscout/Provider/Koboldai.py +205 -205
- webscout/Provider/LambdaChat.py +423 -423
- webscout/Provider/Nemotron.py +244 -244
- webscout/Provider/Netwrck.py +248 -248
- webscout/Provider/OLLAMA.py +395 -395
- webscout/Provider/OPENAI/Cloudflare.py +393 -393
- webscout/Provider/OPENAI/FalconH1.py +451 -451
- webscout/Provider/OPENAI/FreeGemini.py +296 -296
- webscout/Provider/OPENAI/K2Think.py +431 -431
- webscout/Provider/OPENAI/NEMOTRON.py +240 -240
- webscout/Provider/OPENAI/PI.py +427 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +465 -465
- webscout/Provider/OPENAI/__init__.py +33 -18
- webscout/Provider/OPENAI/base.py +248 -248
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +592 -592
- webscout/Provider/OPENAI/chatgptclone.py +521 -521
- webscout/Provider/OPENAI/chatsandbox.py +202 -202
- webscout/Provider/OPENAI/deepinfra.py +318 -314
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -314
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -355
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -535
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -340
- webscout/Provider/OPENAI/toolbaz.py +419 -416
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +243 -243
- webscout/Provider/PI.py +405 -405
- webscout/Provider/Perplexitylabs.py +430 -430
- webscout/Provider/QwenLM.py +272 -272
- webscout/Provider/STT/__init__.py +16 -1
- webscout/Provider/Sambanova.py +257 -257
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -18
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -18
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -237
- webscout/Provider/TextPollinationsAI.py +310 -310
- webscout/Provider/TogetherAI.py +356 -356
- webscout/Provider/TwoAI.py +312 -312
- webscout/Provider/TypliAI.py +311 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/UNFINISHED/GizAI.py +294 -294
- webscout/Provider/UNFINISHED/Marcus.py +198 -198
- webscout/Provider/UNFINISHED/Qodo.py +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/UNFINISHED/XenAI.py +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +250 -250
- webscout/Provider/VercelAI.py +256 -256
- webscout/Provider/WiseCat.py +231 -231
- webscout/Provider/WrDoChat.py +366 -366
- webscout/Provider/__init__.py +33 -18
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -331
- webscout/Provider/cerebras.py +446 -446
- webscout/Provider/chatglm.py +394 -301
- webscout/Provider/cleeai.py +211 -211
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -263
- webscout/Provider/julius.py +223 -223
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +383 -383
- webscout/Provider/searchchat.py +292 -292
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -367
- webscout/Provider/turboseek.py +273 -273
- webscout/Provider/typefully.py +207 -207
- webscout/Provider/yep.py +372 -372
- webscout/__init__.py +30 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +664 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/webscout_search.py +1183 -1183
- webscout/webscout_search_async.py +649 -649
- webscout/yep_search.py +346 -346
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -937
- webscout-2025.10.11.dist-info/RECORD +300 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/OPENAI/Qwen3.py +0 -303
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/tempid.py +0 -134
- webscout-8.3.7.dist-info/RECORD +0 -301
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
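The per-file line counts above can be reproduced locally. A minimal sketch, assuming both wheels have already been downloaded into the working directory (e.g. via `pip download webscout==8.3.7 --no-deps` and `pip download webscout==2025.10.11 --no-deps`; the wheel filenames below follow standard wheel naming and are assumptions, as is the exact counting convention used by the diff site):

```python
# Minimal sketch: recompute per-file +/- line counts between two wheels.
# Assumes both wheel files exist in the current directory.
import difflib
import zipfile

OLD_WHEEL = "webscout-8.3.7-py3-none-any.whl"       # assumed filename
NEW_WHEEL = "webscout-2025.10.11-py3-none-any.whl"  # assumed filename

with zipfile.ZipFile(OLD_WHEEL) as old, zipfile.ZipFile(NEW_WHEEL) as new:
    old_names, new_names = set(old.namelist()), set(new.namelist())
    for name in sorted(old_names | new_names):
        # Missing members diff against the empty file (pure add/remove).
        a = old.read(name).decode("utf-8", "replace").splitlines() if name in old_names else []
        b = new.read(name).decode("utf-8", "replace").splitlines() if name in new_names else []
        diff = list(difflib.unified_diff(a, b, lineterm=""))
        added = sum(1 for d in diff if d.startswith("+") and not d.startswith("+++"))
        removed = sum(1 for d in diff if d.startswith("-") and not d.startswith("---"))
        if added or removed:
            print(f"{name} +{added} -{removed}")
```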
webscout/Provider/OPENAI/FreeGemini.py

@@ -1,297 +1,297 @@

Every line of the file is removed and re-added with textually identical content (only line 297, the final print, appears as unchanged context), so the difference is not visible in this rendering. The reconstructed file, shown once:

#!/usr/bin/env python3
"""
OpenAI-compatible client for the FreeGemini provider,
which uses the free-gemini.vercel.app service.
"""

import time
import uuid
import json
from typing import List, Dict, Optional, Union, Generator, Any

from curl_cffi.requests import Session

from webscout.litagent import LitAgent
from webscout.AIutel import sanitize_stream
from webscout.Provider.OPENAI.base import BaseChat, BaseCompletions, OpenAICompatibleProvider
from webscout.Provider.OPENAI.utils import (
    ChatCompletion,
    ChatCompletionChunk,
    Choice,
    ChatCompletionMessage,
    ChoiceDelta,
    CompletionUsage,
    format_prompt,
    get_system_prompt,
    count_tokens
)

# ANSI escape codes for formatting
BOLD = "\033[1m"
RED = "\033[91m"
RESET = "\033[0m"


class Completions(BaseCompletions):
    def __init__(self, client: 'FreeGemini'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[dict] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create
        """
        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        api_payload = {
            "contents": messages,
            "generationConfig": {
                "temperature": temperature,
                "maxOutputTokens": max_tokens,
                "topP": top_p
            },
            "safetySettings": [
                {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"},
                {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_ONLY_HIGH"},
                {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_ONLY_HIGH"},
                {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_ONLY_HIGH"}
            ]
        }

        if stream:
            return self._create_stream(request_id, created_time, model, api_payload, timeout=timeout, proxies=proxies)
        else:
            return self._create_non_stream(request_id, created_time, model, api_payload, timeout=timeout, proxies=proxies)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
        timeout: Optional[int] = None, proxies: Optional[dict] = None
    ) -> Generator[ChatCompletionChunk, None, None]:
        original_proxies = self._client.session.proxies
        if proxies is not None:
            self._client.session.proxies = proxies
        else:
            # Ensure session proxies are reset if no specific proxies are passed for this call
            self._client.session.proxies = {}
        try:
            response = self._client.session.post(
                self._client.api_endpoint,
                json=payload,
                stream=True,
                timeout=timeout if timeout is not None else self._client.timeout,
                impersonate="chrome120"
            )
            response.raise_for_status()

            # Track token usage across chunks
            completion_tokens = 0
            streaming_text = ""

            processed_stream = sanitize_stream(
                data=response.iter_content(chunk_size=None),
                intro_value="data:",
                to_json=True,
                content_extractor=self._gemini_extractor,
                yield_raw_on_error=False
            )

            for text_chunk in processed_stream:
                if text_chunk and isinstance(text_chunk, str):
                    streaming_text += text_chunk
                    completion_tokens += count_tokens(text_chunk)

                    delta = ChoiceDelta(content=text_chunk, role="assistant")
                    choice = Choice(index=0, delta=delta, finish_reason=None)
                    chunk = ChatCompletionChunk(
                        id=request_id,
                        choices=[choice],
                        created=created_time,
                        model=model
                    )
                    yield chunk

            # Final chunk with finish_reason
            delta = ChoiceDelta(content=None)
            choice = Choice(index=0, delta=delta, finish_reason="stop")
            chunk = ChatCompletionChunk(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model
            )
            yield chunk

        except Exception as e:
            print(f"{RED}Error during FreeGemini stream request: {e}{RESET}")
            raise IOError(f"FreeGemini stream request failed: {e}") from e
        finally:
            self._client.session.proxies = original_proxies

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
        timeout: Optional[int] = None, proxies: Optional[dict] = None
    ) -> ChatCompletion:
        original_proxies = self._client.session.proxies
        if proxies is not None:
            self._client.session.proxies = proxies
        else:
            self._client.session.proxies = {}
        try:
            # For non-streaming, we'll still use streaming since the API returns data in chunks
            response = self._client.session.post(
                self._client.api_endpoint,
                json=payload,
                stream=True,  # API always returns streaming format
                timeout=timeout if timeout is not None else self._client.timeout,
                impersonate="chrome120"
            )
            response.raise_for_status()

            # Process the streaming response to get the full text
            full_text_response = ""

            # Process each chunk using the same method as streaming
            for line in response.iter_lines():
                if line and line.startswith(b"data:"):
                    # Extract the JSON part
                    json_str = line[5:].strip().decode('utf-8')
                    if json_str != "[DONE]":
                        try:
                            data = json.loads(json_str)
                            # Use the existing extractor to get the text
                            text_chunk = self._gemini_extractor(data)
                            if text_chunk:
                                full_text_response += text_chunk
                        except json.JSONDecodeError:
                            # Skip invalid JSON
                            pass

            # Create usage statistics using count_tokens
            prompt_tokens = count_tokens(str(payload))
            completion_tokens = count_tokens(full_text_response)
            total_tokens = prompt_tokens + completion_tokens

            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=total_tokens
            )

            # Create the message and choice objects
            message = ChatCompletionMessage(
                role="assistant",
                content=full_text_response
            )
            choice = Choice(
                index=0,
                message=message,
                finish_reason="stop"
            )

            # Create the completion object
            completion = ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage
            )

            return completion

        except Exception as e:
            print(f"{RED}Error during FreeGemini non-stream request: {e}{RESET}")
            raise IOError(f"FreeGemini request failed: {e}") from e
        finally:
            self._client.session.proxies = original_proxies

    @staticmethod
    def _gemini_extractor(data: Dict) -> Optional[str]:
        """Extract text content from Gemini API response stream data."""
        try:
            if "candidates" in data and data["candidates"]:
                candidate = data["candidates"][0]
                if "content" in candidate and "parts" in candidate["content"]:
                    parts = candidate["content"]["parts"]
                    if parts and "text" in parts[0]:
                        return parts[0]["text"]
        except (KeyError, IndexError, TypeError):
            pass
        return None


class Chat(BaseChat):
    def __init__(self, client: 'FreeGemini'):
        self.completions = Completions(client)


class FreeGemini(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for FreeGemini API.

    Usage:
        client = FreeGemini()
        response = client.chat.completions.create(
            model="gemini-2.0-flash",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    AVAILABLE_MODELS = ["gemini-2.0-flash"]

    def __init__(self, proxies: Optional[dict] = None):
        """
        Initialize the FreeGemini client.

        Args:
            proxies: Optional proxy configuration dictionary
        """
        super().__init__(proxies=proxies)
        self.timeout = 30
        # Update the API endpoint to match the working implementation
        self.api_endpoint = "https://free-gemini.vercel.app/api/google/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse"

        # Use LitAgent for fingerprinting
        self.agent = LitAgent()

        # Set headers for the requests
        self.headers = {
            "Content-Type": "application/json",
            "Accept": "application/json, text/event-stream",
            "User-Agent": self.agent.random(),
            "Origin": "https://free-gemini.vercel.app",
            "Referer": "https://free-gemini.vercel.app/",
        }

        # Update session headers
        self.session.headers.update(self.headers)

        # Initialize chat interface
        self.chat = Chat(self)

    @property
    def models(self):
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()


if __name__ == "__main__":
    # Example usage
    client = FreeGemini()
    conversation_prompt = "Hello!"
    response = client.chat.completions.create(
        model="gemini-2.0-flash",
        messages=[{"role": "user", "parts": [{"text": conversation_prompt}]}]
    )
    print(response.choices[0].message.content)
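For reference, a minimal streaming sketch against the reconstructed client above. The import path is inferred from the file list in this diff, reachability of free-gemini.vercel.app is assumed, and the message shape mirrors the module's own `__main__` block (Gemini-style `parts` rather than OpenAI-style `content`, since `create()` forwards `messages` verbatim as the Gemini `contents` payload):

```python
# Minimal sketch, assuming the import path below matches the package layout
# shown in this diff and that free-gemini.vercel.app is reachable.
from webscout.Provider.OPENAI.FreeGemini import FreeGemini

client = FreeGemini()

# stream=True makes create() return a generator of ChatCompletionChunk objects;
# the final chunk carries finish_reason="stop" with content=None.
for chunk in client.chat.completions.create(
    model="gemini-2.0-flash",
    messages=[{"role": "user", "parts": [{"text": "Hello!"}]}],
    stream=True,
):
    text = chunk.choices[0].delta.content
    if text:
        print(text, end="", flush=True)
print()
```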