webscout 8.3.7-py3-none-any.whl → 2025.10.13-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -60
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +16 -1
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -316
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -314
- webscout/Provider/Andi.py +1 -1
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +237 -236
- webscout/Provider/ChatSandbox.py +343 -343
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -208
- webscout/Provider/Deepinfra.py +370 -366
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -308
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -164
- webscout/Provider/GeminiProxy.py +167 -167
- webscout/Provider/GithubChat.py +371 -372
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -383
- webscout/Provider/Jadve.py +282 -282
- webscout/Provider/K2Think.py +307 -307
- webscout/Provider/Koboldai.py +205 -205
- webscout/Provider/LambdaChat.py +423 -423
- webscout/Provider/Nemotron.py +244 -244
- webscout/Provider/Netwrck.py +248 -248
- webscout/Provider/OLLAMA.py +395 -395
- webscout/Provider/OPENAI/Cloudflare.py +393 -393
- webscout/Provider/OPENAI/FalconH1.py +451 -451
- webscout/Provider/OPENAI/FreeGemini.py +296 -296
- webscout/Provider/OPENAI/K2Think.py +431 -431
- webscout/Provider/OPENAI/NEMOTRON.py +240 -240
- webscout/Provider/OPENAI/PI.py +427 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +465 -465
- webscout/Provider/OPENAI/__init__.py +33 -18
- webscout/Provider/OPENAI/base.py +248 -248
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +592 -592
- webscout/Provider/OPENAI/chatgptclone.py +521 -521
- webscout/Provider/OPENAI/chatsandbox.py +202 -202
- webscout/Provider/OPENAI/deepinfra.py +318 -314
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -314
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -355
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -535
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -340
- webscout/Provider/OPENAI/toolbaz.py +419 -416
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +243 -243
- webscout/Provider/PI.py +405 -405
- webscout/Provider/Perplexitylabs.py +430 -430
- webscout/Provider/QwenLM.py +272 -272
- webscout/Provider/STT/__init__.py +16 -1
- webscout/Provider/Sambanova.py +257 -257
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -18
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -18
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -237
- webscout/Provider/TextPollinationsAI.py +310 -310
- webscout/Provider/TogetherAI.py +356 -356
- webscout/Provider/TwoAI.py +312 -312
- webscout/Provider/TypliAI.py +311 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/UNFINISHED/GizAI.py +294 -294
- webscout/Provider/UNFINISHED/Marcus.py +198 -198
- webscout/Provider/UNFINISHED/Qodo.py +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/UNFINISHED/XenAI.py +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +250 -250
- webscout/Provider/VercelAI.py +256 -256
- webscout/Provider/WiseCat.py +231 -231
- webscout/Provider/WrDoChat.py +366 -366
- webscout/Provider/__init__.py +33 -18
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -331
- webscout/Provider/cerebras.py +446 -446
- webscout/Provider/chatglm.py +394 -301
- webscout/Provider/cleeai.py +211 -211
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -263
- webscout/Provider/julius.py +223 -223
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +383 -383
- webscout/Provider/searchchat.py +292 -292
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -367
- webscout/Provider/turboseek.py +273 -273
- webscout/Provider/typefully.py +207 -207
- webscout/Provider/yep.py +372 -372
- webscout/__init__.py +27 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +663 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/search/__init__.py +51 -0
- webscout/search/base.py +195 -0
- webscout/search/duckduckgo_main.py +54 -0
- webscout/search/engines/__init__.py +48 -0
- webscout/search/engines/bing.py +84 -0
- webscout/search/engines/bing_news.py +52 -0
- webscout/search/engines/brave.py +43 -0
- webscout/search/engines/duckduckgo/__init__.py +25 -0
- webscout/search/engines/duckduckgo/answers.py +78 -0
- webscout/search/engines/duckduckgo/base.py +187 -0
- webscout/search/engines/duckduckgo/images.py +97 -0
- webscout/search/engines/duckduckgo/maps.py +168 -0
- webscout/search/engines/duckduckgo/news.py +68 -0
- webscout/search/engines/duckduckgo/suggestions.py +21 -0
- webscout/search/engines/duckduckgo/text.py +211 -0
- webscout/search/engines/duckduckgo/translate.py +47 -0
- webscout/search/engines/duckduckgo/videos.py +63 -0
- webscout/search/engines/duckduckgo/weather.py +74 -0
- webscout/search/engines/mojeek.py +37 -0
- webscout/search/engines/wikipedia.py +56 -0
- webscout/search/engines/yahoo.py +65 -0
- webscout/search/engines/yahoo_news.py +64 -0
- webscout/search/engines/yandex.py +43 -0
- webscout/search/engines/yep/__init__.py +13 -0
- webscout/search/engines/yep/base.py +32 -0
- webscout/search/engines/yep/images.py +99 -0
- webscout/search/engines/yep/suggestions.py +35 -0
- webscout/search/engines/yep/text.py +114 -0
- webscout/search/http_client.py +156 -0
- webscout/search/results.py +137 -0
- webscout/search/yep_main.py +44 -0
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/version.py.bak +2 -0
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/METADATA +936 -937
- webscout-2025.10.13.dist-info/RECORD +329 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/OPENAI/Qwen3.py +0 -303
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/tempid.py +0 -134
- webscout/webscout_search.py +0 -1183
- webscout/webscout_search_async.py +0 -649
- webscout/yep_search.py +0 -346
- webscout-8.3.7.dist-info/RECORD +0 -301
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/WHEEL +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/textpollinations.py
@@ -1,340 +1,340 @@
-import time
-import uuid
-import requests
-import json
-from typing import List, Dict, Optional, Union, Generator, Any
-
-# Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
-    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-    ChatCompletionMessage, CompletionUsage, ToolCall, ToolFunction, count_tokens
-)
-
-# Import LitAgent for browser fingerprinting
-from webscout.litagent import LitAgent
-
-# ANSI escape codes for formatting
-BOLD = "\033[1m"
-RED = "\033[91m"
-RESET = "\033[0m"
-
-class Completions(BaseCompletions):
-    def __init__(self, client: 'TextPollinations'):
-        self._client = client
-
-    def create(
-        self,
-        *,
-        model: str,
-        messages: List[Dict[str, str]],
-        max_tokens: Optional[int] = None,
-        stream: bool = False,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        timeout: Optional[int] = None,
-        proxies: Optional[Dict[str, str]] = None,
-        **kwargs: Any
-    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-        """
-        Creates a model response for the given chat conversation.
-        Mimics openai.chat.completions.create
-        """
-        payload = {
-            "model": model,
-            "messages": messages,
-            "stream": stream,
-        }
-        if max_tokens is not None:
-            payload["max_tokens"] = max_tokens
-        if temperature is not None:
-            payload["temperature"] = temperature
-        if top_p is not None:
-            payload["top_p"] = top_p
-        if tools is not None:
-            payload["tools"] = tools
-        if tool_choice is not None:
-            payload["tool_choice"] = tool_choice
-
-        payload.update(kwargs)
-
-        request_id = str(uuid.uuid4())
-        created_time = int(time.time())
-
-        if stream:
-            return self._create_streaming(request_id, created_time, model, payload, timeout, proxies)
-        else:
-            return self._create_non_streaming(request_id, created_time, model, payload, timeout, proxies)
-
-    def _create_streaming(
-        self,
-        request_id: str,
-        created_time: int,
-        model: str,
-        payload: Dict[str, Any],
-        timeout: Optional[int] = None,
-        proxies: Optional[Dict[str, str]] = None
-    ) -> Generator[ChatCompletionChunk, None, None]:
-        """Implementation for streaming chat completions."""
-        try:
-
-            # Make the streaming request
-            response = self._client.session.post(
-                self._client.api_endpoint,
-                headers=self._client.headers,
-                json=payload,
-                stream=True,
-                timeout=timeout or self._client.timeout,
-                proxies=proxies or getattr(self._client, "proxies", None)
-            )
-
-            if not response.ok:
-                raise IOError(f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}")
-
-            # Process the streaming response
-            full_response = ""
-
-            for line in response.iter_lines():
-                if line:
-                    line = line.decode('utf-8').strip()
-                    if line == "data: [DONE]":
-                        break
-                    if line.startswith('data: '):
-                        try:
-                            json_data = json.loads(line[6:])
-                            if 'choices' in json_data and len(json_data['choices']) > 0:
-                                choice = json_data['choices'][0]
-                                if 'delta' in choice:
-                                    delta_obj = ChoiceDelta()
-
-                                    # Handle content in delta
-                                    if 'content' in choice['delta']:
-                                        content = choice['delta']['content']
-                                        full_response += content
-                                        delta_obj.content = content
-
-                                    # Handle tool calls in delta
-                                    if 'tool_calls' in choice['delta']:
-                                        tool_calls = []
-                                        for tool_call_data in choice['delta']['tool_calls']:
-                                            if 'function' in tool_call_data:
-                                                function = ToolFunction(
-                                                    name=tool_call_data['function'].get('name', ''),
-                                                    arguments=tool_call_data['function'].get('arguments', '')
-                                                )
-                                                tool_call = ToolCall(
-                                                    id=tool_call_data.get('id', str(uuid.uuid4())),
-                                                    type=tool_call_data.get('type', 'function'),
-                                                    function=function
-                                                )
-                                                tool_calls.append(tool_call)
-
-                                        if tool_calls:
-                                            delta_obj.tool_calls = tool_calls
-
-                                    # Create and yield a chunk
-                                    choice_obj = Choice(index=0, delta=delta_obj, finish_reason=None)
-                                    chunk = ChatCompletionChunk(
-                                        id=request_id,
-                                        choices=[choice_obj],
-                                        created=created_time,
-                                        model=model
-                                    )
-
-                                    yield chunk
-                        except json.JSONDecodeError:
-                            continue
-
-            # Final chunk with finish_reason
-            delta = ChoiceDelta(content=None)
-            choice = Choice(index=0, delta=delta, finish_reason="stop")
-            chunk = ChatCompletionChunk(
-                id=request_id,
-                choices=[choice],
-                created=created_time,
-                model=model
-            )
-
-            yield chunk
-
-        except Exception as e:
-            print(f"{RED}Error during TextPollinations streaming request: {e}{RESET}")
-            raise IOError(f"TextPollinations streaming request failed: {e}") from e
-
-    def _create_non_streaming(
-        self,
-        request_id: str,
-        created_time: int,
-        model: str,
-        payload: Dict[str, Any],
-        timeout: Optional[int] = None,
-        proxies: Optional[Dict[str, str]] = None
-    ) -> ChatCompletion:
-        """Implementation for non-streaming chat completions."""
-        try:
-
-            # Make the non-streaming request
-            response = self._client.session.post(
-                self._client.api_endpoint,
-                headers=self._client.headers,
-                json=payload,
-                timeout=timeout or self._client.timeout,
-                proxies=proxies or getattr(self._client, "proxies", None)
-            )
-
-            if not response.ok:
-                raise IOError(f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}")
-
-            # Parse the response
-            response_json = response.json()
-
-            # Extract the content
-            if 'choices' in response_json and len(response_json['choices']) > 0:
-                choice_data = response_json['choices'][0]
-                if 'message' in choice_data:
-                    message_data = choice_data['message']
-
-                    # Extract content
-                    full_content = message_data.get('content', '')
-
-                    # Create the completion message with potential tool calls
-                    message = ChatCompletionMessage(role="assistant", content=full_content)
-
-                    # Handle tool calls if present
-                    if 'tool_calls' in message_data:
-                        tool_calls = []
-                        for tool_call_data in message_data['tool_calls']:
-                            if 'function' in tool_call_data:
-                                function = ToolFunction(
-                                    name=tool_call_data['function'].get('name', ''),
-                                    arguments=tool_call_data['function'].get('arguments', '')
-                                )
-                                tool_call = ToolCall(
-                                    id=tool_call_data.get('id', str(uuid.uuid4())),
-                                    type=tool_call_data.get('type', 'function'),
-                                    function=function
-                                )
-                                tool_calls.append(tool_call)
-
-                        if tool_calls:
-                            message.tool_calls = tool_calls
-                else:
-                    # Fallback if no message is present
-                    message = ChatCompletionMessage(role="assistant", content="")
-            else:
-                # Fallback if no choices are present
-                message = ChatCompletionMessage(role="assistant", content="")
-
-            # Create the choice
-            choice = Choice(
-                index=0,
-                message=message,
-                finish_reason="stop"
-            )
-
-            # Estimate token usage using count_tokens
-            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
-            completion_tokens = count_tokens(full_content)
-            usage = CompletionUsage(
-                prompt_tokens=prompt_tokens,
-                completion_tokens=completion_tokens,
-                total_tokens=prompt_tokens + completion_tokens
-            )
-
-            # Create the completion object
-            completion = ChatCompletion(
-                id=request_id,
-                choices=[choice],
-                created=created_time,
-                model=model,
-                usage=usage,
-            )
-
-            return completion
-
-        except Exception as e:
-            print(f"{RED}Error during TextPollinations non-stream request: {e}{RESET}")
-            raise IOError(f"TextPollinations request failed: {e}") from e
-
-class Chat(BaseChat):
-    def __init__(self, client: 'TextPollinations'):
-        self.completions = Completions(client)
-
-class TextPollinations(OpenAICompatibleProvider):
-    """
-    OpenAI-compatible client for TextPollinations API.
-
-    Usage:
-        client = TextPollinations()
-        response = client.chat.completions.create(
-            model="openai-large",
-            messages=[{"role": "user", "content": "Hello!"}]
-        )
-        print(response.choices[0].message.content)
-    """
-
-    AVAILABLE_MODELS = [
-        "deepseek-reasoning",
-        "gemini",
-        "mistral",
-        "nova-fast",
-        "openai",
-        "openai-audio",
-        "openai-fast",
-        "openai-reasoning",
-        "qwen-coder",
-        "roblox-rp",
-        "bidara",
-        "evil",
-        "midijourney",
-        "mirexa",
-        "rtist",
-        "unity",
-    ]
-
-    def __init__(
-        self,
-        timeout: int = 30,
-        proxies: dict = {}
-    ):
-        """
-        Initialize the TextPollinations client.
-
-        Args:
-            timeout: Request timeout in seconds
-            proxies: Optional proxy configuration
-        """
-        self.timeout = timeout
-        self.api_endpoint = "https://text.pollinations.ai/openai"
-        self.proxies = proxies
-
-        # Initialize session
-        self.session = requests.Session()
-        if proxies:
-            self.session.proxies.update(proxies)
-
-        # Initialize LitAgent for user agent generation
-        agent = LitAgent()
-        self.user_agent = agent.random()
-
-        # Set headers
-        self.headers = {
-            'Accept': '*/*',
-            'Accept-Language': 'en-US,en;q=0.9',
-            'User-Agent': self.user_agent,
-            'Content-Type': 'application/json',
-        }
-
-        self.session.headers.update(self.headers)
-
-        # Initialize chat interface
-        self.chat = Chat(self)
-
-    @property
-    def models(self):
-        class _ModelList:
-            def list(inner_self):
-                return type(self).AVAILABLE_MODELS
-        return _ModelList()
+import time
+import uuid
+import requests
+import json
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage, ToolCall, ToolFunction, count_tokens
+)
+
+# Import LitAgent for browser fingerprinting
+from webscout.litagent import LitAgent
+
+# ANSI escape codes for formatting
+BOLD = "\033[1m"
+RED = "\033[91m"
+RESET = "\033[0m"
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'TextPollinations'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        payload = {
+            "model": model,
+            "messages": messages,
+            "stream": stream,
+        }
+        if max_tokens is not None:
+            payload["max_tokens"] = max_tokens
+        if temperature is not None:
+            payload["temperature"] = temperature
+        if top_p is not None:
+            payload["top_p"] = top_p
+        if tools is not None:
+            payload["tools"] = tools
+        if tool_choice is not None:
+            payload["tool_choice"] = tool_choice
+
+        payload.update(kwargs)
+
+        request_id = str(uuid.uuid4())
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_streaming(request_id, created_time, model, payload, timeout, proxies)
+        else:
+            return self._create_non_streaming(request_id, created_time, model, payload, timeout, proxies)
+
+    def _create_streaming(
+        self,
+        request_id: str,
+        created_time: int,
+        model: str,
+        payload: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        """Implementation for streaming chat completions."""
+        try:
+
+            # Make the streaming request
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
+            )
+
+            if not response.ok:
+                raise IOError(f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}")
+
+            # Process the streaming response
+            full_response = ""
+
+            for line in response.iter_lines():
+                if line:
+                    line = line.decode('utf-8').strip()
+                    if line == "data: [DONE]":
+                        break
+                    if line.startswith('data: '):
+                        try:
+                            json_data = json.loads(line[6:])
+                            if 'choices' in json_data and len(json_data['choices']) > 0:
+                                choice = json_data['choices'][0]
+                                if 'delta' in choice:
+                                    delta_obj = ChoiceDelta()
+
+                                    # Handle content in delta
+                                    if 'content' in choice['delta']:
+                                        content = choice['delta']['content']
+                                        full_response += content
+                                        delta_obj.content = content
+
+                                    # Handle tool calls in delta
+                                    if 'tool_calls' in choice['delta']:
+                                        tool_calls = []
+                                        for tool_call_data in choice['delta']['tool_calls']:
+                                            if 'function' in tool_call_data:
+                                                function = ToolFunction(
+                                                    name=tool_call_data['function'].get('name', ''),
+                                                    arguments=tool_call_data['function'].get('arguments', '')
+                                                )
+                                                tool_call = ToolCall(
+                                                    id=tool_call_data.get('id', str(uuid.uuid4())),
+                                                    type=tool_call_data.get('type', 'function'),
+                                                    function=function
+                                                )
+                                                tool_calls.append(tool_call)
+
+                                        if tool_calls:
+                                            delta_obj.tool_calls = tool_calls
+
+                                    # Create and yield a chunk
+                                    choice_obj = Choice(index=0, delta=delta_obj, finish_reason=None)
+                                    chunk = ChatCompletionChunk(
+                                        id=request_id,
+                                        choices=[choice_obj],
+                                        created=created_time,
+                                        model=model
+                                    )
+
+                                    yield chunk
+                        except json.JSONDecodeError:
+                            continue
+
+            # Final chunk with finish_reason
+            delta = ChoiceDelta(content=None)
+            choice = Choice(index=0, delta=delta, finish_reason="stop")
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model
+            )
+
+            yield chunk
+
+        except Exception as e:
+            print(f"{RED}Error during TextPollinations streaming request: {e}{RESET}")
+            raise IOError(f"TextPollinations streaming request failed: {e}") from e
+
+    def _create_non_streaming(
+        self,
+        request_id: str,
+        created_time: int,
+        model: str,
+        payload: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
+    ) -> ChatCompletion:
+        """Implementation for non-streaming chat completions."""
+        try:
+
+            # Make the non-streaming request
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
+            )
+
+            if not response.ok:
+                raise IOError(f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}")
+
+            # Parse the response
+            response_json = response.json()
+
+            # Extract the content
+            if 'choices' in response_json and len(response_json['choices']) > 0:
+                choice_data = response_json['choices'][0]
+                if 'message' in choice_data:
+                    message_data = choice_data['message']
+
+                    # Extract content
+                    full_content = message_data.get('content', '')
+
+                    # Create the completion message with potential tool calls
+                    message = ChatCompletionMessage(role="assistant", content=full_content)
+
+                    # Handle tool calls if present
+                    if 'tool_calls' in message_data:
+                        tool_calls = []
+                        for tool_call_data in message_data['tool_calls']:
+                            if 'function' in tool_call_data:
+                                function = ToolFunction(
+                                    name=tool_call_data['function'].get('name', ''),
+                                    arguments=tool_call_data['function'].get('arguments', '')
+                                )
+                                tool_call = ToolCall(
+                                    id=tool_call_data.get('id', str(uuid.uuid4())),
+                                    type=tool_call_data.get('type', 'function'),
+                                    function=function
+                                )
+                                tool_calls.append(tool_call)
+
+                        if tool_calls:
+                            message.tool_calls = tool_calls
+                else:
+                    # Fallback if no message is present
+                    message = ChatCompletionMessage(role="assistant", content="")
+            else:
+                # Fallback if no choices are present
+                message = ChatCompletionMessage(role="assistant", content="")
+
+            # Create the choice
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason="stop"
+            )
+
+            # Estimate token usage using count_tokens
+            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
+            completion_tokens = count_tokens(full_content)
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=prompt_tokens + completion_tokens
+            )
+
+            # Create the completion object
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+
+            return completion
+
+        except Exception as e:
+            print(f"{RED}Error during TextPollinations non-stream request: {e}{RESET}")
+            raise IOError(f"TextPollinations request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'TextPollinations'):
+        self.completions = Completions(client)
+
+class TextPollinations(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for TextPollinations API.
+
+    Usage:
+        client = TextPollinations()
+        response = client.chat.completions.create(
+            model="openai-large",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+        print(response.choices[0].message.content)
+    """
+
+    AVAILABLE_MODELS = [
+        "deepseek-reasoning",
+        "gemini",
+        "mistral",
+        "nova-fast",
+        "openai",
+        "openai-audio",
+        "openai-fast",
+        "openai-reasoning",
+        "qwen-coder",
+        "roblox-rp",
+        "bidara",
+        "evil",
+        "midijourney",
+        "mirexa",
+        "rtist",
+        "unity",
+    ]
+
+    def __init__(
+        self,
+        timeout: int = 30,
+        proxies: dict = {}
+    ):
+        """
+        Initialize the TextPollinations client.
+
+        Args:
+            timeout: Request timeout in seconds
+            proxies: Optional proxy configuration
+        """
+        self.timeout = timeout
+        self.api_endpoint = "https://text.pollinations.ai/openai"
+        self.proxies = proxies
+
+        # Initialize session
+        self.session = requests.Session()
+        if proxies:
+            self.session.proxies.update(proxies)
+
+        # Initialize LitAgent for user agent generation
+        agent = LitAgent()
+        self.user_agent = agent.random()
+
+        # Set headers
+        self.headers = {
+            'Accept': '*/*',
+            'Accept-Language': 'en-US,en;q=0.9',
+            'User-Agent': self.user_agent,
+            'Content-Type': 'application/json',
+        }
+
+        self.session.headers.update(self.headers)
+
+        # Initialize chat interface
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()