webscout 8.2.2__py3-none-any.whl → 8.2.7__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/AIauto.py +112 -22
- webscout/AIbase.py +144 -7
- webscout/AIutel.py +249 -131
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/__init__.py +0 -1
- webscout/cli.py +256 -0
- webscout/conversation.py +307 -436
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/METADATA +172 -52
- webscout-8.2.7.dist-info/RECORD +26 -0
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/entry_points.txt +3 -0
- webscout-8.2.7.dist-info/top_level.txt +1 -0
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Extra/GitToolkit/__init__.py +0 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +0 -12
- webscout/Extra/GitToolkit/gitapi/repository.py +0 -195
- webscout/Extra/GitToolkit/gitapi/user.py +0 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +0 -62
- webscout/Extra/YTToolkit/YTdownloader.py +0 -957
- webscout/Extra/YTToolkit/__init__.py +0 -3
- webscout/Extra/YTToolkit/transcriber.py +0 -476
- webscout/Extra/YTToolkit/ytapi/__init__.py +0 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +0 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +0 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +0 -45
- webscout/Extra/YTToolkit/ytapi/https.py +0 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +0 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +0 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +0 -8
- webscout/Extra/YTToolkit/ytapi/query.py +0 -40
- webscout/Extra/YTToolkit/ytapi/stream.py +0 -63
- webscout/Extra/YTToolkit/ytapi/utils.py +0 -62
- webscout/Extra/YTToolkit/ytapi/video.py +0 -232
- webscout/Extra/__init__.py +0 -7
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -849
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.py +0 -682
- webscout/Extra/tempmail/__init__.py +0 -28
- webscout/Extra/tempmail/async_utils.py +0 -141
- webscout/Extra/tempmail/base.py +0 -161
- webscout/Extra/tempmail/cli.py +0 -187
- webscout/Extra/tempmail/emailnator.py +0 -84
- webscout/Extra/tempmail/mail_tm.py +0 -361
- webscout/Extra/tempmail/temp_mail_io.py +0 -292
- webscout/Extra/weather.py +0 -194
- webscout/Extra/weather_ascii.py +0 -76
- webscout/LLM.py +0 -442
- webscout/Litlogger/__init__.py +0 -67
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -250
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/AISEARCH/Perplexity.py +0 -359
- webscout/Provider/AISEARCH/__init__.py +0 -10
- webscout/Provider/AISEARCH/felo_search.py +0 -228
- webscout/Provider/AISEARCH/genspark_search.py +0 -208
- webscout/Provider/AISEARCH/hika_search.py +0 -194
- webscout/Provider/AISEARCH/iask_search.py +0 -436
- webscout/Provider/AISEARCH/monica_search.py +0 -246
- webscout/Provider/AISEARCH/scira_search.py +0 -324
- webscout/Provider/AISEARCH/webpilotai_search.py +0 -281
- webscout/Provider/Aitopia.py +0 -292
- webscout/Provider/AllenAI.py +0 -413
- webscout/Provider/Andi.py +0 -228
- webscout/Provider/Blackboxai.py +0 -229
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTClone.py +0 -226
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/Cloudflare.py +0 -273
- webscout/Provider/Cohere.py +0 -208
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Deepinfra.py +0 -297
- webscout/Provider/ElectronHub.py +0 -709
- webscout/Provider/ExaAI.py +0 -261
- webscout/Provider/ExaChat.py +0 -342
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -193
- webscout/Provider/Gemini.py +0 -169
- webscout/Provider/GithubChat.py +0 -367
- webscout/Provider/Glider.py +0 -211
- webscout/Provider/Groq.py +0 -670
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HeckAI.py +0 -233
- webscout/Provider/HuggingFaceChat.py +0 -462
- webscout/Provider/Hunyuan.py +0 -272
- webscout/Provider/Jadve.py +0 -266
- webscout/Provider/Koboldai.py +0 -381
- webscout/Provider/LambdaChat.py +0 -392
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Llama3.py +0 -204
- webscout/Provider/Marcus.py +0 -148
- webscout/Provider/Netwrck.py +0 -228
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/__init__.py +0 -25
- webscout/Provider/OPENAI/base.py +0 -46
- webscout/Provider/OPENAI/c4ai.py +0 -367
- webscout/Provider/OPENAI/chatgpt.py +0 -549
- webscout/Provider/OPENAI/chatgptclone.py +0 -460
- webscout/Provider/OPENAI/deepinfra.py +0 -272
- webscout/Provider/OPENAI/e2b.py +0 -1350
- webscout/Provider/OPENAI/exaai.py +0 -404
- webscout/Provider/OPENAI/exachat.py +0 -433
- webscout/Provider/OPENAI/freeaichat.py +0 -352
- webscout/Provider/OPENAI/glider.py +0 -316
- webscout/Provider/OPENAI/heckai.py +0 -337
- webscout/Provider/OPENAI/llmchatco.py +0 -327
- webscout/Provider/OPENAI/netwrck.py +0 -348
- webscout/Provider/OPENAI/opkfc.py +0 -488
- webscout/Provider/OPENAI/scirachat.py +0 -463
- webscout/Provider/OPENAI/sonus.py +0 -294
- webscout/Provider/OPENAI/standardinput.py +0 -425
- webscout/Provider/OPENAI/textpollinations.py +0 -285
- webscout/Provider/OPENAI/toolbaz.py +0 -405
- webscout/Provider/OPENAI/typegpt.py +0 -346
- webscout/Provider/OPENAI/uncovrAI.py +0 -455
- webscout/Provider/OPENAI/utils.py +0 -211
- webscout/Provider/OPENAI/venice.py +0 -413
- webscout/Provider/OPENAI/wisecat.py +0 -381
- webscout/Provider/OPENAI/writecream.py +0 -156
- webscout/Provider/OPENAI/x0gpt.py +0 -371
- webscout/Provider/OPENAI/yep.py +0 -327
- webscout/Provider/OpenGPT.py +0 -199
- webscout/Provider/Openai.py +0 -496
- webscout/Provider/PI.py +0 -344
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/PizzaGPT.py +0 -198
- webscout/Provider/QwenLM.py +0 -254
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -278
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/__init__.py +0 -12
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/TTS/__init__.py +0 -7
- webscout/Provider/TTS/deepgram.py +0 -156
- webscout/Provider/TTS/elevenlabs.py +0 -111
- webscout/Provider/TTS/gesserit.py +0 -127
- webscout/Provider/TTS/murfai.py +0 -113
- webscout/Provider/TTS/parler.py +0 -111
- webscout/Provider/TTS/speechma.py +0 -180
- webscout/Provider/TTS/streamElements.py +0 -333
- webscout/Provider/TTS/utils.py +0 -280
- webscout/Provider/TeachAnything.py +0 -187
- webscout/Provider/TextPollinationsAI.py +0 -231
- webscout/Provider/TwoAI.py +0 -199
- webscout/Provider/Venice.py +0 -219
- webscout/Provider/VercelAI.py +0 -234
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/WiseCat.py +0 -196
- webscout/Provider/Writecream.py +0 -211
- webscout/Provider/WritingMate.py +0 -197
- webscout/Provider/Youchat.py +0 -330
- webscout/Provider/__init__.py +0 -198
- webscout/Provider/ai4chat.py +0 -202
- webscout/Provider/aimathgpt.py +0 -189
- webscout/Provider/akashgpt.py +0 -342
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/asksteve.py +0 -203
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/cerebras.py +0 -242
- webscout/Provider/chatglm.py +0 -205
- webscout/Provider/cleeai.py +0 -213
- webscout/Provider/copilot.py +0 -428
- webscout/Provider/elmo.py +0 -234
- webscout/Provider/freeaichat.py +0 -271
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/geminiapi.py +0 -208
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/Provider/granite.py +0 -187
- webscout/Provider/hermes.py +0 -219
- webscout/Provider/julius.py +0 -223
- webscout/Provider/koala.py +0 -268
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/learnfastai.py +0 -266
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llama3mitril.py +0 -180
- webscout/Provider/llamatutor.py +0 -192
- webscout/Provider/llmchat.py +0 -213
- webscout/Provider/llmchatco.py +0 -311
- webscout/Provider/meta.py +0 -794
- webscout/Provider/multichat.py +0 -325
- webscout/Provider/promptrefine.py +0 -193
- webscout/Provider/scira_chat.py +0 -277
- webscout/Provider/scnet.py +0 -187
- webscout/Provider/searchchat.py +0 -293
- webscout/Provider/sonus.py +0 -208
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/toolbaz.py +0 -320
- webscout/Provider/turboseek.py +0 -219
- webscout/Provider/tutorai.py +0 -252
- webscout/Provider/typefully.py +0 -280
- webscout/Provider/typegpt.py +0 -232
- webscout/Provider/uncovr.py +0 -312
- webscout/Provider/x0gpt.py +0 -256
- webscout/Provider/yep.py +0 -376
- webscout/litagent/__init__.py +0 -29
- webscout/litagent/agent.py +0 -455
- webscout/litagent/constants.py +0 -60
- webscout/litprinter/__init__.py +0 -59
- webscout/scout/__init__.py +0 -8
- webscout/scout/core/__init__.py +0 -7
- webscout/scout/core/crawler.py +0 -140
- webscout/scout/core/scout.py +0 -568
- webscout/scout/core/search_result.py +0 -96
- webscout/scout/core/text_analyzer.py +0 -63
- webscout/scout/core/text_utils.py +0 -277
- webscout/scout/core/web_analyzer.py +0 -52
- webscout/scout/core.py +0 -881
- webscout/scout/element.py +0 -460
- webscout/scout/parsers/__init__.py +0 -69
- webscout/scout/parsers/html5lib_parser.py +0 -172
- webscout/scout/parsers/html_parser.py +0 -236
- webscout/scout/parsers/lxml_parser.py +0 -178
- webscout/scout/utils.py +0 -37
- webscout/swiftcli/__init__.py +0 -809
- webscout/zeroart/__init__.py +0 -55
- webscout/zeroart/base.py +0 -60
- webscout/zeroart/effects.py +0 -99
- webscout/zeroart/fonts.py +0 -816
- webscout-8.2.2.dist-info/RECORD +0 -309
- webscout-8.2.2.dist-info/entry_points.txt +0 -5
- webscout-8.2.2.dist-info/top_level.txt +0 -3
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info/licenses}/LICENSE.md +0 -0
|
@@ -1,285 +0,0 @@
|
|
|
1
|
-
import time
|
|
2
|
-
import uuid
|
|
3
|
-
import requests
|
|
4
|
-
import json
|
|
5
|
-
from typing import List, Dict, Optional, Union, Generator, Any
|
|
6
|
-
|
|
7
|
-
# Import base classes and utility structures
|
|
8
|
-
from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
|
|
9
|
-
from .utils import (
|
|
10
|
-
ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
|
|
11
|
-
ChatCompletionMessage, CompletionUsage
|
|
12
|
-
)
|
|
13
|
-
|
|
14
|
-
# Import LitAgent for browser fingerprinting
|
|
15
|
-
from webscout.litagent import LitAgent
|
|
16
|
-
|
|
17
|
-
# ANSI escape codes for formatting
|
|
18
|
-
BOLD = "\033[1m"
|
|
19
|
-
RED = "\033[91m"
|
|
20
|
-
RESET = "\033[0m"
|
|
21
|
-
|
|
22
|
-
class Completions(BaseCompletions):
    """Chat-completions interface for the TextPollinations backend."""

    def __init__(self, client: 'TextPollinations'):
        # Keep a handle on the owning client for its session/headers/timeout.
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Create a model response for the given chat conversation.

        Mimics ``openai.chat.completions.create``: returns a ``ChatCompletion``
        when ``stream=False`` and a generator of ``ChatCompletionChunk`` objects
        when ``stream=True``.
        """
        payload: Dict[str, Any] = {
            "model": model,
            "messages": messages,
            "stream": stream,
        }
        # Only forward the optional sampling knobs that were explicitly set.
        optional = {
            "max_tokens": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
        }
        payload.update({key: val for key, val in optional.items() if val is not None})
        payload.update(kwargs)

        request_id = str(uuid.uuid4())
        created_time = int(time.time())

        handler = self._create_streaming if stream else self._create_non_streaming
        return handler(request_id, created_time, model, payload)

    def _create_streaming(
        self,
        request_id: str,
        created_time: int,
        model: str,
        payload: Dict[str, Any]
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Yield completion chunks as SSE ``data:`` lines arrive from the API."""
        try:
            response = self._client.session.post(
                self._client.api_endpoint,
                headers=self._client.headers,
                json=payload,
                stream=True,
                timeout=self._client.timeout,
            )
            if not response.ok:
                raise IOError(f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}")

            for raw in response.iter_lines():
                if not raw:
                    continue
                line = raw.decode('utf-8').strip()
                if line == "data: [DONE]":
                    break
                if not line.startswith('data: '):
                    continue
                try:
                    event = json.loads(line[6:])
                except json.JSONDecodeError:
                    # Ignore malformed SSE payloads and keep streaming.
                    continue
                choices = event.get('choices')
                if not choices:
                    continue
                delta_src = choices[0].get('delta') or {}
                if 'content' not in delta_src:
                    continue
                piece = delta_src['content']
                yield ChatCompletionChunk(
                    id=request_id,
                    choices=[Choice(index=0, delta=ChoiceDelta(content=piece), finish_reason=None)],
                    created=created_time,
                    model=model,
                )

            # Terminal chunk signalling the end of the stream.
            yield ChatCompletionChunk(
                id=request_id,
                choices=[Choice(index=0, delta=ChoiceDelta(content=None), finish_reason="stop")],
                created=created_time,
                model=model,
            )

        except Exception as e:
            print(f"{RED}Error during TextPollinations streaming request: {e}{RESET}")
            raise IOError(f"TextPollinations streaming request failed: {e}") from e

    def _create_non_streaming(
        self,
        request_id: str,
        created_time: int,
        model: str,
        payload: Dict[str, Any]
    ) -> ChatCompletion:
        """Perform one blocking request and wrap the reply as a ``ChatCompletion``."""
        try:
            response = self._client.session.post(
                self._client.api_endpoint,
                headers=self._client.headers,
                json=payload,
                timeout=self._client.timeout,
            )
            if not response.ok:
                raise IOError(f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}")

            body = response.json()

            # Pull the assistant text out of the first choice, if present.
            full_content = ""
            if 'choices' in body and len(body['choices']) > 0:
                if 'message' in body['choices'][0]:
                    full_content = body['choices'][0]['message']['content']

            message = ChatCompletionMessage(role="assistant", content=full_content)
            choice = Choice(index=0, message=message, finish_reason="stop")

            # Rough usage estimate: the API does not report tokens, so assume
            # ~4 characters per token on both sides.
            prompt_tokens = sum(len(m.get("content", "")) // 4 for m in payload.get("messages", []))
            completion_tokens = len(full_content) // 4
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens,
            )

            return ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )

        except Exception as e:
            print(f"{RED}Error during TextPollinations non-stream request: {e}{RESET}")
            raise IOError(f"TextPollinations request failed: {e}") from e
-
class Chat(BaseChat):
    """Namespace mirroring the OpenAI client layout (``client.chat.completions``)."""

    def __init__(self, client: 'TextPollinations'):
        # Expose the completions endpoint bound to the owning client.
        self.completions = Completions(client)
-
class TextPollinations(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for TextPollinations API.

    Usage:
        client = TextPollinations()
        response = client.chat.completions.create(
            model="openai-large",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    # Model identifiers accepted by the TextPollinations endpoint.
    AVAILABLE_MODELS = [
        "openai",
        "openai-large",
        "openai-reasoning",
        "qwen-coder",
        "llama",
        "llamascout",
        "mistral",
        "unity",
        "midijourney",
        "rtist",
        "searchgpt",
        "evil",
        "deepseek-reasoning",
        "deepseek-reasoning-large",
        "llamalight",
        "phi",
        "llama-vision",
        "pixtral",
        "gemini",
        "hormoz",
        "hypnosis-tracy",
        "mistral-roblox",
        "roblox-rp",
        "deepseek",
        "sur",
        "llama-scaleway",
        "openai-audio",
    ]

    def __init__(
        self,
        timeout: int = 30,
        proxies: Optional[dict] = None
    ):
        """
        Initialize the TextPollinations client.

        Args:
            timeout: Request timeout in seconds.
            proxies: Optional proxy configuration (scheme -> proxy URL mapping).
        """
        self.timeout = timeout
        self.api_endpoint = "https://text.pollinations.ai/openai"
        # Fix: the original signature used a mutable default (`proxies: dict = {}`),
        # which is shared across every instance created without an explicit value.
        # Normalize None to a fresh dict instead.
        self.proxies = proxies if proxies is not None else {}

        # Initialize session
        self.session = requests.Session()
        if self.proxies:
            self.session.proxies.update(self.proxies)

        # LitAgent supplies a realistic browser User-Agent string.
        agent = LitAgent()
        self.user_agent = agent.random()

        # Default headers applied to every request made through the session.
        self.headers = {
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.9',
            'User-Agent': self.user_agent,
            'Content-Type': 'application/json',
        }
        self.session.headers.update(self.headers)

        # Expose the OpenAI-style `chat.completions` interface.
        self.chat = Chat(self)
|
@@ -1,405 +0,0 @@
|
|
|
1
|
-
import time
|
|
2
|
-
import uuid
|
|
3
|
-
import base64
|
|
4
|
-
import json
|
|
5
|
-
import random
|
|
6
|
-
import string
|
|
7
|
-
import re
|
|
8
|
-
import cloudscraper
|
|
9
|
-
from datetime import datetime
|
|
10
|
-
from typing import List, Dict, Optional, Union, Generator, Any
|
|
11
|
-
|
|
12
|
-
from webscout.litagent import LitAgent
|
|
13
|
-
from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
|
|
14
|
-
from .utils import (
|
|
15
|
-
ChatCompletion,
|
|
16
|
-
ChatCompletionChunk,
|
|
17
|
-
Choice,
|
|
18
|
-
ChatCompletionMessage,
|
|
19
|
-
ChoiceDelta,
|
|
20
|
-
CompletionUsage,
|
|
21
|
-
format_prompt,
|
|
22
|
-
get_system_prompt
|
|
23
|
-
)
|
|
24
|
-
|
|
25
|
-
# ANSI escape codes for formatting
|
|
26
|
-
BOLD = "\033[1m"
|
|
27
|
-
RED = "\033[91m"
|
|
28
|
-
RESET = "\033[0m"
|
|
29
|
-
|
|
30
|
-
class Completions(BaseCompletions):
    """Chat-completions interface for the Toolbaz writing API."""

    # Matches the inline "[model: ...]" provenance tags Toolbaz injects into its
    # output; they must be stripped before text reaches the caller.
    _MODEL_TAG_RE = re.compile(r"\[model:.*?\]")
    _TAG_START = "[model:"

    def __init__(self, client: 'Toolbaz'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create

        Note: max_tokens/temperature/top_p are accepted for interface
        compatibility but are not forwarded — the Toolbaz endpoint does not
        expose those parameters.

        Raises:
            IOError: if authentication or the HTTP request fails.
        """
        # Flatten the chat history into the single prompt string the API expects.
        formatted_prompt = format_prompt(messages, add_special_tokens=True, do_continue=True)

        # Get authentication token
        auth = self._client.get_auth()
        if not auth:
            raise IOError("Failed to authenticate with Toolbaz API")

        # Prepare the form payload ("capcha" is the API's own field name).
        data = {
            "text": formatted_prompt,
            "capcha": auth["token"],
            "model": model,
            "session_id": auth["session_id"]
        }

        request_id = f"chatcmpl-{uuid.uuid4().hex}"
        created_time = int(time.time())

        if stream:
            return self._handle_streaming_response(request_id, created_time, model, data)
        return self._handle_non_streaming_response(request_id, created_time, model, data)

    @staticmethod
    def _split_off_partial_tag(buffer: str) -> "tuple[str, str]":
        """Split *buffer* into ``(emit, keep)``.

        ``keep`` is a trailing fragment that may still grow into a complete
        ``[model: ...]`` tag once more bytes arrive, so it must not be emitted
        yet; ``emit`` is everything that is safe to surface to the caller.
        """
        start = buffer.rfind("[")
        if start == -1:
            return buffer, ""
        tail = buffer[start:]
        # Hold back if the tail is a prefix of "[model:" (tag still forming)
        # or a started tag that has no closing ']' yet.
        if Completions._TAG_START.startswith(tail) or (
            tail.startswith(Completions._TAG_START) and "]" not in tail
        ):
            return buffer[:start], tail
        return buffer, ""

    def _make_chunk(
        self,
        request_id: str,
        created_time: int,
        model: str,
        content: Optional[str],
        finish_reason: Optional[str],
        role: Optional[str] = "assistant"
    ) -> ChatCompletionChunk:
        """Build one streaming chunk in OpenAI wire format."""
        delta = ChoiceDelta(content=content, role=role)
        choice = Choice(index=0, delta=delta, finish_reason=finish_reason)
        return ChatCompletionChunk(
            id=request_id,
            choices=[choice],
            created=created_time,
            model=model
        )

    def _handle_streaming_response(
        self,
        request_id: str,
        created_time: int,
        model: str,
        data: Dict[str, Any]
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Handle streaming response from Toolbaz API.

        Fixes over the previous implementation:
        - reads in 512-byte chunks instead of one byte at a time;
        - a partial "[model:" marker at the end of the buffer is held back
          until it either completes (and is stripped) or turns out not to be
          a tag. The old code flushed the buffer before a split marker could
          be recognized, so tags crossing a read boundary leaked into output.
        """
        try:
            resp = self._client.session.post(
                "https://data.toolbaz.com/writing.php",
                data=data,
                stream=True,
                proxies=self._client.proxies,
                timeout=self._client.timeout
            )
            resp.raise_for_status()

            buffer = ""
            for chunk in resp.iter_content(chunk_size=512):
                if not chunk:
                    continue
                buffer += chunk.decode(errors="ignore")
                # Drop every complete [model: ...] tag, then hold back any
                # trailing fragment that could still become one.
                buffer = self._MODEL_TAG_RE.sub("", buffer)
                emit, buffer = self._split_off_partial_tag(buffer)
                if emit:
                    yield self._make_chunk(request_id, created_time, model, emit, None)

            # Flush whatever remains once the stream ends.
            tail = self._MODEL_TAG_RE.sub("", buffer)
            if tail:
                yield self._make_chunk(request_id, created_time, model, tail, "stop")

            # Terminal chunk signalling completion.
            yield self._make_chunk(request_id, created_time, model, None, "stop", role=None)

        except Exception as e:
            print(f"{RED}Error during Toolbaz streaming request: {e}{RESET}")
            raise IOError(f"Toolbaz streaming request failed: {e}") from e

    def _handle_non_streaming_response(
        self,
        request_id: str,
        created_time: int,
        model: str,
        data: Dict[str, Any]
    ) -> ChatCompletion:
        """Handle non-streaming response from Toolbaz API."""
        try:
            resp = self._client.session.post(
                "https://data.toolbaz.com/writing.php",
                data=data,
                proxies=self._client.proxies,
                timeout=self._client.timeout
            )
            resp.raise_for_status()

            # Remove [model: ...] tags from the full reply.
            text = self._MODEL_TAG_RE.sub("", resp.text)

            message = ChatCompletionMessage(
                role="assistant",
                content=text
            )
            choice = Choice(
                index=0,
                message=message,
                finish_reason="stop"
            )

            # Usage data is not provided by this API in a standard way, set to 0
            usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)

            return ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage
            )

        except Exception as e:
            print(f"{RED}Error during Toolbaz non-stream request: {e}{RESET}")
            raise IOError(f"Toolbaz request failed: {e}") from e
-
class Chat(BaseChat):
    """Namespace object exposing the OpenAI-style ``client.chat.completions`` API."""

    def __init__(self, client: 'Toolbaz'):
        # Completions keeps a back-reference to the client so requests can
        # reuse its session, proxies, and timeout settings.
        self.completions = Completions(client)
|
|
272
|
-
|
|
273
|
-
class Toolbaz(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for Toolbaz API.

    Usage:
        client = Toolbaz()
        response = client.chat.completions.create(
            model="gemini-2.0-flash",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    AVAILABLE_MODELS = [
        "gemini-2.0-flash-thinking",
        "gemini-2.0-flash",
        "gemini-1.5-flash",
        "gpt-4o-latest",
        "gpt-4o-mini",
        "gpt-4o",
        "deepseek-r1",
        "Llama-3.3-70B",
        "Llama-3.1-405B",
        "Llama-3.1-70B",
        "Qwen2.5-72B",
        "Qwen2-72B",
        "grok-2-1212",
        "grok-beta",
        "toolbaz_v3.5_pro",
        "toolbaz_v3",
        "mixtral_8x22b",
        "L3-70B-Euryale-v2.1",
        "midnight-rose",
        "unity",
        "unfiltered_x",
    ]

    def __init__(
        self,
        api_key: Optional[str] = None,  # Not used but kept for compatibility
        timeout: int = 30,
        proxies: Optional[dict] = None,
        browser: str = "chrome"
    ):
        """
        Initialize the Toolbaz client.

        Args:
            api_key: Not used but kept for compatibility with OpenAI interface
            timeout: Request timeout in seconds
            proxies: Proxy configuration for requests (defaults to no proxies)
            browser: Browser name for LitAgent to generate User-Agent
        """
        self.timeout = timeout
        # Fix: the previous signature used a mutable default (`proxies={}`),
        # which is shared across all instances; use a None sentinel instead.
        self.proxies = proxies if proxies is not None else {}

        # cloudscraper transparently solves Cloudflare challenges that plain
        # requests sessions would fail on.
        self.session = cloudscraper.create_scraper()

        # Browser-like headers; the user-agent comes from a generated
        # fingerprint so it stays consistent with the token payload below.
        self.session.headers.update({
            "user-agent": LitAgent().generate_fingerprint(browser=browser)["user_agent"],
            "accept": "*/*",
            "accept-language": "en-US",
            "cache-control": "no-cache",
            "connection": "keep-alive",
            "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
            "origin": "https://toolbaz.com",
            "pragma": "no-cache",
            "referer": "https://toolbaz.com/",
            "sec-fetch-mode": "cors"
        })

        # Expose the OpenAI-style `client.chat.completions` namespace.
        self.chat = Chat(self)

    def random_string(self, length: int) -> str:
        """Generate a random alphanumeric string of the given length."""
        return ''.join(random.choices(string.ascii_letters + string.digits, k=length))

    def generate_token(self) -> str:
        """Generate an authentication token for the Toolbaz API.

        The payload mimics the browser fingerprint data the Toolbaz frontend
        collects (the obfuscated keys are what the server expects), and is
        base64-encoded behind a fixed "d8TW0v" prefix.
        """
        payload = {
            "bR6wF": {
                "nV5kP": self.session.headers.get("user-agent"),
                "lQ9jX": "en-US",
                "sD2zR": "431x958",
                "tY4hL": time.tzname[0] if time.tzname else "UTC",
                "pL8mC": "Linux armv81",
                "cQ3vD": datetime.now().year,
                "hK7jN": datetime.now().hour
            },
            "uT4bX": {
                "mM9wZ": [],
                "kP8jY": []
            },
            "tuTcS": int(time.time()),
            "tDfxy": None,
            "RtyJt": str(uuid.uuid4())
        }
        return "d8TW0v" + base64.b64encode(json.dumps(payload).encode()).decode()

    def get_auth(self) -> Optional[dict]:
        """Get authentication credentials for the Toolbaz API.

        Returns:
            A dict with "token" and "session_id" keys on success, or None on
            any failure (best-effort: errors are printed, not raised).
        """
        try:
            session_id = self.random_string(36)
            token = self.generate_token()
            data = {
                "session_id": session_id,
                "token": token
            }
            # Consistency fix: honor the client's proxies/timeout here, as the
            # writing.php requests already do; previously this call could hang
            # indefinitely and bypassed any configured proxy.
            resp = self.session.post(
                "https://data.toolbaz.com/token.php",
                data=data,
                proxies=self.proxies,
                timeout=self.timeout
            )
            resp.raise_for_status()
            result = resp.json()
            if result.get("success"):
                return {"token": result["token"], "session_id": session_id}
            return None
        except Exception as e:
            print(f"{RED}Error getting Toolbaz authentication: {e}{RESET}")
            return None
|
|
393
|
-
|
|
394
|
-
# Example usage
if __name__ == "__main__":
    # Smoke-test the provider end-to-end: performs a real network request to
    # the Toolbaz API, so this only works with internet access.
    client = Toolbaz()
    response = client.chat.completions.create(
        model="gemini-2.0-flash",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello! How are you today?"}
        ]
    )
    # Non-streaming call: the full reply is available on the first choice.
    print(response.choices[0].message.content)
|