webscout-8.0-py3-none-any.whl → webscout-8.2-py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (80)
  1. inferno/__init__.py +6 -0
  2. inferno/__main__.py +9 -0
  3. inferno/cli.py +6 -0
  4. webscout/Local/__init__.py +6 -0
  5. webscout/Local/__main__.py +9 -0
  6. webscout/Local/api.py +576 -0
  7. webscout/Local/cli.py +338 -0
  8. webscout/Local/config.py +75 -0
  9. webscout/Local/llm.py +188 -0
  10. webscout/Local/model_manager.py +205 -0
  11. webscout/Local/server.py +187 -0
  12. webscout/Local/utils.py +93 -0
  13. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  14. webscout/Provider/AISEARCH/ISou.py +1 -1
  15. webscout/Provider/AISEARCH/Perplexity.py +359 -0
  16. webscout/Provider/AISEARCH/__init__.py +3 -1
  17. webscout/Provider/AISEARCH/felo_search.py +1 -1
  18. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  19. webscout/Provider/AISEARCH/hika_search.py +1 -1
  20. webscout/Provider/AISEARCH/iask_search.py +436 -0
  21. webscout/Provider/AISEARCH/scira_search.py +9 -5
  22. webscout/Provider/AISEARCH/webpilotai_search.py +1 -1
  23. webscout/Provider/ExaAI.py +1 -1
  24. webscout/Provider/ExaChat.py +18 -8
  25. webscout/Provider/GithubChat.py +5 -1
  26. webscout/Provider/Glider.py +4 -2
  27. webscout/Provider/Jadve.py +2 -2
  28. webscout/Provider/OPENAI/__init__.py +24 -0
  29. webscout/Provider/OPENAI/base.py +46 -0
  30. webscout/Provider/OPENAI/c4ai.py +347 -0
  31. webscout/Provider/OPENAI/chatgpt.py +549 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  33. webscout/Provider/OPENAI/deepinfra.py +284 -0
  34. webscout/Provider/OPENAI/exaai.py +419 -0
  35. webscout/Provider/OPENAI/exachat.py +433 -0
  36. webscout/Provider/OPENAI/freeaichat.py +355 -0
  37. webscout/Provider/OPENAI/glider.py +316 -0
  38. webscout/Provider/OPENAI/heckai.py +337 -0
  39. webscout/Provider/OPENAI/llmchatco.py +327 -0
  40. webscout/Provider/OPENAI/netwrck.py +348 -0
  41. webscout/Provider/OPENAI/opkfc.py +488 -0
  42. webscout/Provider/OPENAI/scirachat.py +463 -0
  43. webscout/Provider/OPENAI/sonus.py +294 -0
  44. webscout/Provider/OPENAI/standardinput.py +425 -0
  45. webscout/Provider/OPENAI/textpollinations.py +285 -0
  46. webscout/Provider/OPENAI/toolbaz.py +405 -0
  47. webscout/Provider/OPENAI/typegpt.py +361 -0
  48. webscout/Provider/OPENAI/uncovrAI.py +455 -0
  49. webscout/Provider/OPENAI/utils.py +211 -0
  50. webscout/Provider/OPENAI/venice.py +428 -0
  51. webscout/Provider/OPENAI/wisecat.py +381 -0
  52. webscout/Provider/OPENAI/writecream.py +158 -0
  53. webscout/Provider/OPENAI/x0gpt.py +389 -0
  54. webscout/Provider/OPENAI/yep.py +329 -0
  55. webscout/Provider/StandardInput.py +278 -0
  56. webscout/Provider/TextPollinationsAI.py +27 -28
  57. webscout/Provider/Venice.py +1 -1
  58. webscout/Provider/Writecream.py +211 -0
  59. webscout/Provider/WritingMate.py +197 -0
  60. webscout/Provider/Youchat.py +30 -26
  61. webscout/Provider/__init__.py +14 -6
  62. webscout/Provider/koala.py +2 -2
  63. webscout/Provider/llmchatco.py +5 -0
  64. webscout/Provider/scira_chat.py +18 -12
  65. webscout/Provider/scnet.py +187 -0
  66. webscout/Provider/toolbaz.py +320 -0
  67. webscout/Provider/typegpt.py +3 -184
  68. webscout/Provider/uncovr.py +3 -3
  69. webscout/conversation.py +32 -32
  70. webscout/prompt_manager.py +2 -1
  71. webscout/version.py +1 -1
  72. webscout-8.2.dist-info/METADATA +734 -0
  73. {webscout-8.0.dist-info → webscout-8.2.dist-info}/RECORD +77 -32
  74. webscout-8.2.dist-info/entry_points.txt +5 -0
  75. {webscout-8.0.dist-info → webscout-8.2.dist-info}/top_level.txt +1 -0
  76. webscout/Provider/flowith.py +0 -207
  77. webscout-8.0.dist-info/METADATA +0 -995
  78. webscout-8.0.dist-info/entry_points.txt +0 -3
  79. {webscout-8.0.dist-info → webscout-8.2.dist-info}/LICENSE.md +0 -0
  80. {webscout-8.0.dist-info → webscout-8.2.dist-info}/WHEEL +0 -0
webscout/Provider/OPENAI/textpollinations.py (new file)
@@ -0,0 +1,285 @@
+ import time
+ import uuid
+ import requests
+ import json
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ # Import base classes and utility structures
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+ from .utils import (
+     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+     ChatCompletionMessage, CompletionUsage
+ )
+
+ # Import LitAgent for browser fingerprinting
+ from webscout.litagent import LitAgent
+
+ # ANSI escape codes for formatting
+ BOLD = "\033[1m"
+ RED = "\033[91m"
+ RESET = "\033[0m"
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'TextPollinations'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = None,
+         stream: bool = False,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation.
+         Mimics openai.chat.completions.create
+         """
+         payload = {
+             "model": model,
+             "messages": messages,
+             "stream": stream,
+         }
+         if max_tokens is not None:
+             payload["max_tokens"] = max_tokens
+         if temperature is not None:
+             payload["temperature"] = temperature
+         if top_p is not None:
+             payload["top_p"] = top_p
+
+         payload.update(kwargs)
+
+         request_id = str(uuid.uuid4())
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_streaming(request_id, created_time, model, payload)
+         else:
+             return self._create_non_streaming(request_id, created_time, model, payload)
+
+     def _create_streaming(
+         self,
+         request_id: str,
+         created_time: int,
+         model: str,
+         payload: Dict[str, Any]
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         """Implementation for streaming chat completions."""
+         try:
+
+             # Make the streaming request
+             response = self._client.session.post(
+                 self._client.api_endpoint,
+                 headers=self._client.headers,
+                 json=payload,
+                 stream=True,
+                 timeout=self._client.timeout
+             )
+
+             if not response.ok:
+                 raise IOError(f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}")
+
+             # Process the streaming response
+             full_response = ""
+
+             for line in response.iter_lines():
+                 if line:
+                     line = line.decode('utf-8').strip()
+                     if line == "data: [DONE]":
+                         break
+                     if line.startswith('data: '):
+                         try:
+                             json_data = json.loads(line[6:])
+                             if 'choices' in json_data and len(json_data['choices']) > 0:
+                                 choice = json_data['choices'][0]
+                                 if 'delta' in choice and 'content' in choice['delta']:
+                                     content = choice['delta']['content']
+                                     full_response += content
+
+                                     # Create and yield a chunk
+                                     delta = ChoiceDelta(content=content)
+                                     choice = Choice(index=0, delta=delta, finish_reason=None)
+                                     chunk = ChatCompletionChunk(
+                                         id=request_id,
+                                         choices=[choice],
+                                         created=created_time,
+                                         model=model
+                                     )
+
+                                     yield chunk
+                         except json.JSONDecodeError:
+                             continue
+
+             # Final chunk with finish_reason
+             delta = ChoiceDelta(content=None)
+             choice = Choice(index=0, delta=delta, finish_reason="stop")
+             chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model
+             )
+
+             yield chunk
+
+         except Exception as e:
+             print(f"{RED}Error during TextPollinations streaming request: {e}{RESET}")
+             raise IOError(f"TextPollinations streaming request failed: {e}") from e
+
+     def _create_non_streaming(
+         self,
+         request_id: str,
+         created_time: int,
+         model: str,
+         payload: Dict[str, Any]
+     ) -> ChatCompletion:
+         """Implementation for non-streaming chat completions."""
+         try:
+
+             # Make the non-streaming request
+             response = self._client.session.post(
+                 self._client.api_endpoint,
+                 headers=self._client.headers,
+                 json=payload,
+                 timeout=self._client.timeout
+             )
+
+             if not response.ok:
+                 raise IOError(f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}")
+
+             # Parse the response
+             response_json = response.json()
+
+             # Extract the content
+             if 'choices' in response_json and len(response_json['choices']) > 0:
+                 if 'message' in response_json['choices'][0]:
+                     full_content = response_json['choices'][0]['message']['content']
+                 else:
+                     full_content = ""
+             else:
+                 full_content = ""
+
+             # Create the completion message
+             message = ChatCompletionMessage(
+                 role="assistant",
+                 content=full_content
+             )
+
+             # Create the choice
+             choice = Choice(
+                 index=0,
+                 message=message,
+                 finish_reason="stop"
+             )
+
+             # Estimate token usage (very rough estimate)
+             prompt_tokens = sum(len(msg.get("content", "")) // 4 for msg in payload.get("messages", []))
+             completion_tokens = len(full_content) // 4
+             usage = CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=completion_tokens,
+                 total_tokens=prompt_tokens + completion_tokens
+             )
+
+             # Create the completion object
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 usage=usage,
+             )
+
+             return completion
+
+         except Exception as e:
+             print(f"{RED}Error during TextPollinations non-stream request: {e}{RESET}")
+             raise IOError(f"TextPollinations request failed: {e}") from e
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'TextPollinations'):
+         self.completions = Completions(client)
+
+ class TextPollinations(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for TextPollinations API.
+
+     Usage:
+         client = TextPollinations()
+         response = client.chat.completions.create(
+             model="openai-large",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+         print(response.choices[0].message.content)
+     """
+
+     AVAILABLE_MODELS = [
+         "openai",
+         "openai-large",
+         "openai-reasoning",
+         "qwen-coder",
+         "llama",
+         "llamascout",
+         "mistral",
+         "unity",
+         "midijourney",
+         "rtist",
+         "searchgpt",
+         "evil",
+         "deepseek-reasoning",
+         "deepseek-reasoning-large",
+         "llamalight",
+         "phi",
+         "llama-vision",
+         "pixtral",
+         "gemini",
+         "hormoz",
+         "hypnosis-tracy",
+         "mistral-roblox",
+         "roblox-rp",
+         "deepseek",
+         "sur",
+         "llama-scaleway",
+         "openai-audio",
+     ]
+
+     def __init__(
+         self,
+         timeout: int = 30,
+         proxies: dict = {}
+     ):
+         """
+         Initialize the TextPollinations client.
+
+         Args:
+             timeout: Request timeout in seconds
+             proxies: Optional proxy configuration
+         """
+         self.timeout = timeout
+         self.api_endpoint = "https://text.pollinations.ai/openai"
+         self.proxies = proxies
+
+         # Initialize session
+         self.session = requests.Session()
+         if proxies:
+             self.session.proxies.update(proxies)
+
+         # Initialize LitAgent for user agent generation
+         agent = LitAgent()
+         self.user_agent = agent.random()
+
+         # Set headers
+         self.headers = {
+             'Accept': '*/*',
+             'Accept-Language': 'en-US,en;q=0.9',
+             'User-Agent': self.user_agent,
+             'Content-Type': 'application/json',
+         }
+
+         self.session.headers.update(self.headers)
+
+         # Initialize chat interface
+         self.chat = Chat(self)
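
The docstring above only shows a non-streaming call. As a rough usage sketch (not part of the diff, assuming the class is imported directly from its module path and that chunk objects expose delta.content the same way the docstring accesses message.content), streaming with the new client would look like:

from webscout.Provider.OPENAI.textpollinations import TextPollinations

client = TextPollinations(timeout=30)
# stream=True makes create() return a generator of ChatCompletionChunk objects
for chunk in client.chat.completions.create(
    model="openai-large",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    stream=True,
):
    # the final chunk carries content=None with finish_reason="stop", so guard for it
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)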
webscout/Provider/OPENAI/toolbaz.py (new file)
@@ -0,0 +1,405 @@
+ import time
+ import uuid
+ import base64
+ import json
+ import random
+ import string
+ import re
+ import cloudscraper
+ from datetime import datetime
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ from webscout.litagent import LitAgent
+ from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
+ from .utils import (
+     ChatCompletion,
+     ChatCompletionChunk,
+     Choice,
+     ChatCompletionMessage,
+     ChoiceDelta,
+     CompletionUsage,
+     format_prompt,
+     get_system_prompt
+ )
+
+ # ANSI escape codes for formatting
+ BOLD = "\033[1m"
+ RED = "\033[91m"
+ RESET = "\033[0m"
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'Toolbaz'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = None,
+         stream: bool = False,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation.
+         Mimics openai.chat.completions.create
+         """
+         # Format the messages using the format_prompt utility
+         formatted_prompt = format_prompt(messages, add_special_tokens=True, do_continue=True)
+
+         # Get authentication token
+         auth = self._client.get_auth()
+         if not auth:
+             raise IOError("Failed to authenticate with Toolbaz API")
+
+         # Prepare the request data
+         data = {
+             "text": formatted_prompt,
+             "capcha": auth["token"],
+             "model": model,
+             "session_id": auth["session_id"]
+         }
+
+         # Generate a unique request ID
+         request_id = f"chatcmpl-{uuid.uuid4().hex}"
+         created_time = int(time.time())
+
+         # Handle streaming response
+         if stream:
+             return self._handle_streaming_response(request_id, created_time, model, data)
+         else:
+             return self._handle_non_streaming_response(request_id, created_time, model, data)
+
+     def _handle_streaming_response(
+         self,
+         request_id: str,
+         created_time: int,
+         model: str,
+         data: Dict[str, Any]
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         """Handle streaming response from Toolbaz API"""
+         try:
+             resp = self._client.session.post(
+                 "https://data.toolbaz.com/writing.php",
+                 data=data,
+                 stream=True,
+                 proxies=self._client.proxies,
+                 timeout=self._client.timeout
+             )
+             resp.raise_for_status()
+
+             buffer = ""
+             tag_start = "[model:"
+             streaming_text = ""
+
+             for chunk in resp.iter_content(chunk_size=1):
+                 if chunk:
+                     text = chunk.decode(errors="ignore")
+                     buffer += text
+
+                     # Remove all complete [model: ...] tags in buffer
+                     while True:
+                         match = re.search(r"\[model:.*?\]", buffer)
+                         if not match:
+                             break
+                         buffer = buffer[:match.start()] + buffer[match.end():]
+
+                     # Only yield up to the last possible start of a tag
+                     last_tag = buffer.rfind(tag_start)
+                     if last_tag == -1 or last_tag + len(tag_start) > len(buffer):
+                         if buffer:
+                             streaming_text += buffer
+
+                             # Create the delta object
+                             delta = ChoiceDelta(
+                                 content=buffer,
+                                 role="assistant"
+                             )
+
+                             # Create the choice object
+                             choice = Choice(
+                                 index=0,
+                                 delta=delta,
+                                 finish_reason=None
+                             )
+
+                             # Create the chunk object
+                             chunk = ChatCompletionChunk(
+                                 id=request_id,
+                                 choices=[choice],
+                                 created=created_time,
+                                 model=model
+                             )
+
+                             yield chunk
+                             buffer = ""
+                     else:
+                         if buffer[:last_tag]:
+                             streaming_text += buffer[:last_tag]
+
+                             # Create the delta object
+                             delta = ChoiceDelta(
+                                 content=buffer[:last_tag],
+                                 role="assistant"
+                             )
+
+                             # Create the choice object
+                             choice = Choice(
+                                 index=0,
+                                 delta=delta,
+                                 finish_reason=None
+                             )
+
+                             # Create the chunk object
+                             chunk = ChatCompletionChunk(
+                                 id=request_id,
+                                 choices=[choice],
+                                 created=created_time,
+                                 model=model
+                             )
+
+                             yield chunk
+                         buffer = buffer[last_tag:]
+
+             # Remove any remaining [model: ...] tag in the buffer
+             buffer = re.sub(r"\[model:.*?\]", "", buffer)
+             if buffer:
+                 # Create the delta object
+                 delta = ChoiceDelta(
+                     content=buffer,
+                     role="assistant"
+                 )
+
+                 # Create the choice object
+                 choice = Choice(
+                     index=0,
+                     delta=delta,
+                     finish_reason="stop"
+                 )
+
+                 # Create the chunk object
+                 chunk = ChatCompletionChunk(
+                     id=request_id,
+                     choices=[choice],
+                     created=created_time,
+                     model=model
+                 )
+
+                 yield chunk
+
+             # Final chunk with finish_reason
+             delta = ChoiceDelta(
+                 content=None,
+                 role=None
+             )
+
+             choice = Choice(
+                 index=0,
+                 delta=delta,
+                 finish_reason="stop"
+             )
+
+             chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model
+             )
+
+             yield chunk
+
+         except Exception as e:
+             print(f"{RED}Error during Toolbaz streaming request: {e}{RESET}")
+             raise IOError(f"Toolbaz streaming request failed: {e}") from e
+
+     def _handle_non_streaming_response(
+         self,
+         request_id: str,
+         created_time: int,
+         model: str,
+         data: Dict[str, Any]
+     ) -> ChatCompletion:
+         """Handle non-streaming response from Toolbaz API"""
+         try:
+             resp = self._client.session.post(
+                 "https://data.toolbaz.com/writing.php",
+                 data=data,
+                 proxies=self._client.proxies,
+                 timeout=self._client.timeout
+             )
+             resp.raise_for_status()
+
+             text = resp.text
+             # Remove [model: ...] tags
+             text = re.sub(r"\[model:.*?\]", "", text)
+
+             # Create the message object
+             message = ChatCompletionMessage(
+                 role="assistant",
+                 content=text
+             )
+
+             # Create the choice object
+             choice = Choice(
+                 index=0,
+                 message=message,
+                 finish_reason="stop"
+             )
+
+             # Usage data is not provided by this API in a standard way, set to 0
+             usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
+
+             # Create the completion object
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 usage=usage
+             )
+
+             return completion
+
+         except Exception as e:
+             print(f"{RED}Error during Toolbaz non-stream request: {e}{RESET}")
+             raise IOError(f"Toolbaz request failed: {e}") from e
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'Toolbaz'):
+         self.completions = Completions(client)
+
+ class Toolbaz(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for Toolbaz API.
+
+     Usage:
+         client = Toolbaz()
+         response = client.chat.completions.create(
+             model="gemini-2.0-flash",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+         print(response.choices[0].message.content)
+     """
+
+     AVAILABLE_MODELS = [
+         "gemini-2.0-flash-thinking",
+         "gemini-2.0-flash",
+         "gemini-1.5-flash",
+         "gpt-4o-latest",
+         "gpt-4o-mini",
+         "gpt-4o",
+         "deepseek-r1",
+         "Llama-3.3-70B",
+         "Llama-3.1-405B",
+         "Llama-3.1-70B",
+         "Qwen2.5-72B",
+         "Qwen2-72B",
+         "grok-2-1212",
+         "grok-beta",
+         "toolbaz_v3.5_pro",
+         "toolbaz_v3",
+         "mixtral_8x22b",
+         "L3-70B-Euryale-v2.1",
+         "midnight-rose",
+         "unity",
+         "unfiltered_x"
+     ]
+
+     def __init__(
+         self,
+         api_key: Optional[str] = None,  # Not used but kept for compatibility
+         timeout: int = 30,
+         proxies: dict = {},
+         browser: str = "chrome"
+     ):
+         """
+         Initialize the Toolbaz client.
+
+         Args:
+             api_key: Not used but kept for compatibility with OpenAI interface
+             timeout: Request timeout in seconds
+             proxies: Proxy configuration for requests
+             browser: Browser name for LitAgent to generate User-Agent
+         """
+         self.timeout = timeout
+         self.proxies = proxies
+
+         # Initialize session with cloudscraper
+         self.session = cloudscraper.create_scraper()
+
+         # Set up headers
+         self.session.headers.update({
+             "user-agent": LitAgent().generate_fingerprint(browser=browser)["user_agent"],
+             "accept": "*/*",
+             "accept-language": "en-US",
+             "cache-control": "no-cache",
+             "connection": "keep-alive",
+             "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
+             "origin": "https://toolbaz.com",
+             "pragma": "no-cache",
+             "referer": "https://toolbaz.com/",
+             "sec-fetch-mode": "cors"
+         })
+
+         # Initialize chat property
+         self.chat = Chat(self)
+
+     def random_string(self, length):
+         """Generate a random string of specified length"""
+         return ''.join(random.choices(string.ascii_letters + string.digits, k=length))
+
+     def generate_token(self):
+         """Generate authentication token for Toolbaz API"""
+         payload = {
+             "bR6wF": {
+                 "nV5kP": self.session.headers.get("user-agent"),
+                 "lQ9jX": "en-US",
+                 "sD2zR": "431x958",
+                 "tY4hL": time.tzname[0] if time.tzname else "UTC",
+                 "pL8mC": "Linux armv81",
+                 "cQ3vD": datetime.now().year,
+                 "hK7jN": datetime.now().hour
+             },
+             "uT4bX": {
+                 "mM9wZ": [],
+                 "kP8jY": []
+             },
+             "tuTcS": int(time.time()),
+             "tDfxy": None,
+             "RtyJt": str(uuid.uuid4())
+         }
+         return "d8TW0v" + base64.b64encode(json.dumps(payload).encode()).decode()
+
+     def get_auth(self):
+         """Get authentication credentials for Toolbaz API"""
+         try:
+             session_id = self.random_string(36)
+             token = self.generate_token()
+             data = {
+                 "session_id": session_id,
+                 "token": token
+             }
+             resp = self.session.post("https://data.toolbaz.com/token.php", data=data)
+             resp.raise_for_status()
+             result = resp.json()
+             if result.get("success"):
+                 return {"token": result["token"], "session_id": session_id}
+             return None
+         except Exception as e:
+             print(f"{RED}Error getting Toolbaz authentication: {e}{RESET}")
+             return None
+
+ # Example usage
+ if __name__ == "__main__":
+     # Test the provider
+     client = Toolbaz()
+     response = client.chat.completions.create(
+         model="gemini-2.0-flash",
+         messages=[
+             {"role": "system", "content": "You are a helpful assistant."},
+             {"role": "user", "content": "Hello! How are you today?"}
+         ]
+     )
+     print(response.choices[0].message.content)
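
Toolbaz's built-in example is non-streaming; under the same assumptions as the sketch above (direct module import, attribute access on chunk objects), a hedged streaming variant would be:

from webscout.Provider.OPENAI.toolbaz import Toolbaz

client = Toolbaz()
for chunk in client.chat.completions.create(
    model="gemini-2.0-flash",
    messages=[{"role": "user", "content": "Hello! How are you today?"}],
    stream=True,
):
    # skip the trailing finish_reason="stop" chunk, whose content is None
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)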