webscout 8.3.5__py3-none-any.whl → 8.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the registry's advisory page for more details.

Files changed (63)
  1. webscout/Bard.py +12 -6
  2. webscout/DWEBS.py +66 -57
  3. webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
  4. webscout/Provider/AISEARCH/__init__.py +1 -1
  5. webscout/Provider/Deepinfra.py +6 -0
  6. webscout/Provider/Flowith.py +6 -1
  7. webscout/Provider/GithubChat.py +1 -0
  8. webscout/Provider/GptOss.py +207 -0
  9. webscout/Provider/Kimi.py +445 -0
  10. webscout/Provider/Netwrck.py +3 -6
  11. webscout/Provider/OPENAI/README.md +2 -1
  12. webscout/Provider/OPENAI/TogetherAI.py +50 -55
  13. webscout/Provider/OPENAI/__init__.py +4 -2
  14. webscout/Provider/OPENAI/copilot.py +20 -4
  15. webscout/Provider/OPENAI/deepinfra.py +6 -0
  16. webscout/Provider/OPENAI/e2b.py +60 -8
  17. webscout/Provider/OPENAI/flowith.py +4 -3
  18. webscout/Provider/OPENAI/generate_api_key.py +48 -0
  19. webscout/Provider/OPENAI/gptoss.py +288 -0
  20. webscout/Provider/OPENAI/kimi.py +469 -0
  21. webscout/Provider/OPENAI/netwrck.py +8 -12
  22. webscout/Provider/OPENAI/refact.py +274 -0
  23. webscout/Provider/OPENAI/textpollinations.py +3 -6
  24. webscout/Provider/OPENAI/toolbaz.py +1 -0
  25. webscout/Provider/TTI/bing.py +14 -2
  26. webscout/Provider/TTI/together.py +10 -9
  27. webscout/Provider/TTS/README.md +0 -1
  28. webscout/Provider/TTS/__init__.py +0 -1
  29. webscout/Provider/TTS/base.py +479 -159
  30. webscout/Provider/TTS/deepgram.py +409 -156
  31. webscout/Provider/TTS/elevenlabs.py +425 -111
  32. webscout/Provider/TTS/freetts.py +317 -140
  33. webscout/Provider/TTS/gesserit.py +192 -128
  34. webscout/Provider/TTS/murfai.py +248 -113
  35. webscout/Provider/TTS/openai_fm.py +347 -129
  36. webscout/Provider/TTS/speechma.py +620 -586
  37. webscout/Provider/TextPollinationsAI.py +3 -6
  38. webscout/Provider/TogetherAI.py +50 -55
  39. webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
  40. webscout/Provider/__init__.py +2 -90
  41. webscout/Provider/cerebras.py +83 -33
  42. webscout/Provider/copilot.py +42 -23
  43. webscout/Provider/toolbaz.py +1 -0
  44. webscout/conversation.py +22 -20
  45. webscout/sanitize.py +14 -10
  46. webscout/scout/README.md +20 -23
  47. webscout/scout/core/crawler.py +125 -38
  48. webscout/scout/core/scout.py +26 -5
  49. webscout/version.py +1 -1
  50. webscout/webscout_search.py +13 -6
  51. webscout/webscout_search_async.py +10 -8
  52. webscout/yep_search.py +13 -5
  53. {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/METADATA +2 -1
  54. {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/RECORD +59 -56
  55. webscout/Provider/Glider.py +0 -225
  56. webscout/Provider/OPENAI/c4ai.py +0 -394
  57. webscout/Provider/OPENAI/glider.py +0 -330
  58. webscout/Provider/TTS/sthir.py +0 -94
  59. /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
  60. {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/WHEEL +0 -0
  61. {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/entry_points.txt +0 -0
  62. {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/licenses/LICENSE.md +0 -0
  63. {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/top_level.txt +0 -0
@@ -204,72 +204,67 @@ class TogetherAI(OpenAICompatibleProvider):
204
204
  OpenAI-compatible client for TogetherAI API.
205
205
  """
206
206
  AVAILABLE_MODELS = [
207
- "mistralai/Mistral-7B-Instruct-v0.3",
208
- "togethercomputer/MoA-1",
209
- "Qwen/Qwen2.5-7B-Instruct-Turbo",
210
- "meta-llama/Llama-3-8b-chat-hf",
211
- "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
212
- "togethercomputer/MoA-1-Turbo",
213
- "eddiehou/meta-llama/Llama-3.1-405B",
214
- "mistralai/Mistral-7B-Instruct-v0.2",
215
- "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
216
- "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
217
- "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
218
- "meta-llama/Llama-3.3-70B-Instruct-Turbo",
219
- "Qwen/Qwen2.5-VL-72B-Instruct",
220
- "arcee-ai/AFM-4.5B-Preview",
221
- "lgai/exaone-3-5-32b-instruct",
222
- "meta-llama/Llama-3-70b-chat-hf",
223
- "mistralai/Mixtral-8x7B-Instruct-v0.1",
224
- "google/gemma-2-27b-it",
207
+ "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
208
+ "Qwen/QwQ-32B",
225
209
  "Qwen/Qwen2-72B-Instruct",
226
- "mistralai/Mistral-Small-24B-Instruct-2501",
227
210
  "Qwen/Qwen2-VL-72B-Instruct",
228
- "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
229
- "meta-llama/Llama-Vision-Free",
230
- "perplexity-ai/r1-1776",
231
- "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
232
- "arcee-ai/maestro-reasoning",
233
- "togethercomputer/Refuel-Llm-V2-Small",
211
+ "Qwen/Qwen2.5-72B-Instruct-Turbo",
212
+ "Qwen/Qwen2.5-7B-Instruct-Turbo",
234
213
  "Qwen/Qwen2.5-Coder-32B-Instruct",
214
+ "Qwen/Qwen2.5-VL-72B-Instruct",
215
+ "Qwen/Qwen3-235B-A22B-Instruct-2507-tput",
216
+ "Qwen/Qwen3-235B-A22B-Thinking-2507",
217
+ "Qwen/Qwen3-235B-A22B-fp8-tput",
218
+ "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8",
219
+ "Salesforce/Llama-Rank-V1",
220
+ "Virtue-AI/VirtueGuard-Text-Lite",
221
+ "arcee-ai/AFM-4.5B",
235
222
  "arcee-ai/coder-large",
236
- "Qwen/QwQ-32B",
223
+ "arcee-ai/maestro-reasoning",
224
+ "arcee-ai/virtuoso-large",
237
225
  "arcee_ai/arcee-spotlight",
226
+ "blackbox/meta-llama-3-1-8b",
227
+ "deepcogito/cogito-v2-preview-deepseek-671b",
228
+ "deepseek-ai/DeepSeek-R1",
238
229
  "deepseek-ai/DeepSeek-R1-0528-tput",
239
- "marin-community/marin-8b-instruct",
240
- "lgai/exaone-deep-32b",
241
- "google/gemma-3-27b-it",
242
230
  "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
243
- "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
244
- "mistralai/Mistral-7B-Instruct-v0.1",
245
- "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
246
- "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
247
231
  "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free",
248
- "scb10x/scb10x-typhoon-2-1-gemma3-12b",
249
- "togethercomputer/Refuel-Llm-V2",
250
- "Qwen/Qwen2.5-72B-Instruct-Turbo",
251
- "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
252
- "meta-llama/Llama-4-Scout-17B-16E-Instruct",
253
- "meta-llama/Llama-3.2-3B-Instruct-Turbo",
254
- "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
232
+ "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
233
+ "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
255
234
  "deepseek-ai/DeepSeek-V3",
256
- "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8",
257
- "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
258
- "Qwen/Qwen3-32B-FP8",
259
- "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
260
- "arcee-ai/virtuoso-large",
235
+ "google/gemma-2-27b-it",
261
236
  "google/gemma-3n-E4B-it",
262
- "moonshotai/Kimi-K2-Instruct",
237
+ "lgai/exaone-3-5-32b-instruct",
238
+ "lgai/exaone-deep-32b",
239
+ "marin-community/marin-8b-instruct",
240
+ "meta-llama/Llama-2-70b-hf",
241
+ "meta-llama/Llama-3-70b-chat-hf",
242
+ "meta-llama/Llama-3-8b-chat-hf",
243
+ "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
244
+ "meta-llama/Llama-3.2-3B-Instruct-Turbo",
245
+ "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
246
+ "meta-llama/Llama-3.3-70B-Instruct-Turbo",
247
+ "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
248
+ "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
249
+ "meta-llama/Llama-4-Scout-17B-16E-Instruct",
250
+ "meta-llama/Llama-Vision-Free",
251
+ "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
252
+ "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
253
+ "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
254
+ "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
263
255
  "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
264
- "deepseek-ai/DeepSeek-R1",
265
- "Qwen/Qwen3-235B-A22B-fp8-tput",
266
- "Qwen/Qwen3-235B-A22B-Instruct-2507-tput",
267
- "Rrrr/nim/nvidia/llama-3.3-nemotron-super-49b-v1-de6a6453",
268
- "Rrrr/mistralai/Devstral-Small-2505-306f5881",
269
- "Qwen/Qwen3-235B-A22B-Thinking-2507",
270
- "Rrrr/ChatGPT-5",
271
- "Rrrr/MeowGPT-3.5",
272
- "blackbox/meta-llama-3-1-8b"
256
+ "mistralai/Mistral-7B-Instruct-v0.1",
257
+ "mistralai/Mistral-7B-Instruct-v0.2",
258
+ "mistralai/Mistral-7B-Instruct-v0.3",
259
+ "mistralai/Mistral-Small-24B-Instruct-2501",
260
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
261
+ "moonshotai/Kimi-K2-Instruct",
262
+ "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
263
+ "perplexity-ai/r1-1776",
264
+ "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
265
+ "scb10x/scb10x-typhoon-2-1-gemma3-12b",
266
+ "togethercomputer/Refuel-Llm-V2-Small",
267
+ "zai-org/GLM-4.5-Air-FP8"
273
268
  ]
274
269
 
275
270
  def __init__(self, browser: str = "chrome"):
@@ -1,6 +1,6 @@
1
1
  # This file marks the directory as a Python package.
2
2
  from .deepinfra import *
3
- from .glider import *
3
+ ## glider import removed
4
4
  from .chatgptclone import *
5
5
  from .x0gpt import *
6
6
  from .wisecat import *
@@ -26,7 +26,6 @@ from .ai4chat import * # Add AI4Chat
26
26
  from .mcpcore import *
27
27
  from .flowith import *
28
28
  from .chatsandbox import *
29
- from .c4ai import *
30
29
  from .flowith import *
31
30
  from .Cloudflare import *
32
31
  from .NEMOTRON import *
@@ -44,6 +43,9 @@ from .friendli import *
44
43
  from .monochat import *
45
44
  from .MiniMax import * # Add MiniMaxAI provider
46
45
  from .qodo import * # Add QodoAI provider
46
+ from .kimi import * # Add Kimi provider
47
+ from .gptoss import * # Add GPT-OSS provider
48
+ from .refact import * # Add Refact provider
47
49
  # Export auto-proxy functionality
48
50
  from .autoproxy import (
49
51
  get_auto_proxy,
@@ -87,12 +87,22 @@ class Completions(BaseCompletions):
87
87
  images.append({"type": "image", "url": r.json().get("url")})
88
88
 
89
89
  ws = s.ws_connect(self._client.websocket_url)
90
- mode = "reasoning" if "Think" in model else "chat"
90
+ # Map alias to real model name if needed
91
+ real_model = Copilot.MODEL_ALIASES.get(model, model)
92
+ if real_model not in Copilot.AVAILABLE_MODELS:
93
+ raise RuntimeError(f"Invalid model: {model}. Choose from: {Copilot.AVAILABLE_MODELS}")
94
+ if real_model == "Smart":
95
+ mode = "smart"
96
+ elif "Think" in real_model:
97
+ mode = "reasoning"
98
+ else:
99
+ mode = "chat"
91
100
  ws.send(json.dumps({
92
101
  "event": "send",
93
102
  "conversationId": conv_id,
94
103
  "content": images + [{"type": "text", "text": prompt_text}],
95
- "mode": mode
104
+ "mode": mode,
105
+ "model": real_model
96
106
  }).encode(), CurlWsFlag.TEXT)
97
107
 
98
108
  prompt_tokens = count_tokens(prompt_text)
@@ -281,8 +291,14 @@ class Copilot(OpenAICompatibleProvider):
281
291
  url = "https://copilot.microsoft.com"
282
292
  conversation_url = f"{url}/c/api/conversations"
283
293
  websocket_url = "wss://copilot.microsoft.com/c/api/chat?api-version=2"
284
-
285
- AVAILABLE_MODELS = ["Copilot", "Think Deeper"]
294
+
295
+ AVAILABLE_MODELS = ["Copilot", "Think Deeper", "Smart"]
296
+ MODEL_ALIASES = {
297
+ "gpt-4o": "Copilot",
298
+ "o4-mini": "Think Deeper",
299
+ "gpt-5": "Smart",
300
+
301
+ }
286
302
 
287
303
  def __init__(self, browser: str = "chrome", tools: Optional[List] = None, **kwargs):
288
304
  self.timeout = 900
@@ -261,6 +261,12 @@ class DeepInfra(OpenAICompatibleProvider):
261
261
  "mistralai/Mistral-Small-3.2-24B-Instruct-2506",
262
262
  "mistralai/Mixtral-8x7B-Instruct-v0.1",
263
263
  "nvidia/Llama-3.1-Nemotron-70B-Instruct",
264
+ "zai-org/GLM-4.5-Air",
265
+ "zai-org/GLM-4.5",
266
+ "zai-org/GLM-4.5V",
267
+ "openai/gpt-oss-120b",
268
+ "openai/gpt-oss-20b",
269
+ "allenai/olmOCR-7B-0725-FP8",
264
270
  ]
265
271
  def __init__(self, browser: str = "chrome", api_key: str = None):
266
272
  self.timeout = None
@@ -114,6 +114,35 @@ MODEL_PROMPT = {
114
114
  }
115
115
  }
116
116
  },
117
+ "claude-opus-4-1-20250805": {
118
+ "apiUrl": "https://fragments.e2b.dev/api/chat",
119
+ "id": "claude-opus-4-1-20250805",
120
+ "name": "Claude Opus 4.1",
121
+ "Knowledge": "2024-10",
122
+ "provider": "Anthropic",
123
+ "providerId": "anthropic",
124
+ "multiModal": True,
125
+ "templates": {
126
+ "system": {
127
+ "intro": "You are Claude Opus 4.1, Anthropic's most capable AI assistant for complex reasoning and analysis. You excel at sophisticated problem-solving, creative thinking, and providing nuanced insights across a wide range of domains. You can analyze images, code, and complex data to deliver comprehensive and thoughtful responses.",
128
+ "principles": ["honesty", "ethics", "diligence", "helpfulness", "accuracy", "thoughtfulness", "creativity"],
129
+ "latex": {
130
+ "inline": "\\(\\nabla \\cdot \\vec{E} = \\frac{\\rho}{\\epsilon_0}\\)",
131
+ "block": "\\begin{align}\n\\nabla \\cdot \\vec{E} &= \\frac{\\rho}{\\epsilon_0} \\\\\n\\nabla \\times \\vec{B} &= \\mu_0\\vec{J} + \\mu_0\\epsilon_0\\frac{\\partial\\vec{E}}{\\partial t} \\\\\nE &= mc^2 \\\\\n\\psi(x,t) &= Ae^{i(kx-\\omega t)}\n\\end{align}"
132
+ }
133
+ }
134
+ },
135
+ "requestConfig": {
136
+ "template": {
137
+ "txt": {
138
+ "name": "chat with users and start role-playing, Above of all: Follow the latest news from users",
139
+ "lib": [""],
140
+ "file": "pages/ChatWithUsers.txt",
141
+ "port": 3000
142
+ }
143
+ }
144
+ }
145
+ },
117
146
  "o1-mini": {
118
147
  "apiUrl": "https://fragments.e2b.dev/api/chat",
119
148
  "id": "o1-mini",
@@ -1013,6 +1042,10 @@ class Completions(BaseCompletions):
1013
1042
  """Enhanced request method with IP rotation, session rotation, and advanced rate limit bypass."""
1014
1043
  url = model_config["apiUrl"]
1015
1044
  target_origin = "https://fragments.e2b.dev"
1045
+
1046
+ # Use client proxies if none provided
1047
+ if proxies is None:
1048
+ proxies = getattr(self._client, "proxies", None)
1016
1049
 
1017
1050
  for attempt in range(retries):
1018
1051
  try:
@@ -1055,13 +1088,13 @@ class Completions(BaseCompletions):
1055
1088
 
1056
1089
  json_data = json.dumps(enhanced_request_body)
1057
1090
 
1058
- # Use curl_cffi session with enhanced fingerprinting
1091
+ # Use curl_cffi session with enhanced fingerprinting and proxy support
1059
1092
  response = self._client.session.post(
1060
1093
  url=url,
1061
1094
  headers=headers,
1062
1095
  data=json_data,
1063
1096
  timeout=timeout or self._client.timeout,
1064
- proxies=proxies or getattr(self._client, "proxies", None),
1097
+ proxies=proxies,
1065
1098
  impersonate=self._client.impersonation
1066
1099
  )
1067
1100
 
@@ -1225,17 +1258,21 @@ class E2B(OpenAICompatibleProvider):
1225
1258
  'deepseek-r1-instruct': 'deepseek-r1'
1226
1259
  }
1227
1260
 
1228
- def __init__(self, retries: int = 3):
1261
+ def __init__(self, retries: int = 3, proxies: Optional[Dict[str, str]] = None, **kwargs):
1229
1262
  """
1230
1263
  Initialize the E2B client with curl_cffi and browser fingerprinting.
1231
1264
 
1232
1265
  Args:
1233
1266
  retries: Number of retries for failed requests.
1267
+ proxies: Proxy configuration for requests.
1268
+ **kwargs: Additional arguments passed to parent class.
1234
1269
  """
1235
1270
  self.timeout = 60 # Default timeout in seconds
1236
- self.proxies = None # Default proxies
1237
1271
  self.retries = retries
1238
-
1272
+
1273
+ # Handle proxy configuration
1274
+ self.proxies = proxies or {}
1275
+
1239
1276
  # Use LitAgent for user-agent
1240
1277
  self.headers = LitAgent().generate_fingerprint()
1241
1278
 
@@ -1243,6 +1280,20 @@ class E2B(OpenAICompatibleProvider):
1243
1280
  self.impersonation = curl_requests.impersonate.DEFAULT_CHROME
1244
1281
  self.session = curl_requests.Session()
1245
1282
  self.session.headers.update(self.headers)
1283
+
1284
+ # Apply proxy configuration if provided
1285
+ if self.proxies:
1286
+ self.session.proxies.update(self.proxies)
1287
+
1288
+ # Initialize bypass session data
1289
+ self._session_rotation_data = {}
1290
+ self._last_rotation_time = 0
1291
+ self._rotation_interval = 300 # Rotate session every 5 minutes
1292
+ self._rate_limit_failures = 0
1293
+ self._max_rate_limit_failures = 3
1294
+
1295
+ # Initialize the chat interface
1296
+ self.chat = Chat(self)
1246
1297
 
1247
1298
  # Initialize bypass session data
1248
1299
  self._session_rotation_data = {}
@@ -1589,13 +1640,13 @@ if __name__ == "__main__":
1589
1640
  print("-" * 80)
1590
1641
  print(f"{'Model':<50} {'Status':<10} {'Response'}")
1591
1642
  print("-" * 80)
1592
- print("\n--- Streaming Simulation Test (gpt-4.1-mini) ---")
1643
+ print("\n--- Streaming Simulation Test (claude-opus-4-1-20250805) ---")
1593
1644
  try:
1594
1645
  client_stream = E2B()
1595
1646
  stream = client_stream.chat.completions.create(
1596
- model="gpt-4.1-mini",
1647
+ model="claude-opus-4-1-20250805",
1597
1648
  messages=[
1598
- {"role": "user", "content": "Write a poem about AI."}
1649
+ {"role": "user", "content": "hi."}
1599
1650
  ],
1600
1651
  stream=True
1601
1652
  )
@@ -1607,6 +1658,7 @@ if __name__ == "__main__":
1607
1658
  print(content, end="", flush=True)
1608
1659
  full_stream_response += content
1609
1660
  print("\n--- End of Stream ---")
1661
+ print(client_stream.proxies)
1610
1662
  if not full_stream_response:
1611
1663
  print(f"{RED}Stream test failed: No content received.{RESET}")
1612
1664
  except Exception as e:
@@ -150,8 +150,9 @@ class Chat(BaseChat):
150
150
 
151
151
  class Flowith(OpenAICompatibleProvider):
152
152
  AVAILABLE_MODELS = [
153
- "gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner", "claude-3.5-haiku",
154
- "gemini-2.0-flash", "gemini-2.5-flash", "grok-3-mini"
153
+ "gpt-5-nano", "gpt-5-mini", "glm-4.5", "gpt-oss-120b", "gpt-oss-20b", "kimi-k2",
154
+ "gpt-4.1", "gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner",
155
+ "gemini-2.5-flash", "grok-3-mini"
155
156
  ]
156
157
 
157
158
  chat: Chat
@@ -170,7 +171,7 @@ if __name__ == "__main__":
170
171
  client = Flowith()
171
172
  messages = [{"role": "user", "content": "Hello, how are you?"}]
172
173
  response = client.chat.completions.create(
173
- model="gpt-4.1-mini",
174
+ model="gpt-5-nano",
174
175
  messages=messages,
175
176
  stream=True
176
177
  )
@@ -0,0 +1,48 @@
1
+ import random
2
+ import string
3
+
4
def generate_api_key_suffix(length: int = 4) -> str:
    """Generate a random API key suffix like 'C1Z5'.

    Args:
        length: Length of the suffix (default: 4).

    Returns:
        A random string of uppercase ASCII letters and digits.
    """
    # Local import so the function stays self-contained if reused elsewhere.
    import secrets

    # Uppercase letters and digits match the observed key format.
    chars = string.ascii_uppercase + string.digits
    # secrets.choice instead of random.choice: API-key material is
    # security-sensitive, so draw from a cryptographically secure source.
    return ''.join(secrets.choice(chars) for _ in range(length))

def generate_full_api_key(prefix: str = "EU1CW20nX5oau42xBSgm") -> str:
    """Generate a full API key with the given prefix pattern.

    Args:
        prefix: The base prefix to use (default uses the pattern from the
            example key).

    Returns:
        A full API key string: the prefix followed by a random 4-character
        suffix like 'C1Z5'.
    """
    # Only the last 4 characters vary between generated keys.
    suffix = generate_api_key_suffix(4)
    return prefix + suffix
31
+
32
if __name__ == "__main__":
    # Demo: print a handful of randomly generated suffixes and full keys.
    print("Generate API key suffix (like C1Z5):")
    for _ in range(5):  # loop index unused; `_` signals a repeat-only loop
        suffix = generate_api_key_suffix()
        print(f" {suffix}")

    print("\nGenerate full API key with prefix:")
    for _ in range(5):
        api_key = generate_full_api_key()
        print(f" {api_key}")

    print("\nGenerate with custom prefix:")
    custom_prefix = "EU1CW20nX5oau42xBSgm"
    for _ in range(3):
        api_key = generate_full_api_key(custom_prefix)
        print(f" {api_key}")