webscout 8.3.6__py3-none-any.whl → 8.3.7__py3-none-any.whl

This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.

Potentially problematic release: this version of webscout has been flagged as potentially problematic.

Files changed (130)
  1. webscout/AIutel.py +2 -0
  2. webscout/Provider/AISEARCH/__init__.py +18 -11
  3. webscout/Provider/AISEARCH/scira_search.py +3 -1
  4. webscout/Provider/Aitopia.py +2 -3
  5. webscout/Provider/Andi.py +3 -3
  6. webscout/Provider/ChatGPTClone.py +1 -1
  7. webscout/Provider/ChatSandbox.py +1 -0
  8. webscout/Provider/Cloudflare.py +1 -1
  9. webscout/Provider/Cohere.py +1 -0
  10. webscout/Provider/Deepinfra.py +7 -10
  11. webscout/Provider/ExaAI.py +1 -1
  12. webscout/Provider/ExaChat.py +1 -80
  13. webscout/Provider/Flowith.py +1 -1
  14. webscout/Provider/Gemini.py +7 -5
  15. webscout/Provider/GeminiProxy.py +1 -0
  16. webscout/Provider/GithubChat.py +3 -1
  17. webscout/Provider/Groq.py +1 -1
  18. webscout/Provider/HeckAI.py +8 -4
  19. webscout/Provider/Jadve.py +23 -38
  20. webscout/Provider/K2Think.py +308 -0
  21. webscout/Provider/Koboldai.py +8 -186
  22. webscout/Provider/LambdaChat.py +2 -4
  23. webscout/Provider/Nemotron.py +3 -4
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OLLAMA.py +1 -0
  26. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  27. webscout/Provider/OPENAI/FalconH1.py +2 -7
  28. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  29. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  30. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  31. webscout/Provider/OPENAI/PI.py +5 -4
  32. webscout/Provider/OPENAI/Qwen3.py +2 -3
  33. webscout/Provider/OPENAI/TogetherAI.py +2 -2
  34. webscout/Provider/OPENAI/TwoAI.py +3 -4
  35. webscout/Provider/OPENAI/__init__.py +17 -58
  36. webscout/Provider/OPENAI/ai4chat.py +313 -303
  37. webscout/Provider/OPENAI/base.py +9 -29
  38. webscout/Provider/OPENAI/chatgpt.py +7 -2
  39. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  40. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  41. webscout/Provider/OPENAI/deepinfra.py +6 -6
  42. webscout/Provider/OPENAI/heckai.py +4 -1
  43. webscout/Provider/OPENAI/netwrck.py +1 -0
  44. webscout/Provider/OPENAI/scirachat.py +6 -0
  45. webscout/Provider/OPENAI/textpollinations.py +3 -11
  46. webscout/Provider/OPENAI/toolbaz.py +14 -11
  47. webscout/Provider/OpenGPT.py +1 -1
  48. webscout/Provider/Openai.py +150 -402
  49. webscout/Provider/PI.py +1 -0
  50. webscout/Provider/Perplexitylabs.py +1 -2
  51. webscout/Provider/QwenLM.py +107 -89
  52. webscout/Provider/STT/__init__.py +17 -2
  53. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  54. webscout/Provider/StandardInput.py +1 -1
  55. webscout/Provider/TTI/__init__.py +18 -12
  56. webscout/Provider/TTS/__init__.py +18 -10
  57. webscout/Provider/TeachAnything.py +1 -0
  58. webscout/Provider/TextPollinationsAI.py +5 -12
  59. webscout/Provider/TogetherAI.py +86 -87
  60. webscout/Provider/TwoAI.py +53 -309
  61. webscout/Provider/TypliAI.py +2 -1
  62. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  63. webscout/Provider/Venice.py +2 -1
  64. webscout/Provider/VercelAI.py +1 -0
  65. webscout/Provider/WiseCat.py +2 -1
  66. webscout/Provider/WrDoChat.py +2 -1
  67. webscout/Provider/__init__.py +18 -86
  68. webscout/Provider/ai4chat.py +1 -1
  69. webscout/Provider/akashgpt.py +7 -10
  70. webscout/Provider/cerebras.py +115 -9
  71. webscout/Provider/chatglm.py +170 -83
  72. webscout/Provider/cleeai.py +1 -2
  73. webscout/Provider/deepseek_assistant.py +1 -1
  74. webscout/Provider/elmo.py +1 -1
  75. webscout/Provider/geminiapi.py +1 -1
  76. webscout/Provider/granite.py +1 -1
  77. webscout/Provider/hermes.py +1 -3
  78. webscout/Provider/julius.py +1 -0
  79. webscout/Provider/learnfastai.py +1 -1
  80. webscout/Provider/llama3mitril.py +1 -1
  81. webscout/Provider/llmchat.py +1 -1
  82. webscout/Provider/llmchatco.py +1 -1
  83. webscout/Provider/meta.py +3 -3
  84. webscout/Provider/oivscode.py +2 -2
  85. webscout/Provider/scira_chat.py +51 -124
  86. webscout/Provider/searchchat.py +1 -0
  87. webscout/Provider/sonus.py +1 -1
  88. webscout/Provider/toolbaz.py +15 -12
  89. webscout/Provider/turboseek.py +31 -22
  90. webscout/Provider/typefully.py +2 -1
  91. webscout/Provider/x0gpt.py +1 -0
  92. webscout/Provider/yep.py +2 -1
  93. webscout/tempid.py +6 -0
  94. webscout/version.py +1 -1
  95. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/METADATA +2 -1
  96. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/RECORD +103 -129
  97. webscout/Provider/AllenAI.py +0 -440
  98. webscout/Provider/Blackboxai.py +0 -793
  99. webscout/Provider/FreeGemini.py +0 -250
  100. webscout/Provider/GptOss.py +0 -207
  101. webscout/Provider/Hunyuan.py +0 -283
  102. webscout/Provider/Kimi.py +0 -445
  103. webscout/Provider/MCPCore.py +0 -322
  104. webscout/Provider/MiniMax.py +0 -207
  105. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  106. webscout/Provider/OPENAI/MiniMax.py +0 -298
  107. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  108. webscout/Provider/OPENAI/copilot.py +0 -321
  109. webscout/Provider/OPENAI/gptoss.py +0 -288
  110. webscout/Provider/OPENAI/kimi.py +0 -469
  111. webscout/Provider/OPENAI/mcpcore.py +0 -431
  112. webscout/Provider/OPENAI/multichat.py +0 -378
  113. webscout/Provider/Reka.py +0 -214
  114. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  115. webscout/Provider/asksteve.py +0 -220
  116. webscout/Provider/copilot.py +0 -441
  117. webscout/Provider/freeaichat.py +0 -294
  118. webscout/Provider/koala.py +0 -182
  119. webscout/Provider/lmarena.py +0 -198
  120. webscout/Provider/monochat.py +0 -275
  121. webscout/Provider/multichat.py +0 -375
  122. webscout/Provider/scnet.py +0 -244
  123. webscout/Provider/talkai.py +0 -194
  124. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  125. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  126. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  127. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  128. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  129. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  130. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
webscout/Provider/TogetherAI.py

@@ -7,13 +7,15 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.litagent import LitAgent

 class TogetherAI(Provider):
     """
     A class to interact with the TogetherAI API.
     """

+    required_auth = True
+
+    # Default models list (will be updated dynamically)
     AVAILABLE_MODELS = [
         "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
         "Qwen/QwQ-32B",
@@ -78,6 +80,53 @@ class TogetherAI(Provider):
         "zai-org/GLM-4.5-Air-FP8"
     ]

+    @classmethod
+    def get_models(cls, api_key: str = None):
+        """Fetch available models from TogetherAI API.
+
+        Args:
+            api_key (str, optional): TogetherAI API key. If not provided, returns default models.
+
+        Returns:
+            list: List of available model IDs
+        """
+        if not api_key:
+            return cls.AVAILABLE_MODELS
+
+        try:
+            # Use a temporary curl_cffi session for this class method
+            temp_session = Session()
+            headers = {
+                "Content-Type": "application/json",
+                "Authorization": f"Bearer {api_key}",
+            }
+
+            response = temp_session.get(
+                "https://api.together.xyz/v1/models",
+                headers=headers,
+                impersonate="chrome110"
+            )
+
+            if response.status_code != 200:
+                return cls.AVAILABLE_MODELS
+
+            data = response.json()
+            if "data" in data and isinstance(data["data"], list):
+                return [model["id"] for model in data["data"]]
+            return cls.AVAILABLE_MODELS
+
+        except (CurlError, Exception):
+            # Fallback to default models list if fetching fails
+            return cls.AVAILABLE_MODELS
+
+    def update_available_models(self, api_key: str):
+        """Update available models by fetching from TogetherAI API.
+
+        Args:
+            api_key (str): TogetherAI API key for fetching models.
+        """
+        self.AVAILABLE_MODELS = self.get_models(api_key)
+
     @staticmethod
     def _togetherai_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
         """Extracts content from TogetherAI stream JSON objects."""
@@ -87,6 +136,7 @@ class TogetherAI(Provider):

     def __init__(
         self,
+        api_key: str,
         is_conversation: bool = True,
         max_tokens: int = 2049,
         timeout: int = 30,
@@ -98,45 +148,48 @@ class TogetherAI(Provider):
         act: str = None,
         model: str = "meta-llama/Llama-3.1-8B-Instruct-Turbo",
         system_prompt: str = "You are a helpful assistant.",
-        browser: str = "chrome"
     ):
-        """Initializes the TogetherAI API client."""
+        """Initializes the TogetherAI API client.
+
+        Args:
+            api_key (str): TogetherAI API key.
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 2049.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): LLM model name. Defaults to "meta-llama/Llama-3.1-8B-Instruct-Turbo".
+            system_prompt (str, optional): System prompt to guide the conversation. Defaults to None.
+        """
+        # Update available models from API
+        self.update_available_models(api_key)
+
+        # Validate model after updating available models
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

         self.api_endpoint = "https://api.together.xyz/v1/chat/completions"
-        self.activation_endpoint = "https://www.codegeneration.ai/activate-v2"
-
-        # Initialize LitAgent
-        self.agent = LitAgent()
-        self.fingerprint = self.agent.generate_fingerprint(browser)
-
-        # Use the fingerprint for headers
-        self.headers = {
-            "Accept": self.fingerprint["accept"],
-            "Accept-Language": self.fingerprint["accept_language"],
-            "Content-Type": "application/json",
-            "Cache-Control": "no-cache",
-            "Origin": "https://www.codegeneration.ai",
-            "Pragma": "no-cache",
-            "Referer": "https://www.codegeneration.ai/",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-site",
-            "User-Agent": self.fingerprint["user_agent"],
-        }
-
-        # Initialize curl_cffi Session
         self.session = Session()
-        self.session.headers.update(self.headers)
-        self.session.proxies = proxies
-        self.system_prompt = system_prompt
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
+        self.api_key = api_key
+        self.model = model
         self.timeout = timeout
         self.last_response = {}
-        self.model = model
-        self._api_key_cache = None
+        self.system_prompt = system_prompt
+
+        self.headers = {
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {self.api_key}",
+        }
+
+        # Update curl_cffi session headers
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies

         self.__available_optimizers = (
             method
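
With this constructor rework, a TogetherAI key becomes a required argument and the browser-fingerprint/activation plumbing is gone. A rough usage sketch under those assumptions; the prompt and key handling are illustrative, and the plain-text streaming of chat() is taken from the old __main__ smoke test removed further below:

import os
from webscout.Provider.TogetherAI import TogetherAI

ai = TogetherAI(
    api_key=os.environ["TOGETHER_API_KEY"],  # illustrative env var; pass your own key
    model="meta-llama/Llama-3.1-8B-Instruct-Turbo",
    timeout=60,
)

# chat() yields plain-text chunks when stream=True.
for chunk in ai.chat("Say 'Hello' in one word", stream=True):
    print(chunk, end="", flush=True)
print()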
@@ -156,45 +209,7 @@ class TogetherAI(Provider):
         )
         self.conversation.history_offset = history_offset

-    def refresh_identity(self, browser: str = None):
-        """
-        Refreshes the browser identity fingerprint.
-
-        Args:
-            browser: Specific browser to use for the new fingerprint
-        """
-        browser = browser or self.fingerprint.get("browser_type", "chrome")
-        self.fingerprint = self.agent.generate_fingerprint(browser)
-
-        # Update headers with new fingerprint
-        self.headers.update({
-            "Accept": self.fingerprint["accept"],
-            "Accept-Language": self.fingerprint["accept_language"],
-            "User-Agent": self.fingerprint["user_agent"],
-        })
-
-        # Update session headers
-        self.session.headers.update(self.headers)
-
-        return self.fingerprint

-    def get_activation_key(self) -> str:
-        """Get API key from activation endpoint"""
-        if self._api_key_cache:
-            return self._api_key_cache
-
-        try:
-            response = self.session.get(
-                self.activation_endpoint,
-                headers={"Accept": "application/json"},
-                timeout=30
-            )
-            response.raise_for_status()
-            activation_data = response.json()
-            self._api_key_cache = activation_data["openAIParams"]["apiKey"]
-            return self._api_key_cache
-        except Exception as e:
-            raise exceptions.FailedToGenerateResponseError(f"Failed to get activation key: {e}")

     def ask(
         self,
@@ -215,10 +230,7 @@
                 )
             else:
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-        if not self.headers.get("Authorization"):
-            api_key = self.get_activation_key()
-            self.headers["Authorization"] = f"Bearer {api_key}"
-            self.session.headers.update(self.headers)
+
         payload = {
             "model": self.model,
             "messages": [
@@ -339,20 +351,7 @@ if __name__ == "__main__":

     for model in TogetherAI.AVAILABLE_MODELS:
         try:
-            test_ai = TogetherAI(model=model, timeout=60)
-            response = test_ai.chat("Say 'Hello' in one word", stream=True)
-            response_text = ""
-            for chunk in response:
-                response_text += chunk
-
-            if response_text and len(response_text.strip()) > 0:
-                status = "✓"
-                # Clean and truncate response
-                clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
-                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
-            else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"\r{model:<50} {status:<10} {display_text}")
+            # Skip testing if no API key is provided
+            print(f"\r{model:<50} {'':<10} Requires API key for testing")
         except Exception as e:
             print(f"\r{model:<50} {'✗':<10} {str(e)}")