webscout 8.3.6__py3-none-any.whl → 8.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this version of webscout has been flagged as a potentially problematic release.
Files changed (130)
  1. webscout/AIutel.py +2 -0
  2. webscout/Provider/AISEARCH/__init__.py +18 -11
  3. webscout/Provider/AISEARCH/scira_search.py +3 -1
  4. webscout/Provider/Aitopia.py +2 -3
  5. webscout/Provider/Andi.py +3 -3
  6. webscout/Provider/ChatGPTClone.py +1 -1
  7. webscout/Provider/ChatSandbox.py +1 -0
  8. webscout/Provider/Cloudflare.py +1 -1
  9. webscout/Provider/Cohere.py +1 -0
  10. webscout/Provider/Deepinfra.py +7 -10
  11. webscout/Provider/ExaAI.py +1 -1
  12. webscout/Provider/ExaChat.py +1 -80
  13. webscout/Provider/Flowith.py +1 -1
  14. webscout/Provider/Gemini.py +7 -5
  15. webscout/Provider/GeminiProxy.py +1 -0
  16. webscout/Provider/GithubChat.py +3 -1
  17. webscout/Provider/Groq.py +1 -1
  18. webscout/Provider/HeckAI.py +8 -4
  19. webscout/Provider/Jadve.py +23 -38
  20. webscout/Provider/K2Think.py +308 -0
  21. webscout/Provider/Koboldai.py +8 -186
  22. webscout/Provider/LambdaChat.py +2 -4
  23. webscout/Provider/Nemotron.py +3 -4
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OLLAMA.py +1 -0
  26. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  27. webscout/Provider/OPENAI/FalconH1.py +2 -7
  28. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  29. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  30. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  31. webscout/Provider/OPENAI/PI.py +5 -4
  32. webscout/Provider/OPENAI/Qwen3.py +2 -3
  33. webscout/Provider/OPENAI/TogetherAI.py +2 -2
  34. webscout/Provider/OPENAI/TwoAI.py +3 -4
  35. webscout/Provider/OPENAI/__init__.py +17 -58
  36. webscout/Provider/OPENAI/ai4chat.py +313 -303
  37. webscout/Provider/OPENAI/base.py +9 -29
  38. webscout/Provider/OPENAI/chatgpt.py +7 -2
  39. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  40. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  41. webscout/Provider/OPENAI/deepinfra.py +6 -6
  42. webscout/Provider/OPENAI/heckai.py +4 -1
  43. webscout/Provider/OPENAI/netwrck.py +1 -0
  44. webscout/Provider/OPENAI/scirachat.py +6 -0
  45. webscout/Provider/OPENAI/textpollinations.py +3 -11
  46. webscout/Provider/OPENAI/toolbaz.py +14 -11
  47. webscout/Provider/OpenGPT.py +1 -1
  48. webscout/Provider/Openai.py +150 -402
  49. webscout/Provider/PI.py +1 -0
  50. webscout/Provider/Perplexitylabs.py +1 -2
  51. webscout/Provider/QwenLM.py +107 -89
  52. webscout/Provider/STT/__init__.py +17 -2
  53. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  54. webscout/Provider/StandardInput.py +1 -1
  55. webscout/Provider/TTI/__init__.py +18 -12
  56. webscout/Provider/TTS/__init__.py +18 -10
  57. webscout/Provider/TeachAnything.py +1 -0
  58. webscout/Provider/TextPollinationsAI.py +5 -12
  59. webscout/Provider/TogetherAI.py +86 -87
  60. webscout/Provider/TwoAI.py +53 -309
  61. webscout/Provider/TypliAI.py +2 -1
  62. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  63. webscout/Provider/Venice.py +2 -1
  64. webscout/Provider/VercelAI.py +1 -0
  65. webscout/Provider/WiseCat.py +2 -1
  66. webscout/Provider/WrDoChat.py +2 -1
  67. webscout/Provider/__init__.py +18 -86
  68. webscout/Provider/ai4chat.py +1 -1
  69. webscout/Provider/akashgpt.py +7 -10
  70. webscout/Provider/cerebras.py +115 -9
  71. webscout/Provider/chatglm.py +170 -83
  72. webscout/Provider/cleeai.py +1 -2
  73. webscout/Provider/deepseek_assistant.py +1 -1
  74. webscout/Provider/elmo.py +1 -1
  75. webscout/Provider/geminiapi.py +1 -1
  76. webscout/Provider/granite.py +1 -1
  77. webscout/Provider/hermes.py +1 -3
  78. webscout/Provider/julius.py +1 -0
  79. webscout/Provider/learnfastai.py +1 -1
  80. webscout/Provider/llama3mitril.py +1 -1
  81. webscout/Provider/llmchat.py +1 -1
  82. webscout/Provider/llmchatco.py +1 -1
  83. webscout/Provider/meta.py +3 -3
  84. webscout/Provider/oivscode.py +2 -2
  85. webscout/Provider/scira_chat.py +51 -124
  86. webscout/Provider/searchchat.py +1 -0
  87. webscout/Provider/sonus.py +1 -1
  88. webscout/Provider/toolbaz.py +15 -12
  89. webscout/Provider/turboseek.py +31 -22
  90. webscout/Provider/typefully.py +2 -1
  91. webscout/Provider/x0gpt.py +1 -0
  92. webscout/Provider/yep.py +2 -1
  93. webscout/tempid.py +6 -0
  94. webscout/version.py +1 -1
  95. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/METADATA +2 -1
  96. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/RECORD +103 -129
  97. webscout/Provider/AllenAI.py +0 -440
  98. webscout/Provider/Blackboxai.py +0 -793
  99. webscout/Provider/FreeGemini.py +0 -250
  100. webscout/Provider/GptOss.py +0 -207
  101. webscout/Provider/Hunyuan.py +0 -283
  102. webscout/Provider/Kimi.py +0 -445
  103. webscout/Provider/MCPCore.py +0 -322
  104. webscout/Provider/MiniMax.py +0 -207
  105. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  106. webscout/Provider/OPENAI/MiniMax.py +0 -298
  107. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  108. webscout/Provider/OPENAI/copilot.py +0 -321
  109. webscout/Provider/OPENAI/gptoss.py +0 -288
  110. webscout/Provider/OPENAI/kimi.py +0 -469
  111. webscout/Provider/OPENAI/mcpcore.py +0 -431
  112. webscout/Provider/OPENAI/multichat.py +0 -378
  113. webscout/Provider/Reka.py +0 -214
  114. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  115. webscout/Provider/asksteve.py +0 -220
  116. webscout/Provider/copilot.py +0 -441
  117. webscout/Provider/freeaichat.py +0 -294
  118. webscout/Provider/koala.py +0 -182
  119. webscout/Provider/lmarena.py +0 -198
  120. webscout/Provider/monochat.py +0 -275
  121. webscout/Provider/multichat.py +0 -375
  122. webscout/Provider/scnet.py +0 -244
  123. webscout/Provider/talkai.py +0 -194
  124. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  125. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  126. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  127. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  128. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  129. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  130. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
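Note on the removals and renames above: modules such as Blackboxai, Kimi, copilot, and freeaichat are deleted outright, Llama3.py becomes Sambanova.py, and GizAI, Marcus, Qodo, and XenAI move under UNFINISHED/. Downstream code that imports these paths directly will break on upgrade. A minimal defensive-import sketch; the module paths come from the list above, but the fallback helper itself is illustrative and not part of webscout:

import importlib

def load_provider_module(new_path: str, old_path: str):
    # Try the 8.3.7 layout first, then fall back to the 8.3.6 location.
    try:
        return importlib.import_module(new_path)
    except ImportError:
        return importlib.import_module(old_path)

# Llama3.py was renamed to Sambanova.py in this release (entry 53 above).
sambanova = load_provider_module("webscout.Provider.Sambanova", "webscout.Provider.Llama3")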

webscout/Provider/K2Think.py (new file)
@@ -0,0 +1,308 @@
+ import json
+ import os
+ from typing import Any, Dict, Optional, Generator, Union, List
+
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+ from webscout.sanitize import sanitize_stream
+
+ class K2Think(Provider):
+     """
+     A class to interact with the K2Think AI API.
+     """
+     required_auth = False
+     AVAILABLE_MODELS = [
+         "MBZUAI-IFM/K2-Think",
+
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         temperature: float = 1,
+         presence_penalty: int = 0,
+         frequency_penalty: int = 0,
+         top_p: float = 1,
+         model: str = "MBZUAI-IFM/K2-Think",
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         base_url: str = "https://www.k2think.ai/api/guest/chat/completions",
+         system_prompt: str = "You are a helpful assistant.",
+         browser: str = "chrome"
+     ):
+         """Initializes the K2Think AI client."""
+         self.url = base_url
+
+         # Initialize LitAgent
+         self.agent = LitAgent()
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Use the fingerprint for headers
+         self.headers = {
+             "Accept": "*/*",
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": self.fingerprint["accept_language"],
+             "Content-Type": "application/json",
+             "User-Agent": self.fingerprint.get("user_agent", ""),
+             "Origin": "https://www.k2think.ai",
+             "Referer": "https://www.k2think.ai/guest",
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-origin",
+             "Sec-Ch-Ua": '"Chromium";v="140", "Not=A?Brand";v="24", "Microsoft Edge";v="140"',
+             "Sec-Ch-Ua-Mobile": "?0",
+             "Sec-Ch-Ua-Platform": '"Windows"',
+             "Priority": "u=1, i"
+         }
+
+         # Initialize curl_cffi Session
+         self.session = Session()
+         # Update curl_cffi session headers and proxies
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies  # Assign proxies directly
+
+         self.system_prompt = system_prompt
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.temperature = temperature
+         self.presence_penalty = presence_penalty
+         self.frequency_penalty = frequency_penalty
+         self.top_p = top_p
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def refresh_identity(self, browser: str = None):
+         """
+         Refreshes the browser identity fingerprint.
+
+         Args:
+             browser: Specific browser to use for the new fingerprint
+         """
+         browser = browser or self.fingerprint.get("browser_type", "chrome")
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Update headers with new fingerprint (only relevant ones)
+         self.headers.update({
+             "Accept-Language": self.fingerprint["accept_language"],
+             "User-Agent": self.fingerprint.get("user_agent", ""),
+         })
+
+         # Update session headers
+         self.session.headers.update(self.headers)
+
+         return self.fingerprint
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise exceptions.FailedToGenerateResponseError(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Payload construction
+         payload = {
+             "stream": stream,
+             "model": self.model,
+             "messages": [
+                 {"role": "system", "content": self.system_prompt},
+                 {"role": "user", "content": conversation_prompt}
+             ],
+             "params": {}
+         }
+
+         def for_stream():
+             try:
+                 # Use curl_cffi session post with impersonate
+                 response = self.session.post(
+                     self.url,
+                     data=json.dumps(payload),
+                     stream=True,
+                     timeout=self.timeout,
+                     impersonate="chrome110"
+                 )
+                 response.raise_for_status()
+
+                 # Extract content using the specified patterns - prioritize answer only
+                 extract_regexes = [
+                     r'<answer>([\s\S]*?)<\/answer>',  # Extract answer content only
+                 ]
+
+                 skip_regexes = [
+                     r'^\s*$',  # Skip empty lines
+                     r'data:\s*\[DONE\]',  # Skip done markers
+                     r'data:\s*$',  # Skip empty data lines
+                     r'^\s*\{\s*\}\s*$',  # Skip empty JSON objects
+                     r'<details type="reasoning"[^>]*>.*?<\/details>',  # Skip reasoning sections entirely
+                 ]
+
+                 streaming_text = ""
+
+                 # Use sanitize_stream to process the response
+                 stream_chunks = sanitize_stream(
+                     response.iter_content(chunk_size=None),
+                     intro_value="data:",
+                     to_json=False,  # Don't parse as JSON, use regex extraction
+                     skip_regexes=skip_regexes,
+                     extract_regexes=extract_regexes,
+                     encoding='utf-8',
+                     yield_raw_on_error=False
+                 )
+
+                 for content_chunk in stream_chunks:
+                     if content_chunk and isinstance(content_chunk, str):
+                         content_cleaned = content_chunk.strip()
+                         if content_cleaned:
+                             streaming_text += content_cleaned
+                             yield {"text": content_cleaned}
+
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)}") from e
+             finally:
+                 # Update history after stream finishes or fails
+                 if streaming_text:
+                     self.last_response = {"text": streaming_text}
+                     self.conversation.update_chat_history(prompt, streaming_text)
+
+         def for_non_stream():
+             try:
+                 # For non-streaming, we still need to handle the stream format
+                 response = self.session.post(
+                     self.url,
+                     data=json.dumps(payload),
+                     stream=True,
+                     timeout=self.timeout,
+                     impersonate="chrome110"
+                 )
+                 response.raise_for_status()
+
+                 # Extract content using the specified patterns
+                 extract_regexes = [
+                     r'<answer>([\s\S]*?)<\/answer>',  # Extract answer content
+                     r'<details type="reasoning"[^>]*>.*?<summary>.*?<\/summary>([\s\S]*?)<\/details>',  # Extract reasoning content
+                 ]
+
+                 skip_regexes = [
+                     r'^\s*$',  # Skip empty lines
+                     r'data:\s*\[DONE\]',  # Skip done markers
+                     r'data:\s*$',  # Skip empty data lines
+                     r'^\s*\{\s*\}\s*$',  # Skip empty JSON objects
+                 ]
+
+                 streaming_text = ""
+
+                 # Use sanitize_stream to process the response
+                 stream_chunks = sanitize_stream(
+                     response.iter_content(chunk_size=None),
+                     intro_value="data:",
+                     to_json=False,  # Don't parse as JSON, use regex extraction
+                     skip_regexes=skip_regexes,
+                     extract_regexes=extract_regexes,
+                     encoding='utf-8',
+                     yield_raw_on_error=False
+                 )
+
+                 for content_chunk in stream_chunks:
+                     if content_chunk and isinstance(content_chunk, str):
+                         content_cleaned = content_chunk.strip()
+                         if content_cleaned:
+                             # Decode JSON escape sequences
+                             content_decoded = content_cleaned.encode().decode('unicode_escape')
+                             streaming_text += content_decoded
+
+                 self.last_response = {"text": streaming_text}
+                 self.conversation.update_chat_history(prompt, streaming_text)
+                 return self.last_response if not raw else streaming_text
+
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+             except Exception as e:
+                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         def for_stream_chat():
+             gen = self.ask(
+                 prompt, stream=True, raw=False,
+                 optimizer=optimizer, conversationally=conversationally
+             )
+             for response_dict in gen:
+                 yield self.get_message(response_dict)
+
+         def for_non_stream_chat():
+             response_data = self.ask(
+                 prompt, stream=False, raw=False,
+                 optimizer=optimizer, conversationally=conversationally
+             )
+             return self.get_message(response_data)
+
+         return for_stream_chat() if stream else for_non_stream_chat()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+ if __name__ == "__main__":
+     # Simple test
+     try:
+         ai = K2Think(model="MBZUAI-IFM/K2-Think", timeout=30)
+         response = ai.chat("What is artificial intelligence?", stream=True)
+         for chunk in response:
+             print(chunk, end="", flush=True)
+         print()
+     except Exception as e:
+         print(f"Error: {type(e).__name__}: {e}")
+         import traceback
+         traceback.print_exc()
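The new provider strips the "data:" prefix from each SSE event, drops <details type="reasoning"> blocks via skip_regexes, and keeps only the <answer>…</answer> span via extract_regexes. A standalone sketch of that extraction logic using plain re, independent of webscout's sanitize_stream; the sample payload text is invented for illustration:

import re

# Sample SSE event shaped like the payloads the regexes above target.
event = ('data: <details type="reasoning" done="true"><summary>Thinking...</summary>'
         'step by step</details><answer>AI is the simulation of human intelligence by machines.</answer>')

body = event.removeprefix("data:").strip()
# Mirror the skip_regexes entry that drops reasoning sections entirely.
body = re.sub(r'<details type="reasoning"[^>]*>.*?</details>', '', body, flags=re.S)
# Mirror the extract_regexes entry that keeps only the <answer> content.
match = re.search(r'<answer>([\s\S]*?)</answer>', body)
print(match.group(1) if match else "")  # -> AI is the simulation of human intelligence by machines.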
webscout/Provider/Koboldai.py
@@ -1,14 +1,15 @@
  import requests
  import json
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
  from webscout import exceptions
  from typing import Union, Any, AsyncGenerator, Dict
  import httpx
  #------------------------------------------------------KOBOLDAI-----------------------------------------------------------
  class KOBOLDAI(Provider):
+     required_auth = False
      def __init__(
          self,
          is_conversation: bool = True,
@@ -199,186 +200,7 @@ class KOBOLDAI(Provider):
          """
          assert isinstance(response, dict), "Response should be of dict data-type only"
          return response.get("token")
- class AsyncKOBOLDAI(AsyncProvider):
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         temperature: float = 1,
-         top_p: float = 1,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-     ):
-         """Instantiate TGPT
-
-         Args:
-             is_conversation (str, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.2.
-             top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
-             timeout (int, optional): Http requesting timeout. Defaults to 30
-             intro (str, optional): Conversation introductory prompt. Defaults to `Conversation.intro`.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http reqiuest proxies (socks). Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.temperature = temperature
-         self.top_p = top_p
-         self.chat_endpoint = (
-             "https://koboldai-koboldcpp-tiefighter.hf.space/api/extra/generate/stream"
-         )
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.headers = {
-             "Content-Type": "application/json",
-             "Accept": "application/json",
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
-
-     async def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[dict, AsyncGenerator]:
-         """Chat with AI asynchronously.
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict|AsyncGenerator : ai content
-         ```json
-         {
-             "token" : "How may I assist you today?"
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         payload = {
-             "prompt": conversation_prompt,
-             "temperature": self.temperature,
-             "top_p": self.top_p,
-         }
-
-         async def for_stream():
-             async with self.session.stream(
-                 "POST", self.chat_endpoint, json=payload, timeout=self.timeout
-             ) as response:
-                 if not response.is_success:
-                     raise exceptions.FailedToGenerateResponseError(
-                         f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
-                     )
-
-                 message_load = ""
-                 async for value in response.aiter_lines():
-                     try:
-                         resp = sanitize_stream(value)
-                         message_load += await self.get_message(resp)
-                         resp["token"] = message_load
-                         self.last_response.update(resp)
-                         yield value if raw else resp
-                     except json.decoder.JSONDecodeError:
-                         pass
-
-             self.conversation.update_chat_history(
-                 prompt, await self.get_message(self.last_response)
-             )
-
-         async def for_non_stream():
-             # let's make use of stream
-             async for _ in for_stream():
-                 pass
-             return self.last_response

-         return for_stream() if stream else await for_non_stream()
-
-     async def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[str, AsyncGenerator]:
-         """Generate response `str` asynchronously.
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         async def for_stream():
-             async_ask = await self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             )
-             async for response in async_ask:
-                 yield await self.get_message(response)
-
-         async def for_non_stream():
-             return await self.get_message(
-                 await self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else await for_non_stream()
-
-     async def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response.get("token")
+ if __name__ == "__main__":
+     koboldai = KOBOLDAI(is_conversation=True, max_tokens=600, temperature=0.7)
+     print(koboldai.chat("Explain quantum computing in simple terms", stream=False))
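With AsyncKOBOLDAI removed, only the synchronous class remains. One hedged way to keep an async call site working is to push the blocking call onto a worker thread; this wrapper is illustrative, not webscout API (the import path follows the module layout shown in this diff):

import asyncio
from webscout.Provider.Koboldai import KOBOLDAI

async def chat_async(prompt: str) -> str:
    # Run the synchronous provider on a worker thread so the event loop stays free.
    koboldai = KOBOLDAI(is_conversation=True, max_tokens=600, temperature=0.7)
    return await asyncio.to_thread(koboldai.chat, prompt)

# asyncio.run(chat_async("Explain quantum computing in simple terms"))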
webscout/Provider/LambdaChat.py
@@ -1,11 +1,9 @@
  from curl_cffi.requests import Session
  from curl_cffi import CurlError
  import json
- import time
  import random
- import re
  import uuid
- from typing import Any, Dict, List, Optional, Union, Generator
+ from typing import Any, Dict, Optional, Union, Generator

  from webscout.AIutel import Conversation, sanitize_stream
  from webscout.AIbase import Provider  # Import sanitize_stream
@@ -18,7 +16,7 @@ class LambdaChat(Provider):
      Supports streaming responses.
      """
      url = "https://lambda.chat"
-
+     required_auth = False
      AVAILABLE_MODELS = [
          "deepseek-llama3.3-70b",
          "apriel-5b-instruct",
webscout/Provider/Nemotron.py
@@ -1,16 +1,15 @@
  import requests
- import json
  import random
  import datetime
- from typing import Any, Dict, Optional, Union, Generator
- from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream
+ from typing import Any, Dict, Union, Generator
+ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
  from webscout.AIbase import Provider
  from webscout import exceptions

  class NEMOTRON(Provider):
      """NEMOTRON provider for interacting with the nemotron.one API."""
      url = "https://nemotron.one/api/chat"
-
+     required_auth = False
      AVAILABLE_MODELS = [
          "gpt4o",
          "nemotron70b",
webscout/Provider/Netwrck.py
@@ -1,5 +1,5 @@
  from typing import Any, Dict, Optional, Generator, Union
- from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream  # Import sanitize_stream
+ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts  # Import sanitize_stream
  from webscout.AIbase import Provider
  from webscout import exceptions
  from webscout.litagent import LitAgent
@@ -12,9 +12,10 @@ class Netwrck(Provider):
      A class to interact with the Netwrck.com API. Supports streaming.
      """
      greeting = """Hello! I'm a helpful assistant. How can I help you today?"""
-
+     required_auth = False
      AVAILABLE_MODELS = [
          "thedrummer/valkyrie-49b-v1",
+         "thedrummer/skyfall-36b-v2",
          "sao10k/l3-euryale-70b",
          "deepseek/deepseek-chat",
          "deepseek/deepseek-r1",
webscout/Provider/OLLAMA.py
@@ -10,6 +10,7 @@ except ImportError as e:
      pass

  class OLLAMA(Provider):
+     required_auth = True
      def __init__(
          self,
          model: str = 'qwen2:0.5b',
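Several hunks in this release add a required_auth class attribute to providers: False for keyless endpoints such as LambdaChat, NEMOTRON, and Netwrck, and True for OLLAMA, which needs a local server. A hedged sketch of how a caller might use that flag to select only keyless providers; the classes and import paths are real per this diff, but collecting them into a list like this is illustrative, not a webscout API:

from webscout.Provider.LambdaChat import LambdaChat
from webscout.Provider.Nemotron import NEMOTRON
from webscout.Provider.OLLAMA import OLLAMA

candidates = [LambdaChat, NEMOTRON, OLLAMA]
# Default to True so providers without the flag are treated as needing auth.
keyless = [cls for cls in candidates if not getattr(cls, "required_auth", True)]
print([cls.__name__ for cls in keyless])  # OLLAMA sets required_auth = True, so it is excluded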
webscout/Provider/OPENAI/Cloudflare.py
@@ -9,8 +9,8 @@ from curl_cffi.requests import Session
  from uuid import uuid4

  # Import base classes and utility structures
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
- from .utils import (
+ from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+ from webscout.Provider.OPENAI.utils import (
      ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
      ChatCompletionMessage, CompletionUsage, count_tokens
  )
@@ -337,20 +337,19 @@ class Cloudflare(OpenAICompatibleProvider):
      def __init__(
          self,
          api_key: Optional[str] = None,  # Not used but included for compatibility
+         proxies: Optional[dict] = None
      ):
          """
          Initialize the Cloudflare client.

          Args:
              api_key: Not used but included for compatibility with OpenAI interface
+             proxies: Optional proxy configuration dictionary
          """
+         super().__init__(proxies=proxies)
          self.timeout = 30
          self.chat_endpoint = "https://playground.ai.cloudflare.com/api/inference"

-         # Initialize session
-         self.session = Session()
-         self.session.proxies = {}
-
          # Set headers
          self.headers = {
              'Accept': 'text/event-stream',
@@ -392,4 +391,4 @@ class Cloudflare(OpenAICompatibleProvider):
      # @classmethod
      # def models(cls):
      #     """Return the list of available models for Cloudflare."""
-     #     return cls.AVAILABLE_MODELS
+     #     return cls.AVAILABLE_MODELS
webscout/Provider/OPENAI/FalconH1.py
@@ -351,12 +351,8 @@ class FalconH1(OpenAICompatibleProvider):
              timeout (int): Default request timeout in seconds (default: 120).
              proxies (Optional[dict]): Optional proxy settings for HTTP requests.
          """
+         super().__init__(proxies=proxies)
          self.timeout = timeout
-         self.session = requests.Session()
-         if proxies:
-             self.session.proxies = proxies
-         else:
-             self.session.proxies = {}
          self.headers = {
              'User-Agent': LitAgent().random(),
              'Accept': '*/*',
@@ -453,5 +449,4 @@ if __name__ == "__main__":
              full_streamed_content += content_piece
          print("\n--- End of Stream ---")
      except Exception as e:
-         print(f"Error in streaming example: {e}")
-
+         print(f"Error in streaming example: {e}")
webscout/Provider/OPENAI/FreeGemini.py
@@ -251,20 +251,18 @@ class FreeGemini(OpenAICompatibleProvider):

      AVAILABLE_MODELS = ["gemini-2.0-flash"]

-     def __init__(
-         self,
-     ):
+     def __init__(self, proxies: Optional[dict] = None):
          """
          Initialize the FreeGemini client.
+
+         Args:
+             proxies: Optional proxy configuration dictionary
          """
+         super().__init__(proxies=proxies)
          self.timeout = 30
          # Update the API endpoint to match the working implementation
          self.api_endpoint = "https://free-gemini.vercel.app/api/google/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse"

-         # Initialize session with curl_cffi for better Cloudflare handling
-         self.session = Session()
-         self.session.proxies = {}
-
          # Use LitAgent for fingerprinting
          self.agent = LitAgent()

@@ -296,4 +294,4 @@ if __name__ == "__main__":
          model="gemini-2.0-flash",
          messages=[{"role": "user", "parts": [{"text": conversation_prompt}]}]
      )
-     print(response.choices[0].message.content)
+     print(response.choices[0].message.content)
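The Cloudflare, FalconH1, and FreeGemini hunks all follow one refactor: per-class session construction is deleted and replaced by super().__init__(proxies=proxies), so session and proxy wiring now lives in OpenAICompatibleProvider (base.py changed by +9 -29 in the file list). A minimal sketch of that pattern; the base-class body here is an assumption for illustration, not the actual base.py:

from typing import Optional
from curl_cffi.requests import Session

class OpenAICompatibleProviderSketch:
    """Illustrative stand-in for webscout's OpenAICompatibleProvider; the real base.py may differ."""

    def __init__(self, proxies: Optional[dict] = None):
        # Centralized session construction: subclasses no longer build their
        # own Session objects or assign proxies themselves.
        self.session = Session()
        self.session.proxies = proxies or {}

class FreeGeminiSketch(OpenAICompatibleProviderSketch):
    def __init__(self, proxies: Optional[dict] = None):
        super().__init__(proxies=proxies)  # mirrors the hunks above
        self.timeout = 30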