webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the registry advisory for more details.

Files changed (197):
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
@@ -14,11 +14,11 @@ class IBMGranite(Provider):
14
14
  using Lit agent for the user agent.
15
15
  """
16
16
 
17
- AVAILABLE_MODELS = ["granite-3-8b-instruct", "granite-3-2-8b-instruct"]
17
+ AVAILABLE_MODELS = ["granite-3-8b-instruct", "granite-3-2-8b-instruct", "granite-3-3-8b-instruct"]
18
18
 
19
19
  def __init__(
20
20
  self,
21
- api_key: str,
21
+ api_key: str = None,
22
22
  is_conversation: bool = True,
23
23
  max_tokens: int = 600, # Note: max_tokens is not used by this API
24
24
  timeout: int = 30,
@@ -28,7 +28,7 @@ class IBMGranite(Provider):
28
28
  proxies: dict = {},
29
29
  history_offset: int = 10250,
30
30
  act: str = None,
31
- model: str = "granite-3-2-8b-instruct",
31
+ model: str = "granite-3-3-8b-instruct",
32
32
  system_prompt: str = "You are a helpful AI assistant.",
33
33
  thinking: bool = False,
34
34
  ):
@@ -36,6 +36,10 @@ class IBMGranite(Provider):
36
36
  if model not in self.AVAILABLE_MODELS:
37
37
  raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
38
38
 
39
+ # Auto-generate API key if not provided or empty
40
+ if not api_key:
41
+ api_key = self.generate_api_key()
42
+
39
43
  # Initialize curl_cffi Session
40
44
  self.session = Session()
41
45
  self.is_conversation = is_conversation
@@ -55,6 +59,7 @@ class IBMGranite(Provider):
55
59
  "content-type": "application/json",
56
60
  "origin": "https://www.ibm.com", # Keep origin
57
61
  "referer": "https://www.ibm.com/", # Keep referer
62
+ "User-Agent": Lit().random(),
58
63
  }
59
64
  self.headers["Authorization"] = f"Bearer {api_key}"
60
65
 
@@ -84,6 +89,36 @@ class IBMGranite(Provider):
84
89
  return chunk[1]
85
90
  return None
86
91
 
92
+ @staticmethod
93
+ def generate_api_key() -> str:
94
+ """
95
+ Auto-generate an API key (sessionId) by making a GET request to the Granite auth endpoint.
96
+ Returns:
97
+ str: The sessionId to be used as the API key.
98
+ Raises:
99
+ Exception: If the sessionId cannot be retrieved.
100
+ """
101
+ session = Session()
102
+ headers = {
103
+ "User-Agent": Lit().random(),
104
+ "Origin": "https://www.ibm.com",
105
+ "Referer": "https://d18n68ssusgr7r.cloudfront.net/",
106
+ "Accept": "application/json,application/jsonl",
107
+ }
108
+ session.headers.update(headers)
109
+ url = "https://d18n68ssusgr7r.cloudfront.net/v1/auth"
110
+ resp = session.get(url, timeout=15, impersonate="chrome110")
111
+ if resp.status_code != 200:
112
+ raise Exception(f"Failed to get Granite API key: {resp.status_code} - {resp.text}")
113
+ try:
114
+ data = resp.json()
115
+ session_id = data.get("sessionId")
116
+ if not session_id:
117
+ raise Exception(f"No sessionId in Granite auth response: {data}")
118
+ return session_id
119
+ except Exception as e:
120
+ raise Exception(f"Failed to parse Granite auth response: {e}")
121
+
87
122
  def ask(
88
123
  self,
89
124
  prompt: str,
@@ -117,8 +152,9 @@ class IBMGranite(Provider):
117
152
  {"role": "system", "content": self.system_prompt},
118
153
  {"role": "user", "content": conversation_prompt},
119
154
  ],
120
- "stream": True # API seems to require stream=True based on response format
121
155
  }
156
+ if self.thinking:
157
+ payload["thinking"] = True
122
158
 
123
159
  def for_stream():
124
160
  streaming_text = "" # Initialize outside try block
@@ -227,9 +263,8 @@ if __name__ == "__main__":
227
263
  from rich import print
228
264
  # Example usage: Initialize without logging.
229
265
  ai = IBMGranite(
230
- api_key="", # press f12 to see the API key
231
266
  thinking=True,
232
267
  )
233
- response = ai.chat("write a poem about AI", stream=True)
268
+ response = ai.chat("How many r in strawberry", stream=True)
234
269
  for chunk in response:
235
270
  print(chunk, end="", flush=True)
@@ -1,170 +1,170 @@
1
- import requests
2
- import re
3
- from typing import Optional, Union, Any, Dict, Generator
4
- from uuid import uuid4
5
-
6
- from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
7
- from webscout.AIbase import Provider
8
- from webscout import exceptions
9
-
10
- class KOALA(Provider):
11
- """
12
- A class to interact with the Koala.sh API, X0GPT-style, without sanitize_stream.
13
- """
14
- AVAILABLE_MODELS = [
15
- "gpt-4.1-mini",
16
- "gpt-4.1",
17
- ]
18
-
19
- def __init__(
20
- self,
21
- is_conversation: bool = True,
22
- max_tokens: int = 600,
23
- timeout: int = 30,
24
- intro: str = None,
25
- filepath: str = None,
26
- update_file: bool = True,
27
- proxies: dict = {},
28
- history_offset: int = 10250,
29
- act: str = None,
30
- model: str = "gpt-4.1",
31
- web_search: bool = True,
32
- ):
33
- if model not in self.AVAILABLE_MODELS:
34
- raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
35
- self.session = requests.Session()
36
- self.is_conversation = is_conversation
37
- self.max_tokens_to_sample = max_tokens
38
- self.api_endpoint = "https://koala.sh/api/gpt/"
39
- self.timeout = timeout
40
- self.last_response = {}
41
- self.model = model
42
- self.headers = {
43
- "accept": "text/event-stream",
44
- "accept-encoding": "gzip, deflate, br, zstd",
45
- "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
46
- "content-type": "application/json",
47
- "dnt": "1",
48
- "flag-real-time-data": "true" if web_search else "false",
49
- "origin": "https://koala.sh",
50
- "referer": "https://koala.sh/chat",
51
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
52
- }
53
- self.session.headers.update(self.headers)
54
- Conversation.intro = (
55
- AwesomePrompts().get_act(
56
- act, raise_not_found=True, default=None, case_insensitive=True
57
- )
58
- if act
59
- else intro or Conversation.intro
60
- )
61
- self.conversation = Conversation(
62
- is_conversation, self.max_tokens_to_sample, filepath, update_file
63
- )
64
- self.conversation.history_offset = history_offset
65
- self.session.proxies = proxies
66
-
67
- @staticmethod
68
- def _koala_extractor(line: str) -> Optional[str]:
69
- # Koala returns lines like: data: "Hello" or data: "..."
70
- match = re.match(r'data:\s*"(.*)"', line)
71
- if match:
72
- return match.group(1)
73
- return None
74
-
75
- def ask(
76
- self,
77
- prompt: str,
78
- stream: bool = False,
79
- raw: bool = False,
80
- optimizer: str = None,
81
- conversationally: bool = False,
82
- ) -> Dict[str, Any]:
83
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
84
- if optimizer:
85
- if hasattr(Optimizers, optimizer):
86
- conversation_prompt = getattr(Optimizers, optimizer)(
87
- conversation_prompt if conversationally else prompt
88
- )
89
- else:
90
- raise Exception(f"Optimizer is not valid.")
91
- payload = {
92
- "input": conversation_prompt,
93
- "inputHistory": [],
94
- "outputHistory": [],
95
- "model": self.model
96
- }
97
- def for_stream():
98
- response = self.session.post(
99
- self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
100
- )
101
- if not response.ok:
102
- raise exceptions.FailedToGenerateResponseError(
103
- f"Failed to generate response - ({response.status_code}, {response.reason})"
104
- )
105
- streaming_response = ""
106
- for line in response.iter_lines(decode_unicode=True):
107
- if not line:
108
- continue
109
- # Only process lines starting with data:
110
- if line.startswith("data:"):
111
- content = self._koala_extractor(line)
112
- if content and content.strip():
113
- streaming_response += content
114
- yield dict(text=content) if not raw else content
115
- # Only update chat history if response is not empty
116
- if streaming_response.strip():
117
- self.last_response = dict(text=streaming_response)
118
- self.conversation.update_chat_history(
119
- prompt, self.get_message(self.last_response)
120
- )
121
- def for_non_stream():
122
- # Use streaming logic to collect the full response
123
- full_text = ""
124
- for chunk in for_stream():
125
- if isinstance(chunk, dict):
126
- full_text += chunk.get("text", "")
127
- elif isinstance(chunk, str):
128
- full_text += chunk
129
- # Only update chat history if response is not empty
130
- if full_text.strip():
131
- self.last_response = dict(text=full_text)
132
- self.conversation.update_chat_history(
133
- prompt, self.get_message(self.last_response)
134
- )
135
- return self.last_response
136
- return for_stream() if stream else for_non_stream()
137
-
138
- def chat(
139
- self,
140
- prompt: str,
141
- stream: bool = False,
142
- optimizer: str = None,
143
- conversationally: bool = False,
144
- ) -> Union[str, Generator[str, None, None]]:
145
- def for_stream():
146
- for response in self.ask(
147
- prompt, True, optimizer=optimizer, conversationally=conversationally
148
- ):
149
- yield self.get_message(response)
150
- def for_non_stream():
151
- return self.get_message(
152
- self.ask(
153
- prompt,
154
- False,
155
- optimizer=optimizer,
156
- conversationally=conversationally,
157
- )
158
- )
159
- return for_stream() if stream else for_non_stream()
160
-
161
- def get_message(self, response: dict) -> str:
162
- assert isinstance(response, dict), "Response should be of dict data-type only"
163
- return response.get("text", "")
164
-
165
- if __name__ == "__main__":
166
- from rich import print
167
- ai = KOALA(timeout=60)
168
- response = ai.chat("Say 'Hello' in one word", stream=True)
169
- for chunk in response:
1
+ import requests
2
+ import re
3
+ from typing import Optional, Union, Any, Dict, Generator
4
+ from uuid import uuid4
5
+
6
+ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
7
+ from webscout.AIbase import Provider
8
+ from webscout import exceptions
9
+
10
class KOALA(Provider):
    """Provider for the Koala.sh chat API (SSE streaming endpoint).

    Posts to https://koala.sh/api/gpt/ and parses ``data: "..."`` event lines,
    X0GPT-style, without sanitize_stream.
    """

    AVAILABLE_MODELS = [
        "gpt-4.1-mini",
        "gpt-4.1",
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "gpt-4.1",
        web_search: bool = True,
    ):
        """Initialise the Koala provider.

        Args:
            is_conversation: Keep a running conversation history.
            max_tokens: Advisory token budget (not enforced by this API).
            timeout: HTTP timeout in seconds.
            intro: Optional conversation intro prompt.
            filepath: Optional path for persisting conversation history.
            update_file: Write history updates back to ``filepath``.
            proxies: requests-style proxy mapping.
            history_offset: Maximum history length kept in the prompt.
            act: Optional AwesomePrompts persona key.
            model: One of ``AVAILABLE_MODELS``.
            web_search: Sets the API's ``flag-real-time-data`` header.

        Raises:
            ValueError: If ``model`` is not in ``AVAILABLE_MODELS``.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://koala.sh/api/gpt/"
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.headers = {
            "accept": "text/event-stream",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
            "content-type": "application/json",
            "dnt": "1",
            "flag-real-time-data": "true" if web_search else "false",
            "origin": "https://koala.sh",
            "referer": "https://koala.sh/chat",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
        }
        self.session.headers.update(self.headers)
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    @staticmethod
    def _koala_extractor(line: str) -> Optional[str]:
        """Extract the quoted payload from a ``data: "..."`` SSE line.

        NOTE(review): the payload is regex-extracted, not json.loads-decoded,
        so JSON escapes such as ``\\n`` stay escaped — preserved as-is because
        callers may rely on the current behavior.
        """
        match = re.match(r'data:\s*"(.*)"', line)
        if match:
            return match.group(1)
        return None

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any]:
        """Send ``prompt`` to the API.

        Args:
            prompt: User prompt.
            stream: Yield chunks as they arrive instead of returning the full text.
            raw: Yield plain strings instead of ``{"text": ...}`` dicts.
            optimizer: Optional name of an ``Optimizers`` static method.
            conversationally: Apply the optimizer to the full conversation prompt.

        Returns:
            The final response dict, or a generator of chunks when ``stream``.

        Raises:
            Exception: If ``optimizer`` is not a valid ``Optimizers`` attribute.
            exceptions.FailedToGenerateResponseError: On non-2xx HTTP status.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if hasattr(Optimizers, optimizer):
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not valid.")
        payload = {
            "input": conversation_prompt,
            "inputHistory": [],
            "outputHistory": [],
            "model": self.model
        }

        def for_stream():
            response = self.session.post(
                self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
            )
            if not response.ok:
                raise exceptions.FailedToGenerateResponseError(
                    f"Failed to generate response - ({response.status_code}, {response.reason})"
                )
            streaming_response = ""
            for line in response.iter_lines(decode_unicode=True):
                if not line:
                    continue
                # Only process SSE data lines.
                if line.startswith("data:"):
                    content = self._koala_extractor(line)
                    if content and content.strip():
                        streaming_response += content
                        yield dict(text=content) if not raw else content
            # Record the exchange once, only if the API produced something.
            if streaming_response.strip():
                self.last_response = dict(text=streaming_response)
                self.conversation.update_chat_history(
                    prompt, self.get_message(self.last_response)
                )

        def for_non_stream():
            # Drain the streaming generator. It already sets `last_response`
            # and updates the conversation history on exhaustion, so updating
            # history here as well (as the original did) would duplicate
            # every exchange in the chat history.
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """Return the response text, or a generator of text chunks when ``stream``."""
        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Extract the text field from a response dict."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response.get("text", "")
164
+
165
if __name__ == "__main__":
    from rich import print

    # Quick manual smoke test: stream a one-word reply to stdout.
    bot = KOALA(timeout=60)
    for piece in bot.chat("Say 'Hello' in one word", stream=True):
        print(piece, end="", flush=True)