webscout 8.2.8__py3-none-any.whl → 8.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (184)
  1. webscout/AIauto.py +32 -14
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +153 -35
  22. webscout/Provider/Deepinfra.py +339 -339
  23. webscout/Provider/ExaChat.py +358 -358
  24. webscout/Provider/Gemini.py +169 -169
  25. webscout/Provider/GithubChat.py +1 -2
  26. webscout/Provider/Glider.py +3 -3
  27. webscout/Provider/HeckAI.py +171 -81
  28. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -735
  29. webscout/Provider/OPENAI/Cloudflare.py +7 -7
  30. webscout/Provider/OPENAI/FreeGemini.py +6 -5
  31. webscout/Provider/OPENAI/NEMOTRON.py +8 -20
  32. webscout/Provider/OPENAI/Qwen3.py +283 -0
  33. webscout/Provider/OPENAI/README.md +952 -1253
  34. webscout/Provider/OPENAI/TwoAI.py +357 -0
  35. webscout/Provider/OPENAI/__init__.py +5 -1
  36. webscout/Provider/OPENAI/ai4chat.py +40 -40
  37. webscout/Provider/OPENAI/api.py +808 -649
  38. webscout/Provider/OPENAI/c4ai.py +3 -3
  39. webscout/Provider/OPENAI/chatgpt.py +555 -555
  40. webscout/Provider/OPENAI/chatgptclone.py +493 -487
  41. webscout/Provider/OPENAI/chatsandbox.py +4 -3
  42. webscout/Provider/OPENAI/copilot.py +242 -0
  43. webscout/Provider/OPENAI/deepinfra.py +5 -2
  44. webscout/Provider/OPENAI/e2b.py +63 -5
  45. webscout/Provider/OPENAI/exaai.py +416 -410
  46. webscout/Provider/OPENAI/exachat.py +444 -443
  47. webscout/Provider/OPENAI/freeaichat.py +2 -2
  48. webscout/Provider/OPENAI/glider.py +5 -2
  49. webscout/Provider/OPENAI/groq.py +5 -2
  50. webscout/Provider/OPENAI/heckai.py +308 -307
  51. webscout/Provider/OPENAI/mcpcore.py +8 -2
  52. webscout/Provider/OPENAI/multichat.py +4 -4
  53. webscout/Provider/OPENAI/netwrck.py +6 -5
  54. webscout/Provider/OPENAI/oivscode.py +287 -0
  55. webscout/Provider/OPENAI/opkfc.py +496 -496
  56. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  57. webscout/Provider/OPENAI/scirachat.py +15 -9
  58. webscout/Provider/OPENAI/sonus.py +304 -303
  59. webscout/Provider/OPENAI/standardinput.py +433 -433
  60. webscout/Provider/OPENAI/textpollinations.py +4 -4
  61. webscout/Provider/OPENAI/toolbaz.py +413 -413
  62. webscout/Provider/OPENAI/typefully.py +3 -3
  63. webscout/Provider/OPENAI/typegpt.py +11 -5
  64. webscout/Provider/OPENAI/uncovrAI.py +463 -462
  65. webscout/Provider/OPENAI/utils.py +90 -79
  66. webscout/Provider/OPENAI/venice.py +431 -425
  67. webscout/Provider/OPENAI/wisecat.py +387 -381
  68. webscout/Provider/OPENAI/writecream.py +3 -3
  69. webscout/Provider/OPENAI/x0gpt.py +365 -378
  70. webscout/Provider/OPENAI/yep.py +39 -13
  71. webscout/Provider/TTI/README.md +55 -101
  72. webscout/Provider/TTI/__init__.py +4 -9
  73. webscout/Provider/TTI/aiarta.py +365 -0
  74. webscout/Provider/TTI/artbit.py +0 -0
  75. webscout/Provider/TTI/base.py +64 -0
  76. webscout/Provider/TTI/fastflux.py +200 -0
  77. webscout/Provider/TTI/magicstudio.py +201 -0
  78. webscout/Provider/TTI/piclumen.py +203 -0
  79. webscout/Provider/TTI/pixelmuse.py +225 -0
  80. webscout/Provider/TTI/pollinations.py +221 -0
  81. webscout/Provider/TTI/utils.py +11 -0
  82. webscout/Provider/TTS/__init__.py +2 -1
  83. webscout/Provider/TTS/base.py +159 -159
  84. webscout/Provider/TTS/openai_fm.py +129 -0
  85. webscout/Provider/TextPollinationsAI.py +308 -308
  86. webscout/Provider/TwoAI.py +239 -44
  87. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  88. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  89. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  90. webscout/Provider/Writecream.py +246 -246
  91. webscout/Provider/__init__.py +2 -0
  92. webscout/Provider/ai4chat.py +33 -8
  93. webscout/Provider/koala.py +169 -169
  94. webscout/Provider/oivscode.py +309 -0
  95. webscout/Provider/samurai.py +3 -2
  96. webscout/Provider/typegpt.py +3 -3
  97. webscout/Provider/uncovr.py +368 -368
  98. webscout/client.py +70 -0
  99. webscout/litprinter/__init__.py +58 -58
  100. webscout/optimizers.py +419 -419
  101. webscout/scout/README.md +3 -1
  102. webscout/scout/core/crawler.py +134 -64
  103. webscout/scout/core/scout.py +148 -109
  104. webscout/scout/element.py +106 -88
  105. webscout/swiftcli/Readme.md +323 -323
  106. webscout/swiftcli/plugins/manager.py +9 -2
  107. webscout/version.py +1 -1
  108. webscout/zeroart/__init__.py +134 -134
  109. webscout/zeroart/effects.py +100 -100
  110. webscout/zeroart/fonts.py +1238 -1238
  111. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/METADATA +159 -35
  112. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/RECORD +116 -161
  113. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  114. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  115. webscout/Litlogger/Readme.md +0 -175
  116. webscout/Litlogger/core/__init__.py +0 -6
  117. webscout/Litlogger/core/level.py +0 -23
  118. webscout/Litlogger/core/logger.py +0 -165
  119. webscout/Litlogger/handlers/__init__.py +0 -12
  120. webscout/Litlogger/handlers/console.py +0 -33
  121. webscout/Litlogger/handlers/file.py +0 -143
  122. webscout/Litlogger/handlers/network.py +0 -173
  123. webscout/Litlogger/styles/__init__.py +0 -7
  124. webscout/Litlogger/styles/colors.py +0 -249
  125. webscout/Litlogger/styles/formats.py +0 -458
  126. webscout/Litlogger/styles/text.py +0 -87
  127. webscout/Litlogger/utils/__init__.py +0 -6
  128. webscout/Litlogger/utils/detectors.py +0 -153
  129. webscout/Litlogger/utils/formatters.py +0 -200
  130. webscout/Provider/TTI/AiForce/README.md +0 -159
  131. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  132. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  133. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  134. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  135. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  136. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  137. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  138. webscout/Provider/TTI/ImgSys/README.md +0 -174
  139. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  140. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  141. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  142. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  143. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  144. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  145. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  146. webscout/Provider/TTI/Nexra/README.md +0 -155
  147. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  148. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  149. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  150. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  151. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  152. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  153. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  154. webscout/Provider/TTI/aiarta/README.md +0 -134
  155. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  156. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  157. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  158. webscout/Provider/TTI/artbit/README.md +0 -100
  159. webscout/Provider/TTI/artbit/__init__.py +0 -22
  160. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  161. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  162. webscout/Provider/TTI/fastflux/README.md +0 -129
  163. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  164. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  165. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  166. webscout/Provider/TTI/huggingface/README.md +0 -114
  167. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  168. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  169. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  170. webscout/Provider/TTI/piclumen/README.md +0 -161
  171. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  172. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  173. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  174. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  175. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  176. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  177. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  178. webscout/Provider/TTI/talkai/README.md +0 -139
  179. webscout/Provider/TTI/talkai/__init__.py +0 -4
  180. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  181. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  182. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  183. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  184. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
webscout/Provider/koala.py
@@ -1,170 +1,170 @@
- import requests
- import re
- from typing import Optional, Union, Any, Dict, Generator
- from uuid import uuid4
-
- from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
- from webscout.AIbase import Provider
- from webscout import exceptions
-
- class KOALA(Provider):
-     """
-     A class to interact with the Koala.sh API, X0GPT-style, without sanitize_stream.
-     """
-     AVAILABLE_MODELS = [
-         "gpt-4.1-mini",
-         "gpt-4.1",
-     ]
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "gpt-4.1",
-         web_search: bool = True,
-     ):
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-         self.session = requests.Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.api_endpoint = "https://koala.sh/api/gpt/"
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-         self.headers = {
-             "accept": "text/event-stream",
-             "accept-encoding": "gzip, deflate, br, zstd",
-             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-             "content-type": "application/json",
-             "dnt": "1",
-             "flag-real-time-data": "true" if web_search else "false",
-             "origin": "https://koala.sh",
-             "referer": "https://koala.sh/chat",
-             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
-         }
-         self.session.headers.update(self.headers)
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     @staticmethod
-     def _koala_extractor(line: str) -> Optional[str]:
-         # Koala returns lines like: data: "Hello" or data: "..."
-         match = re.match(r'data:\s*"(.*)"', line)
-         if match:
-             return match.group(1)
-         return None
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Dict[str, Any]:
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if hasattr(Optimizers, optimizer):
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(f"Optimizer is not valid.")
-         payload = {
-             "input": conversation_prompt,
-             "inputHistory": [],
-             "outputHistory": [],
-             "model": self.model
-         }
-         def for_stream():
-             response = self.session.post(
-                 self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
-             )
-             if not response.ok:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason})"
-                 )
-             streaming_response = ""
-             for line in response.iter_lines(decode_unicode=True):
-                 if not line:
-                     continue
-                 # Only process lines starting with data:
-                 if line.startswith("data:"):
-                     content = self._koala_extractor(line)
-                     if content and content.strip():
-                         streaming_response += content
-                         yield dict(text=content) if not raw else content
-             # Only update chat history if response is not empty
-             if streaming_response.strip():
-                 self.last_response = dict(text=streaming_response)
-                 self.conversation.update_chat_history(
-                     prompt, self.get_message(self.last_response)
-                 )
-         def for_non_stream():
-             # Use streaming logic to collect the full response
-             full_text = ""
-             for chunk in for_stream():
-                 if isinstance(chunk, dict):
-                     full_text += chunk.get("text", "")
-                 elif isinstance(chunk, str):
-                     full_text += chunk
-             # Only update chat history if response is not empty
-             if full_text.strip():
-                 self.last_response = dict(text=full_text)
-                 self.conversation.update_chat_history(
-                     prompt, self.get_message(self.last_response)
-                 )
-             return self.last_response
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[str, Generator[str, None, None]]:
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response.get("text", "")
-
- if __name__ == "__main__":
-     from rich import print
-     ai = KOALA(timeout=60)
-     response = ai.chat("Say 'Hello' in one word", stream=True)
-     for chunk in response:
+ import requests
+ import re
+ from typing import Optional, Union, Any, Dict, Generator
+ from uuid import uuid4
+
+ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+
+ class KOALA(Provider):
+     """
+     A class to interact with the Koala.sh API, X0GPT-style, without sanitize_stream.
+     """
+     AVAILABLE_MODELS = [
+         "gpt-4.1-mini",
+         "gpt-4.1",
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "gpt-4.1",
+         web_search: bool = True,
+     ):
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = "https://koala.sh/api/gpt/"
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.headers = {
+             "accept": "text/event-stream",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "content-type": "application/json",
+             "dnt": "1",
+             "flag-real-time-data": "true" if web_search else "false",
+             "origin": "https://koala.sh",
+             "referer": "https://koala.sh/chat",
+             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
+         }
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     @staticmethod
+     def _koala_extractor(line: str) -> Optional[str]:
+         # Koala returns lines like: data: "Hello" or data: "..."
+         match = re.match(r'data:\s*"(.*)"', line)
+         if match:
+             return match.group(1)
+         return None
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Dict[str, Any]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if hasattr(Optimizers, optimizer):
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not valid.")
+         payload = {
+             "input": conversation_prompt,
+             "inputHistory": [],
+             "outputHistory": [],
+             "model": self.model
+         }
+         def for_stream():
+             response = self.session.post(
+                 self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
+             )
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason})"
+                 )
+             streaming_response = ""
+             for line in response.iter_lines(decode_unicode=True):
+                 if not line:
+                     continue
+                 # Only process lines starting with data:
+                 if line.startswith("data:"):
+                     content = self._koala_extractor(line)
+                     if content and content.strip():
+                         streaming_response += content
+                         yield dict(text=content) if not raw else content
+             # Only update chat history if response is not empty
+             if streaming_response.strip():
+                 self.last_response = dict(text=streaming_response)
+                 self.conversation.update_chat_history(
+                     prompt, self.get_message(self.last_response)
+                 )
+         def for_non_stream():
+             # Use streaming logic to collect the full response
+             full_text = ""
+             for chunk in for_stream():
+                 if isinstance(chunk, dict):
+                     full_text += chunk.get("text", "")
+                 elif isinstance(chunk, str):
+                     full_text += chunk
+             # Only update chat history if response is not empty
+             if full_text.strip():
+                 self.last_response = dict(text=full_text)
+                 self.conversation.update_chat_history(
+                     prompt, self.get_message(self.last_response)
+                 )
+             return self.last_response
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response.get("text", "")
+
+ if __name__ == "__main__":
+     from rich import print
+     ai = KOALA(timeout=60)
+     response = ai.chat("Say 'Hello' in one word", stream=True)
+     for chunk in response:
          print(chunk, end="", flush=True)
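For readers skimming this hunk: the rewritten koala.py drops `sanitize_stream` and parses the Koala.sh event stream with a bare regex. A minimal standalone sketch of that extraction step, using made-up `data:` lines rather than captured traffic:

```python
import re
from typing import Optional

def koala_extractor(line: str) -> Optional[str]:
    # Koala emits SSE lines shaped like: data: "Hello"
    match = re.match(r'data:\s*"(.*)"', line)
    return match.group(1) if match else None

# Illustrative stream; a real response would arrive via response.iter_lines().
for line in ['data: "Hel"', 'data: "lo"', ': keep-alive', 'data: ""']:
    chunk = koala_extractor(line)
    if chunk and chunk.strip():
        print(chunk, end="", flush=True)  # prints "Hello"
print()
```

Note one tradeoff visible in the code above: the regex keeps JSON escape sequences such as `\n` as literal text instead of decoding them, in exchange for skipping per-line JSON parsing.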
webscout/Provider/oivscode.py (new file)
@@ -0,0 +1,309 @@
+ import secrets
+ import requests
+ import json
+ import random
+ import string
+ from typing import Union, Any, Dict, Optional, Generator
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+
+
+ class oivscode(Provider):
+     """
+     A class to interact with a test API.
+     """
+     AVAILABLE_MODELS = [
+         "*",
+         "Qwen/Qwen2.5-72B-Instruct-Turbo",
+         "Qwen/Qwen2.5-Coder-32B-Instruct",
+         "claude-3-5-sonnet-20240620",
+         "claude-3-5-sonnet-20241022",
+         "claude-3-7-sonnet-20250219",
+         "custom/blackbox-base",
+         "custom/blackbox-pro",
+         "custom/blackbox-pro-designer",
+         "custom/blackbox-pro-plus",
+         "deepseek-r1",
+         "deepseek-v3",
+         "deepseek/deepseek-chat",
+         "gemini-2.5-pro-preview-03-25",
+         "gpt-4o-mini",
+         "grok-3-beta",
+         "image-gen",
+         "llama-4-maverick-17b-128e-instruct-fp8",
+         "o1",
+         "o3-mini",
+         "o4-mini",
+         "transcribe",
+         "anthropic/claude-sonnet-4"
+     ]
+
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 1024,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "claude-3-5-sonnet-20240620",
+         system_prompt: str = "You are a helpful AI assistant.",
+
+     ):
+         """
+         Initializes the oivscode with given parameters.
+         """
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoints = [
+             "https://oi-vscode-server.onrender.com/v1/chat/completions",
+             "https://oi-vscode-server-2.onrender.com/v1/chat/completions",
+             "https://oi-vscode-server-5.onrender.com/v1/chat/completions",
+             "https://oi-vscode-server-0501.onrender.com/v1/chat/completions"
+         ]
+         self.api_endpoint = random.choice(self.api_endpoints)
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.system_prompt = system_prompt
+         self.headers = {
+             "accept": "*/*",
+             "accept-language": "en-US,en;q=0.9,en-GB;q=0.8,en-IN;q=0.7",
+             "cache-control": "no-cache",
+             "content-type": "application/json",
+             "pragma": "no-cache",
+             "priority": "u=1, i",
+             "sec-ch-ua": '"Not A(Brand";v="8", "Chromium";v="132", "Microsoft Edge";v="132"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": '"Windows"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "same-site",
+         }
+         self.userid = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(21))
+         self.headers["userid"] = self.userid
+
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def _post_with_failover(self, payload, stream, timeout):
+         """Try all endpoints until one succeeds, else raise last error."""
+         endpoints = self.api_endpoints.copy()
+         random.shuffle(endpoints)
+         last_exception = None
+         for endpoint in endpoints:
+             try:
+                 response = self.session.post(endpoint, json=payload, stream=stream, timeout=timeout)
+                 if not response.ok:
+                     last_exception = exceptions.FailedToGenerateResponseError(
+                         f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                     )
+                     continue
+                 return response
+             except Exception as e:
+                 last_exception = e
+                 continue
+         if last_exception:
+             raise last_exception
+         raise exceptions.FailedToGenerateResponseError("All API endpoints failed.")
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
+         """Chat with AI (DeepInfra-style streaming and non-streaming)"""
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         payload = {
+             "model": self.model,
+             "messages": [
+                 {"role": "system", "content": self.system_prompt},
+                 {"role": "user", "content": conversation_prompt},
+             ],
+             "stream": stream
+         }
+
+         def for_stream():
+             streaming_text = ""
+             try:
+                 response = self._post_with_failover(payload, stream=True, timeout=self.timeout)
+                 response.raise_for_status()
+                 # Use sanitize_stream for robust OpenAI-style streaming
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None),
+                     intro_value="data:",
+                     to_json=True,
+                     skip_markers=["[DONE]"],
+                     content_extractor=lambda chunk: chunk.get("choices", [{}])[0].get("delta", {}).get("content") if isinstance(chunk, dict) else None,
+                     yield_raw_on_error=False
+                 )
+                 for content_chunk in processed_stream:
+                     if content_chunk and isinstance(content_chunk, str):
+                         streaming_text += content_chunk
+                         resp = dict(text=content_chunk)
+                         yield resp if not raw else content_chunk
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Streaming request failed: {e}") from e
+             finally:
+                 if streaming_text:
+                     self.last_response = {"text": streaming_text}
+                     self.conversation.update_chat_history(prompt, streaming_text)
+
+         def for_non_stream():
+             try:
+                 response = self._post_with_failover(payload, stream=False, timeout=self.timeout)
+                 response.raise_for_status()
+                 response_text = response.text
+                 processed_stream = sanitize_stream(
+                     data=response_text,
+                     to_json=True,
+                     intro_value=None,
+                     content_extractor=lambda chunk: chunk.get("choices", [{}])[0].get("message", {}).get("content") if isinstance(chunk, dict) else None,
+                     yield_raw_on_error=False
+                 )
+                 content = next(processed_stream, None)
+                 content = content if isinstance(content, str) else ""
+                 self.last_response = {"text": content}
+                 self.conversation.update_chat_history(prompt, content)
+                 return self.last_response if not raw else content
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Non-streaming request failed: {e}") from e
+
+         return for_stream() if stream else for_non_stream()
+
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message content from response, handling both streaming and non-streaming formats."""
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         # Streaming chunk: choices[0]["delta"]["content"]
+         if "choices" in response and response["choices"]:
+             choice = response["choices"][0]
+             if "delta" in choice and "content" in choice["delta"]:
+                 return choice["delta"]["content"]
+             if "message" in choice and "content" in choice["message"]:
+                 return choice["message"]["content"]
+         # Fallback for non-standard or legacy responses
+         if "text" in response:
+             return response["text"]
+         return ""
+
+     # def fetch_available_models(self):
+     #     """Fetches available models from the /models endpoint of all API endpoints and prints models per endpoint."""
+     #     endpoints = self.api_endpoints.copy()
+     #     random.shuffle(endpoints)
+     #     results = {}
+     #     errors = []
+     #     for endpoint in endpoints:
+     #         models_url = endpoint.replace('/v1/chat/completions', '/v1/models')
+     #         try:
+     #             response = self.session.get(models_url, timeout=self.timeout)
+     #             if response.ok:
+     #                 data = response.json()
+     #                 if isinstance(data, dict) and "data" in data:
+     #                     models = [m["id"] if isinstance(m, dict) and "id" in m else m for m in data["data"]]
+     #                 elif isinstance(data, list):
+     #                     models = data
+     #                 else:
+     #                     models = list(data.keys()) if isinstance(data, dict) else []
+     #                 results[models_url] = models
+     #             else:
+     #                 errors.append(f"Failed to fetch models from {models_url}: {response.status_code} {response.text}")
+     #         except Exception as e:
+     #             errors.append(f"Error fetching from {models_url}: {e}")
+     #     if results:
+     #         for url, models in results.items():
+     #             print(f"Models from {url}:")
+     #             if models:
+     #                 for m in sorted(models):
+     #                     print(f" {m}")
+     #             else:
+     #                 print(" No models found.")
+     #         return results
+     #     else:
+     #         print("No models found from any endpoint.")
+     #         for err in errors:
+     #             print(err)
+     #         return {}
+
+ if __name__ == "__main__":
+     from rich import print
+     chatbot = oivscode()
+     print(chatbot.fetch_available_models())
+     response = chatbot.chat(input(">>> "), stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
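The core novelty in the new oivscode provider is `_post_with_failover`, which shuffles a list of mirror endpoints and returns the first response that comes back OK, re-raising the last failure otherwise. A generic sketch of the same pattern, with placeholder URLs (the real mirrors are listed in the hunk above):

```python
import random
import requests

def post_with_failover(session: requests.Session, endpoints: list,
                       payload: dict, timeout: int = 30) -> requests.Response:
    """Try shuffled endpoints in turn; return the first OK response or re-raise the last error."""
    candidates = endpoints.copy()
    random.shuffle(candidates)  # spread load across mirrors
    last_error: Exception = RuntimeError("no endpoints configured")
    for url in candidates:
        try:
            resp = session.post(url, json=payload, timeout=timeout)
            if resp.ok:
                return resp
            last_error = RuntimeError(f"{url} -> {resp.status_code} {resp.reason}")
        except requests.RequestException as exc:  # connect/read failures fall through to the next mirror
            last_error = exc
    raise last_error

# Hypothetical usage (placeholder hosts, not the provider's real mirrors):
# resp = post_with_failover(
#     requests.Session(),
#     ["https://mirror-a.example/v1/chat/completions",
#      "https://mirror-b.example/v1/chat/completions"],
#     {"model": "gpt-4o-mini", "messages": [], "stream": False},
# )
```

One caveat visible in the hunk itself: the `__main__` block calls `chatbot.fetch_available_models()`, but that method ships commented out, so the demo raises `AttributeError` as published.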
webscout/Provider/samurai.py
@@ -22,6 +22,7 @@ class samurai(Provider):
          "gpt-4o",
          "o3-mini",
          "Claude-sonnet-3.7",
+         "uncensored-r1",
          "anthropic/claude-3.5-sonnet",
          "gemini-1.5-pro",
          "gemini-1.5-pro-latest",
@@ -56,9 +57,9 @@ class samurai(Provider):
          system_prompt: str = "You are a helpful assistant."
      ):
          """Initializes the Custom API client."""
-         self.url = "https://vmxvcq-5000.csb.app/v1/chat/completions"
+         self.url = "https://newapi-9qln.onrender.com/v1/chat/completions"
          self.headers = {
-             "Authorization": "Bearer Public-Samurai-001",
+             "Authorization": "Bearer Samurai-AP1-Fr33",
              "Content-Type": "application/json"
          }
          self.session = Session()
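For context on the samurai change: only the host and bearer token moved; the provider still targets an OpenAI-compatible `/v1/chat/completions` route. A hedged sketch of the request the updated client would issue (the body fields follow the standard OpenAI chat shape and are assumptions, not taken from this diff):

```python
import requests

url = "https://newapi-9qln.onrender.com/v1/chat/completions"  # new endpoint from the diff
headers = {
    "Authorization": "Bearer Samurai-AP1-Fr33",  # new public token from the diff
    "Content-Type": "application/json",
}
payload = {  # assumed OpenAI-style body
    "model": "uncensored-r1",  # model newly added to AVAILABLE_MODELS above
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say hello in one word."},
    ],
}
response = requests.post(url, headers=headers, json=payload, timeout=30)
print(response.json()["choices"][0]["message"]["content"])
```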