webscout 8.2.8__py3-none-any.whl → 8.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (184)
  1. webscout/AIauto.py +32 -14
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +153 -35
  22. webscout/Provider/Deepinfra.py +339 -339
  23. webscout/Provider/ExaChat.py +358 -358
  24. webscout/Provider/Gemini.py +169 -169
  25. webscout/Provider/GithubChat.py +1 -2
  26. webscout/Provider/Glider.py +3 -3
  27. webscout/Provider/HeckAI.py +171 -81
  28. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -735
  29. webscout/Provider/OPENAI/Cloudflare.py +7 -7
  30. webscout/Provider/OPENAI/FreeGemini.py +6 -5
  31. webscout/Provider/OPENAI/NEMOTRON.py +8 -20
  32. webscout/Provider/OPENAI/Qwen3.py +283 -0
  33. webscout/Provider/OPENAI/README.md +952 -1253
  34. webscout/Provider/OPENAI/TwoAI.py +357 -0
  35. webscout/Provider/OPENAI/__init__.py +5 -1
  36. webscout/Provider/OPENAI/ai4chat.py +40 -40
  37. webscout/Provider/OPENAI/api.py +808 -649
  38. webscout/Provider/OPENAI/c4ai.py +3 -3
  39. webscout/Provider/OPENAI/chatgpt.py +555 -555
  40. webscout/Provider/OPENAI/chatgptclone.py +493 -487
  41. webscout/Provider/OPENAI/chatsandbox.py +4 -3
  42. webscout/Provider/OPENAI/copilot.py +242 -0
  43. webscout/Provider/OPENAI/deepinfra.py +5 -2
  44. webscout/Provider/OPENAI/e2b.py +63 -5
  45. webscout/Provider/OPENAI/exaai.py +416 -410
  46. webscout/Provider/OPENAI/exachat.py +444 -443
  47. webscout/Provider/OPENAI/freeaichat.py +2 -2
  48. webscout/Provider/OPENAI/glider.py +5 -2
  49. webscout/Provider/OPENAI/groq.py +5 -2
  50. webscout/Provider/OPENAI/heckai.py +308 -307
  51. webscout/Provider/OPENAI/mcpcore.py +8 -2
  52. webscout/Provider/OPENAI/multichat.py +4 -4
  53. webscout/Provider/OPENAI/netwrck.py +6 -5
  54. webscout/Provider/OPENAI/oivscode.py +287 -0
  55. webscout/Provider/OPENAI/opkfc.py +496 -496
  56. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  57. webscout/Provider/OPENAI/scirachat.py +15 -9
  58. webscout/Provider/OPENAI/sonus.py +304 -303
  59. webscout/Provider/OPENAI/standardinput.py +433 -433
  60. webscout/Provider/OPENAI/textpollinations.py +4 -4
  61. webscout/Provider/OPENAI/toolbaz.py +413 -413
  62. webscout/Provider/OPENAI/typefully.py +3 -3
  63. webscout/Provider/OPENAI/typegpt.py +11 -5
  64. webscout/Provider/OPENAI/uncovrAI.py +463 -462
  65. webscout/Provider/OPENAI/utils.py +90 -79
  66. webscout/Provider/OPENAI/venice.py +431 -425
  67. webscout/Provider/OPENAI/wisecat.py +387 -381
  68. webscout/Provider/OPENAI/writecream.py +3 -3
  69. webscout/Provider/OPENAI/x0gpt.py +365 -378
  70. webscout/Provider/OPENAI/yep.py +39 -13
  71. webscout/Provider/TTI/README.md +55 -101
  72. webscout/Provider/TTI/__init__.py +4 -9
  73. webscout/Provider/TTI/aiarta.py +365 -0
  74. webscout/Provider/TTI/artbit.py +0 -0
  75. webscout/Provider/TTI/base.py +64 -0
  76. webscout/Provider/TTI/fastflux.py +200 -0
  77. webscout/Provider/TTI/magicstudio.py +201 -0
  78. webscout/Provider/TTI/piclumen.py +203 -0
  79. webscout/Provider/TTI/pixelmuse.py +225 -0
  80. webscout/Provider/TTI/pollinations.py +221 -0
  81. webscout/Provider/TTI/utils.py +11 -0
  82. webscout/Provider/TTS/__init__.py +2 -1
  83. webscout/Provider/TTS/base.py +159 -159
  84. webscout/Provider/TTS/openai_fm.py +129 -0
  85. webscout/Provider/TextPollinationsAI.py +308 -308
  86. webscout/Provider/TwoAI.py +239 -44
  87. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  88. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  89. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  90. webscout/Provider/Writecream.py +246 -246
  91. webscout/Provider/__init__.py +2 -0
  92. webscout/Provider/ai4chat.py +33 -8
  93. webscout/Provider/koala.py +169 -169
  94. webscout/Provider/oivscode.py +309 -0
  95. webscout/Provider/samurai.py +3 -2
  96. webscout/Provider/typegpt.py +3 -3
  97. webscout/Provider/uncovr.py +368 -368
  98. webscout/client.py +70 -0
  99. webscout/litprinter/__init__.py +58 -58
  100. webscout/optimizers.py +419 -419
  101. webscout/scout/README.md +3 -1
  102. webscout/scout/core/crawler.py +134 -64
  103. webscout/scout/core/scout.py +148 -109
  104. webscout/scout/element.py +106 -88
  105. webscout/swiftcli/Readme.md +323 -323
  106. webscout/swiftcli/plugins/manager.py +9 -2
  107. webscout/version.py +1 -1
  108. webscout/zeroart/__init__.py +134 -134
  109. webscout/zeroart/effects.py +100 -100
  110. webscout/zeroart/fonts.py +1238 -1238
  111. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/METADATA +159 -35
  112. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/RECORD +116 -161
  113. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  114. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  115. webscout/Litlogger/Readme.md +0 -175
  116. webscout/Litlogger/core/__init__.py +0 -6
  117. webscout/Litlogger/core/level.py +0 -23
  118. webscout/Litlogger/core/logger.py +0 -165
  119. webscout/Litlogger/handlers/__init__.py +0 -12
  120. webscout/Litlogger/handlers/console.py +0 -33
  121. webscout/Litlogger/handlers/file.py +0 -143
  122. webscout/Litlogger/handlers/network.py +0 -173
  123. webscout/Litlogger/styles/__init__.py +0 -7
  124. webscout/Litlogger/styles/colors.py +0 -249
  125. webscout/Litlogger/styles/formats.py +0 -458
  126. webscout/Litlogger/styles/text.py +0 -87
  127. webscout/Litlogger/utils/__init__.py +0 -6
  128. webscout/Litlogger/utils/detectors.py +0 -153
  129. webscout/Litlogger/utils/formatters.py +0 -200
  130. webscout/Provider/TTI/AiForce/README.md +0 -159
  131. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  132. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  133. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  134. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  135. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  136. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  137. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  138. webscout/Provider/TTI/ImgSys/README.md +0 -174
  139. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  140. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  141. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  142. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  143. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  144. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  145. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  146. webscout/Provider/TTI/Nexra/README.md +0 -155
  147. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  148. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  149. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  150. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  151. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  152. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  153. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  154. webscout/Provider/TTI/aiarta/README.md +0 -134
  155. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  156. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  157. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  158. webscout/Provider/TTI/artbit/README.md +0 -100
  159. webscout/Provider/TTI/artbit/__init__.py +0 -22
  160. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  161. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  162. webscout/Provider/TTI/fastflux/README.md +0 -129
  163. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  164. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  165. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  166. webscout/Provider/TTI/huggingface/README.md +0 -114
  167. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  168. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  169. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  170. webscout/Provider/TTI/piclumen/README.md +0 -161
  171. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  172. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  173. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  174. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  175. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  176. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  177. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  178. webscout/Provider/TTI/talkai/README.md +0 -139
  179. webscout/Provider/TTI/talkai/__init__.py +0 -4
  180. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  181. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  182. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  183. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  184. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
@@ -1,246 +1,246 @@
1
- from curl_cffi import CurlError
2
- from curl_cffi.requests import Session # Keep Session import
3
- import json
4
- from typing import Any, Dict, Optional, Generator, Union
5
-
6
- from webscout.AIutel import Optimizers
7
- from webscout.AIutel import Conversation
8
- from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
9
- from webscout.AIbase import Provider
10
- from webscout import exceptions
11
- from webscout.litagent import LitAgent
12
-
13
- class Writecream(Provider):
14
- """
15
- A class to interact with the Writecream API.
16
- """
17
-
18
- AVAILABLE_MODELS = ["writecream-gpt"]
19
-
20
- def __init__(
21
- self,
22
- is_conversation: bool = True,
23
- max_tokens: int = 600,
24
- timeout: int = 30,
25
- intro: str = None,
26
- filepath: str = None,
27
- update_file: bool = True,
28
- proxies: dict = {},
29
- history_offset: int = 10250,
30
- act: str = None,
31
- system_prompt: str = "You are a helpful and informative AI assistant.",
32
- base_url: str = "https://8pe3nv3qha.execute-api.us-east-1.amazonaws.com/default/llm_chat",
33
- referer: str = "https://www.writecream.com/chatgpt-chat/",
34
- link: str = "writecream.com",
35
- model: str = "writecream-gpt"
36
- ):
37
- """
38
- Initializes the Writecream API with given parameters.
39
- """
40
- if model not in self.AVAILABLE_MODELS:
41
- raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
42
-
43
- # Initialize curl_cffi Session
44
- self.session = Session()
45
- self.is_conversation = is_conversation
46
- self.max_tokens_to_sample = max_tokens
47
- self.base_url = base_url
48
- self.timeout = timeout
49
- self.last_response = {}
50
- self.system_prompt = system_prompt
51
- self.model = model
52
- # Initialize LitAgent
53
- self.agent = LitAgent()
54
- self.referer = referer
55
- self.link = link
56
-
57
- self.headers = {
58
- # Use LitAgent for User-Agent
59
- "User-Agent": self.agent.random(),
60
- "Referer": self.referer
61
- # Add other headers if needed by curl_cffi impersonation or API
62
- }
63
-
64
- self.__available_optimizers = (
65
- method
66
- for method in dir(Optimizers)
67
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
68
- )
69
-
70
- # Update curl_cffi session headers and proxies
71
- self.session.headers.update(self.headers)
72
- self.session.proxies.update(proxies)
73
-
74
- Conversation.intro = (
75
- AwesomePrompts().get_act(
76
- act, raise_not_found=True, default=None, case_insensitive=True
77
- )
78
- if act
79
- else intro or Conversation.intro
80
- )
81
-
82
- self.conversation = Conversation(
83
- is_conversation, self.max_tokens_to_sample, filepath, update_file
84
- )
85
- self.conversation.history_offset = history_offset
86
-
87
- def ask(
88
- self,
89
- prompt: str,
90
- stream: bool = False,
91
- raw: bool = False,
92
- optimizer: str = None,
93
- conversationally: bool = False,
94
- ) -> Union[Dict[str, Any], Generator]:
95
- """
96
- Sends a message to the Writecream API and returns the response.
97
-
98
- Args:
99
- prompt (str): Prompt to be sent.
100
- stream (bool, optional): Flag for streaming response. Defaults to False.
101
- raw (bool, optional): Stream back raw response as received. Defaults to False.
102
- optimizer (str, optional): Prompt optimizer name. Defaults to None.
103
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
104
-
105
- Returns:
106
- Union[Dict[str, Any], Generator]: Response from the API.
107
- """
108
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
109
- if optimizer:
110
- if optimizer in self.__available_optimizers:
111
- conversation_prompt = getattr(Optimizers, optimizer)(
112
- conversation_prompt if conversationally else prompt
113
- )
114
- else:
115
- raise exceptions.FailedToGenerateResponseError(
116
- f"Optimizer is not one of {self.__available_optimizers}"
117
- )
118
-
119
- final_query = [
120
- {"role": "system", "content": self.system_prompt},
121
- {"role": "user", "content": conversation_prompt}
122
- ]
123
-
124
- params = {
125
- "query": json.dumps(final_query),
126
- "link": self.link
127
- }
128
-
129
- def for_non_stream():
130
- try:
131
- # Use curl_cffi session.get with impersonate
132
- response = self.session.get(
133
- self.base_url,
134
- params=params,
135
- timeout=self.timeout,
136
- impersonate="chrome120" # Add impersonate
137
- )
138
- response.raise_for_status()
139
- response_text = response.text # Get the raw text
140
-
141
- # Use sanitize_stream to process the non-streaming text
142
- # It will try to parse the whole text as JSON because to_json=True
143
- processed_stream = sanitize_stream(
144
- data=response_text,
145
- to_json=True, # Attempt to parse the whole response text as JSON
146
- intro_value=None, # No prefix expected on the full response
147
- content_extractor=lambda chunk: chunk.get("response", chunk.get("response_content", "")) if isinstance(chunk, dict) else None
148
- )
149
-
150
- # Extract the single result from the generator
151
- response_content = ""
152
- for content in processed_stream:
153
- response_content = content if isinstance(content, str) else ""
154
-
155
- # Update conversation history
156
- self.last_response = {"text": response_content}
157
- self.conversation.update_chat_history(prompt, response_content)
158
-
159
- return {"text": response_content}
160
- except CurlError as e: # Catch CurlError
161
- raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
162
- except Exception as e:
163
- # Include original exception type
164
- raise exceptions.FailedToGenerateResponseError(f"Failed to get response ({type(e).__name__}): {e}")
165
-
166
- # Currently, Writecream API doesn't support streaming, so we always return non-streaming response
167
- return for_non_stream()
168
-
169
- def chat(
170
- self,
171
- prompt: str,
172
- stream: bool = False,
173
- optimizer: str = None,
174
- conversationally: bool = False,
175
- ) -> Union[str, Generator[str, None, None]]:
176
- """
177
- Generates a response from the Writecream API.
178
-
179
- Args:
180
- prompt (str): Prompt to be sent.
181
- stream (bool, optional): Flag for streaming response. Defaults to False.
182
- optimizer (str, optional): Prompt optimizer name. Defaults to None.
183
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
184
-
185
- Returns:
186
- Union[str, Generator[str, None, None]]: Response from the API.
187
- """
188
- def for_non_stream():
189
- return self.get_message(
190
- self.ask(
191
- prompt,
192
- stream=False,
193
- optimizer=optimizer,
194
- conversationally=conversationally,
195
- )
196
- )
197
-
198
- if stream:
199
- # For compatibility with AUTO streaming interface, yield a dict
200
- response_dict = self.ask(
201
- prompt,
202
- stream=False,
203
- optimizer=optimizer,
204
- conversationally=conversationally,
205
- )
206
- yield response_dict
207
- else:
208
- return for_non_stream()
209
-
210
- def get_message(self, response: dict) -> str:
211
- """
212
- Retrieves message only from response.
213
-
214
- Args:
215
- response (dict): Response generated by `self.ask`
216
-
217
- Returns:
218
- str: Message extracted
219
- """
220
- assert isinstance(response, dict), "Response should be of dict data-type only"
221
- return response["text"]
222
-
223
-
224
- if __name__ == "__main__":
225
- # Ensure curl_cffi is installed
226
- print("-" * 80)
227
- print(f"{'Model':<30} {'Status':<10} {'Response'}")
228
- print("-" * 80)
229
-
230
- try:
231
- test_api = Writecream(timeout=60)
232
- prompt = "Say 'Hello' in one word"
233
- response = test_api.chat(prompt)
234
-
235
- if response and len(response.strip()) > 0:
236
- status = "✓"
237
- # Clean and truncate response
238
- clean_text = response.strip().encode('utf-8', errors='ignore').decode('utf-8')
239
- display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
240
- else:
241
- status = "✗"
242
- display_text = "Empty or invalid response"
243
-
244
- print(f"{test_api.model:<30} {status:<10} {display_text}")
245
- except Exception as e:
246
- print(f"{Writecream.AVAILABLE_MODELS[0]:<30} {'✗':<10} {str(e)}")
1
+ from curl_cffi import CurlError
2
+ from curl_cffi.requests import Session # Keep Session import
3
+ import json
4
+ from typing import Any, Dict, Optional, Generator, Union
5
+
6
+ from webscout.AIutel import Optimizers
7
+ from webscout.AIutel import Conversation
8
+ from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
9
+ from webscout.AIbase import Provider
10
+ from webscout import exceptions
11
+ from webscout.litagent import LitAgent
12
+
13
+ class Writecream(Provider):
14
+ """
15
+ A class to interact with the Writecream API.
16
+ """
17
+
18
+ AVAILABLE_MODELS = ["writecream-gpt"]
19
+
20
+ def __init__(
21
+ self,
22
+ is_conversation: bool = True,
23
+ max_tokens: int = 600,
24
+ timeout: int = 30,
25
+ intro: str = None,
26
+ filepath: str = None,
27
+ update_file: bool = True,
28
+ proxies: dict = {},
29
+ history_offset: int = 10250,
30
+ act: str = None,
31
+ system_prompt: str = "You are a helpful and informative AI assistant.",
32
+ base_url: str = "https://8pe3nv3qha.execute-api.us-east-1.amazonaws.com/default/llm_chat",
33
+ referer: str = "https://www.writecream.com/chatgpt-chat/",
34
+ link: str = "writecream.com",
35
+ model: str = "writecream-gpt"
36
+ ):
37
+ """
38
+ Initializes the Writecream API with given parameters.
39
+ """
40
+ if model not in self.AVAILABLE_MODELS:
41
+ raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
42
+
43
+ # Initialize curl_cffi Session
44
+ self.session = Session()
45
+ self.is_conversation = is_conversation
46
+ self.max_tokens_to_sample = max_tokens
47
+ self.base_url = base_url
48
+ self.timeout = timeout
49
+ self.last_response = {}
50
+ self.system_prompt = system_prompt
51
+ self.model = model
52
+ # Initialize LitAgent
53
+ self.agent = LitAgent()
54
+ self.referer = referer
55
+ self.link = link
56
+
57
+ self.headers = {
58
+ # Use LitAgent for User-Agent
59
+ "User-Agent": self.agent.random(),
60
+ "Referer": self.referer
61
+ # Add other headers if needed by curl_cffi impersonation or API
62
+ }
63
+
64
+ self.__available_optimizers = (
65
+ method
66
+ for method in dir(Optimizers)
67
+ if callable(getattr(Optimizers, method)) and not method.startswith("__")
68
+ )
69
+
70
+ # Update curl_cffi session headers and proxies
71
+ self.session.headers.update(self.headers)
72
+ self.session.proxies.update(proxies)
73
+
74
+ Conversation.intro = (
75
+ AwesomePrompts().get_act(
76
+ act, raise_not_found=True, default=None, case_insensitive=True
77
+ )
78
+ if act
79
+ else intro or Conversation.intro
80
+ )
81
+
82
+ self.conversation = Conversation(
83
+ is_conversation, self.max_tokens_to_sample, filepath, update_file
84
+ )
85
+ self.conversation.history_offset = history_offset
86
+
87
+ def ask(
88
+ self,
89
+ prompt: str,
90
+ stream: bool = False,
91
+ raw: bool = False,
92
+ optimizer: str = None,
93
+ conversationally: bool = False,
94
+ ) -> Union[Dict[str, Any], Generator]:
95
+ """
96
+ Sends a message to the Writecream API and returns the response.
97
+
98
+ Args:
99
+ prompt (str): Prompt to be sent.
100
+ stream (bool, optional): Flag for streaming response. Defaults to False.
101
+ raw (bool, optional): Stream back raw response as received. Defaults to False.
102
+ optimizer (str, optional): Prompt optimizer name. Defaults to None.
103
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
104
+
105
+ Returns:
106
+ Union[Dict[str, Any], Generator]: Response from the API.
107
+ """
108
+ conversation_prompt = self.conversation.gen_complete_prompt(prompt)
109
+ if optimizer:
110
+ if optimizer in self.__available_optimizers:
111
+ conversation_prompt = getattr(Optimizers, optimizer)(
112
+ conversation_prompt if conversationally else prompt
113
+ )
114
+ else:
115
+ raise exceptions.FailedToGenerateResponseError(
116
+ f"Optimizer is not one of {self.__available_optimizers}"
117
+ )
118
+
119
+ final_query = [
120
+ {"role": "system", "content": self.system_prompt},
121
+ {"role": "user", "content": conversation_prompt}
122
+ ]
123
+
124
+ params = {
125
+ "query": json.dumps(final_query),
126
+ "link": self.link
127
+ }
128
+
129
+ def for_non_stream():
130
+ try:
131
+ # Use curl_cffi session.get with impersonate
132
+ response = self.session.get(
133
+ self.base_url,
134
+ params=params,
135
+ timeout=self.timeout,
136
+ impersonate="chrome120" # Add impersonate
137
+ )
138
+ response.raise_for_status()
139
+ response_text = response.text # Get the raw text
140
+
141
+ # Use sanitize_stream to process the non-streaming text
142
+ # It will try to parse the whole text as JSON because to_json=True
143
+ processed_stream = sanitize_stream(
144
+ data=response_text,
145
+ to_json=True, # Attempt to parse the whole response text as JSON
146
+ intro_value=None, # No prefix expected on the full response
147
+ content_extractor=lambda chunk: chunk.get("response", chunk.get("response_content", "")) if isinstance(chunk, dict) else None
148
+ )
149
+
150
+ # Extract the single result from the generator
151
+ response_content = ""
152
+ for content in processed_stream:
153
+ response_content = content if isinstance(content, str) else ""
154
+
155
+ # Update conversation history
156
+ self.last_response = {"text": response_content}
157
+ self.conversation.update_chat_history(prompt, response_content)
158
+
159
+ return {"text": response_content}
160
+ except CurlError as e: # Catch CurlError
161
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
162
+ except Exception as e:
163
+ # Include original exception type
164
+ raise exceptions.FailedToGenerateResponseError(f"Failed to get response ({type(e).__name__}): {e}")
165
+
166
+ # Currently, Writecream API doesn't support streaming, so we always return non-streaming response
167
+ return for_non_stream()
168
+
169
+ def chat(
170
+ self,
171
+ prompt: str,
172
+ stream: bool = False,
173
+ optimizer: str = None,
174
+ conversationally: bool = False,
175
+ ) -> Union[str, Generator[str, None, None]]:
176
+ """
177
+ Generates a response from the Writecream API.
178
+
179
+ Args:
180
+ prompt (str): Prompt to be sent.
181
+ stream (bool, optional): Flag for streaming response. Defaults to False.
182
+ optimizer (str, optional): Prompt optimizer name. Defaults to None.
183
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
184
+
185
+ Returns:
186
+ Union[str, Generator[str, None, None]]: Response from the API.
187
+ """
188
+ def for_non_stream():
189
+ return self.get_message(
190
+ self.ask(
191
+ prompt,
192
+ stream=False,
193
+ optimizer=optimizer,
194
+ conversationally=conversationally,
195
+ )
196
+ )
197
+
198
+ if stream:
199
+ # For compatibility with AUTO streaming interface, yield a dict
200
+ response_dict = self.ask(
201
+ prompt,
202
+ stream=False,
203
+ optimizer=optimizer,
204
+ conversationally=conversationally,
205
+ )
206
+ yield response_dict
207
+ else:
208
+ return for_non_stream()
209
+
210
+ def get_message(self, response: dict) -> str:
211
+ """
212
+ Retrieves message only from response.
213
+
214
+ Args:
215
+ response (dict): Response generated by `self.ask`
216
+
217
+ Returns:
218
+ str: Message extracted
219
+ """
220
+ assert isinstance(response, dict), "Response should be of dict data-type only"
221
+ return response["text"]
222
+
223
+
224
+ if __name__ == "__main__":
225
+ # Ensure curl_cffi is installed
226
+ print("-" * 80)
227
+ print(f"{'Model':<30} {'Status':<10} {'Response'}")
228
+ print("-" * 80)
229
+
230
+ try:
231
+ test_api = Writecream(timeout=60)
232
+ prompt = "Say 'Hello' in one word"
233
+ response = test_api.chat(prompt)
234
+
235
+ if response and len(response.strip()) > 0:
236
+ status = "✓"
237
+ # Clean and truncate response
238
+ clean_text = response.strip().encode('utf-8', errors='ignore').decode('utf-8')
239
+ display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
240
+ else:
241
+ status = "✗"
242
+ display_text = "Empty or invalid response"
243
+
244
+ print(f"{test_api.model:<30} {status:<10} {display_text}")
245
+ except Exception as e:
246
+ print(f"{Writecream.AVAILABLE_MODELS[0]:<30} {'✗':<10} {str(e)}")
@@ -83,8 +83,10 @@ from .FreeGemini import FreeGemini
83
83
  from .Flowith import Flowith
84
84
  from .samurai import samurai
85
85
  from .lmarena import lmarena
86
+ from .oivscode import oivscode
86
87
  __all__ = [
87
88
  'SCNet',
89
+ 'oivscode',
88
90
  'lmarena',
89
91
  'NEMOTRON',
90
92
  'Flowith',
@@ -76,9 +76,10 @@ class AI4Chat(Provider):
76
76
  conversationally: bool = False,
77
77
  country: str = None,
78
78
  user_id: str = None,
79
- ) -> Dict[str, Any]:
79
+ ):
80
80
  """
81
81
  Sends a prompt to the AI4Chat API and returns the response.
82
+ If stream=True, yields small chunks of the response (simulated streaming).
82
83
  """
83
84
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
84
85
  if optimizer:
@@ -107,9 +108,20 @@ class AI4Chat(Provider):
107
108
  response_text = response_text[1:]
108
109
  if response_text.endswith('"'):
109
110
  response_text = response_text[:-1]
111
+ response_text = response_text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
110
112
  self.last_response.update(dict(text=response_text))
111
113
  self.conversation.update_chat_history(prompt, response_text)
112
- return self.last_response
114
+ if stream:
115
+ # Simulate streaming by yielding fixed-size character chunks (e.g., 48 chars)
116
+ buffer = response_text
117
+ chunk_size = 48
118
+ while buffer:
119
+ chunk = buffer[:chunk_size]
120
+ buffer = buffer[chunk_size:]
121
+ if chunk.strip():
122
+ yield {"text": chunk}
123
+ else:
124
+ return self.last_response
113
125
 
114
126
  def chat(
115
127
  self,
@@ -119,19 +131,31 @@ class AI4Chat(Provider):
119
131
  conversationally: bool = False,
120
132
  country: str = None,
121
133
  user_id: str = None,
122
- ) -> str:
134
+ ):
123
135
  """
124
136
  Generates a response from the AI4Chat API.
137
+ If stream=True, yields each chunk as a string.
125
138
  """
126
- return self.get_message(
127
- self.ask(
139
+ if stream:
140
+ for chunk in self.ask(
128
141
  prompt,
142
+ stream=True,
129
143
  optimizer=optimizer,
130
144
  conversationally=conversationally,
131
145
  country=country,
132
146
  user_id=user_id,
147
+ ):
148
+ yield self.get_message(chunk)
149
+ else:
150
+ return self.get_message(
151
+ self.ask(
152
+ prompt,
153
+ optimizer=optimizer,
154
+ conversationally=conversationally,
155
+ country=country,
156
+ user_id=user_id,
157
+ )
133
158
  )
134
- )
135
159
 
136
160
  def get_message(self, response: Union[dict, str]) -> str:
137
161
  """
@@ -145,5 +169,6 @@ class AI4Chat(Provider):
145
169
  if __name__ == "__main__":
146
170
  from rich import print
147
171
  ai = AI4Chat()
148
- response = ai.chat("Tell me something interesting")
149
- print(response)
172
+ response = ai.chat("Tell me about humans in points", stream=True)
173
+ for c in response:
174
+ print(c, end="")