webscout-8.1-py3-none-any.whl → webscout-8.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (51)
  1. inferno/__init__.py +6 -0
  2. inferno/__main__.py +9 -0
  3. inferno/cli.py +6 -0
  4. webscout/Local/__init__.py +6 -0
  5. webscout/Local/__main__.py +9 -0
  6. webscout/Local/api.py +576 -0
  7. webscout/Local/cli.py +338 -0
  8. webscout/Local/config.py +75 -0
  9. webscout/Local/llm.py +188 -0
  10. webscout/Local/model_manager.py +205 -0
  11. webscout/Local/server.py +187 -0
  12. webscout/Local/utils.py +93 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +359 -0
  14. webscout/Provider/AISEARCH/__init__.py +2 -1
  15. webscout/Provider/AISEARCH/scira_search.py +8 -4
  16. webscout/Provider/ExaChat.py +18 -8
  17. webscout/Provider/GithubChat.py +5 -1
  18. webscout/Provider/Glider.py +4 -2
  19. webscout/Provider/OPENAI/__init__.py +8 -1
  20. webscout/Provider/OPENAI/chatgpt.py +549 -0
  21. webscout/Provider/OPENAI/exachat.py +20 -8
  22. webscout/Provider/OPENAI/glider.py +3 -1
  23. webscout/Provider/OPENAI/llmchatco.py +3 -1
  24. webscout/Provider/OPENAI/opkfc.py +488 -0
  25. webscout/Provider/OPENAI/scirachat.py +11 -7
  26. webscout/Provider/OPENAI/standardinput.py +425 -0
  27. webscout/Provider/OPENAI/textpollinations.py +285 -0
  28. webscout/Provider/OPENAI/toolbaz.py +405 -0
  29. webscout/Provider/OPENAI/uncovrAI.py +455 -0
  30. webscout/Provider/OPENAI/writecream.py +158 -0
  31. webscout/Provider/StandardInput.py +278 -0
  32. webscout/Provider/TextPollinationsAI.py +27 -28
  33. webscout/Provider/Writecream.py +211 -0
  34. webscout/Provider/WritingMate.py +197 -0
  35. webscout/Provider/Youchat.py +30 -26
  36. webscout/Provider/__init__.py +10 -2
  37. webscout/Provider/koala.py +2 -2
  38. webscout/Provider/llmchatco.py +5 -0
  39. webscout/Provider/scira_chat.py +5 -2
  40. webscout/Provider/scnet.py +187 -0
  41. webscout/Provider/toolbaz.py +320 -0
  42. webscout/Provider/uncovr.py +3 -3
  43. webscout/conversation.py +32 -32
  44. webscout/version.py +1 -1
  45. {webscout-8.1.dist-info → webscout-8.2.dist-info}/METADATA +54 -3
  46. {webscout-8.1.dist-info → webscout-8.2.dist-info}/RECORD +50 -25
  47. webscout-8.2.dist-info/entry_points.txt +5 -0
  48. {webscout-8.1.dist-info → webscout-8.2.dist-info}/top_level.txt +1 -0
  49. webscout-8.1.dist-info/entry_points.txt +0 -3
  50. {webscout-8.1.dist-info → webscout-8.2.dist-info}/LICENSE.md +0 -0
  51. {webscout-8.1.dist-info → webscout-8.2.dist-info}/WHEEL +0 -0
webscout/Provider/StandardInput.py
@@ -0,0 +1,278 @@
+ from os import system
+ import requests
+ import json
+ import uuid
+ import re
+ from datetime import datetime
+ from typing import Any, Dict, Optional, Union, Generator
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ class StandardInputAI(Provider):
+     """
+     A class to interact with the Standard Input chat API.
+     """
+
+     AVAILABLE_MODELS = {
+         "standard-quick": "quick",
+         "standard-reasoning": "quick", # Same model but with reasoning enabled
+     }
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2049,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "standard-quick",
+         chat_id: str = None,
+         user_id: str = None,
+         browser: str = "chrome",
+         system_prompt: str = "You are a helpful assistant.",
+         enable_reasoning: bool = False,
+     ):
+         """
+         Initializes the Standard Input API client.
+
+         Args:
+             is_conversation (bool): Whether to maintain conversation history.
+             max_tokens (int): Maximum number of tokens to generate.
+             timeout (int): Request timeout in seconds.
+             intro (str): Introduction text for the conversation.
+             filepath (str): Path to save conversation history.
+             update_file (bool): Whether to update the conversation history file.
+             proxies (dict): Proxy configuration for requests.
+             history_offset (int): Maximum history length in characters.
+             act (str): Persona for the AI to adopt.
+             model (str): Model to use, must be one of AVAILABLE_MODELS.
+             chat_id (str): Unique identifier for the chat session.
+             user_id (str): Unique identifier for the user.
+             browser (str): Browser to emulate in requests.
+             system_prompt (str): System prompt for the AI.
+             enable_reasoning (bool): Whether to enable the reasoning feature.
+         """
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.url = "https://chat.standard-input.com/api/chat"
+
+         # Initialize LitAgent for user agent generation
+         self.agent = LitAgent()
+         # Use fingerprinting to create a consistent browser identity
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+         self.system_prompt = system_prompt
+
+         # Use the fingerprint for headers
+         self.headers = {
+             "accept": "*/*",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": self.fingerprint["accept_language"],
+             "content-type": "application/json",
+             "dnt": "1",
+             "origin": "https://chat.standard-input.com",
+             "referer": "https://chat.standard-input.com/",
+             "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "same-origin",
+             "sec-gpc": "1",
+             "user-agent": self.fingerprint["user_agent"],
+         }
+
+         # Default cookies - these should be updated for production use
+         self.cookies = {
+             "auth-chat": '''%7B%22user%22%3A%7B%22id%22%3A%2243a26ebd-7691-4a5a-8321-12aff017af86%22%2C%22email%22%3A%22iu511inmev%40illubd.com%22%2C%22accountId%22%3A%22057d78c9-06db-48eb-aeaa-0efdbaeb9446%22%2C%22provider%22%3A%22password%22%7D%2C%22tokens%22%3A%7B%22access%22%3A%22eyJhbGciOiJFUzI1NiIsImtpZCI6Ijg1NDhmZWY1LTk5MjYtNDk2Yi1hMjI2LTQ5OTExYjllYzU2NSIsInR5cCI6IkpXVCJ9.eyJtb2RlIjoiYWNjZXNzIiwidHlwZSI6InVzZXIiLCJwcm9wZXJ0aWVzIjp7ImlkIjoiNDNhMjZlYmQtNzY5MS00YTVhLTgzMzEtMTJhZmYwMTdhZjg2IiwiZW1haWwiOiJpdTUxMWlubWV2QGlsbHViZC5jb20iLCJhY2NvdW50SWQiOiIwNTdkNzhjOS0wNmRiLTQ4ZWItYWVhYS0wZWZkYmFlYjk0NDYiLCJwcm92aWRlciI6InBhc3N3b3JkIn0sImF1ZCI6InN0YW5kYXJkLWlucHV0LWlvcyIsImlzcyI6Imh0dHBzOi8vYXV0aC5zdGFuZGFyZC1pbnB1dC5jb20iLCJzdWIiOiJ1c2VyOjRmYWMzMTllZjA4MDRiZmMiLCJleHAiOjE3NDU0MDU5MDN9.d3VsEq-UCNsQWkiPlTVw7caS0wTXfCYe6yeFLeb4Ce6ZYTIFFn685SF-aKvLOxaYaq7Pyk4D2qr24riPVhxUWQ%22%2C%22refresh%22%3A%22user%3A4fac319ef0804bfc%3A3a757177-5507-4a36-9356-492f5ed06105%22%7D%7D''',
+             "auth": '''%7B%22user%22%3A%7B%22id%22%3A%22c51e291f-8f44-439d-a38b-9ea147581a13%22%2C%22email%22%3A%22r6cigexlsb%40mrotzis.com%22%2C%22accountId%22%3A%22599fd4ce-04a2-40f6-a78f-d33d0059b77f%22%2C%22provider%22%3A%22password%22%7D%2C%22tokens%22%3A%7B%22access%22%3A%22eyJhbGciOiJFUzI1NiIsImtpZCI6Ijg1NDhmZWY1LTk5MjYtNDk2Yi1hMjI2LTQ5OTExYjllYzU2NSIsInR5cCI6IkpXVCJ9.eyJtb2RlIjoiYWNjZXNzIiwidHlwZSI6InVzZXIiLCJwcm9wZXJ0aWVzIjp7ImlkIjoiYzUxZTI5MWYtOGY0NC00MzlkLWEzOGItOWVhMTQ3NTgxYTEzIiwiZW1haWwiOiJyNmNpZ2V4bHNiQG1yb3R6aXMuY29tIiwiYWNjb3VudElkIjoiNTk5ZmQ0Y2UtMDRhMi00MGY2LWE3OGYtZDMzZDAwNTliNzdmIiwicHJvdmlkZXIiOiJwYXNzd29yZCJ9LCJhdWQiOiJzdGFuZGFyZC1pbnB1dC1pb3MiLCJpc3MiOiJodHRwczovL2F1dGguc3RhbmRhcmQtaW5wdXQuY29tIiwic3ViIjoidXNlcjo4Y2FmMjRkYzUxNDc4MmNkIiwiZXhwIjoxNzQ2NzI0MTU3fQ.a3970nBJkd8JoU-khRA2JlRMuYeJ7378QS4ZL446kOkDi35uTwuC4qGrWH9efk9GkFaVcWPtYeOJjRb7f2SeJA%22%2C%22refresh%22%3A%22user%3A8caf24dc514782cd%3A14e24386-8443-4df0-ae25-234ad59218ef%22%7D%7D''',
+             "sidebar:state": "true",
+             "ph_phc_f3wUUyCfmKlKtkc2pfT7OsdcW2mBEVGN2A87yEYbG3c_posthog": '''%7B%22distinct_id%22%3A%220195c7cc-ac8f-79ff-b901-e14a78fc2a67%22%2C%22%24sesid%22%3A%5B1744688627860%2C%220196377f-9f12-77e6-a9ea-0e9669423803%22%2C1744687832850%5D%2C%22%24initial_person_info%22%3A%7B%22r%22%3A%22%24direct%22%2C%22u%22%3A%22https%3A%2F%2Fstandard-input.com%2F%22%7D%7D'''
+         }
+
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.chat_id = chat_id or str(uuid.uuid4())
+         self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"
+         self.enable_reasoning = enable_reasoning
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def refresh_identity(self, browser: str = None):
+         """
+         Refreshes the browser identity fingerprint.
+
+         Args:
+             browser: Specific browser to use for the new fingerprint
+         """
+         browser = browser or self.fingerprint.get("browser_type", "chrome")
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Update headers with new fingerprint
+         self.headers.update({
+             "Accept-Language": self.fingerprint["accept_language"],
+             "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["sec-ch-ua"],
+             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+             "User-Agent": self.fingerprint["user_agent"],
+         })
+
+         # Update session headers
+         for header, value in self.headers.items():
+             self.session.headers[header] = value
+
+         return self.fingerprint
+
+     def ask(
+         self,
+         prompt: str,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Dict[str, Any]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Prepare the messages
+         messages = [
+             {"role": "system", "content": self.system_prompt},
+             {"role": "user", "content": conversation_prompt, "parts": [{"type": "text", "text": conversation_prompt}]}
+         ]
+
+         # Prepare the request payload
+         payload = {
+             "id": self.chat_id,
+             "messages": messages,
+             "modelId": self.AVAILABLE_MODELS[self.model],
+             "enabledFeatures": ["reasoning"] if self.enable_reasoning or self.model == "standard-reasoning" else []
+         }
+
+         try:
+             response = self.session.post(self.url, cookies=self.cookies, json=payload, stream=True, timeout=self.timeout)
+             if response.status_code != 200:
+                 # Try to get response content for better error messages
+                 try:
+                     error_content = response.text
+                 except:
+                     error_content = "<could not read response content>"
+
+                 if response.status_code in [403, 429]:
+                     print(f"Received status code {response.status_code}, refreshing identity...")
+                     self.refresh_identity()
+                     response = self.session.post(self.url, cookies=self.cookies, json=payload, stream=True, timeout=self.timeout)
+                     if not response.ok:
+                         raise exceptions.FailedToGenerateResponseError(
+                             f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}"
+                         )
+                     print("Identity refreshed successfully.")
+                 else:
+                     raise exceptions.FailedToGenerateResponseError(
+                         f"Request failed with status code {response.status_code}. Response: {error_content}"
+                     )
+
+             full_response = ""
+             debug_lines = []
+
+             # Process the streaming response
+             for i, line in enumerate(response.iter_lines(decode_unicode=True)):
+                 if line:
+                     try:
+                         line_str = line
+                         debug_lines.append(line_str)
+
+                         # Extract content from the response
+                         match = re.search(r'0:"(.*?)"', line_str)
+                         if match:
+                             content = match.group(1)
+                             full_response += content
+                             continue
+                     except: pass
+
+             self.last_response = {"text": full_response}
+             self.conversation.update_chat_history(prompt, full_response)
+             return {"text": full_response}
+         except Exception as e:
+             raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+     def chat(
+         self,
+         prompt: str,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         return self.get_message(
+             self.ask(
+                 prompt, optimizer=optimizer, conversationally=conversationally
+             )
+         )
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+ if __name__ == "__main__":
+     print("-" * 100)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 100)
+
+     test_prompt = "Say 'Hello' in one word"
+
+     # Test each model
+     for model in StandardInputAI.AVAILABLE_MODELS:
+         print(f"\rTesting {model}...", end="")
+
+         try:
+             test_ai = StandardInputAI(model=model, timeout=120) # Increased timeout
+             response = test_ai.chat(test_prompt)
+
+             if response and len(response.strip()) > 0:
+                 status = "✓"
+                 # Clean and truncate response
+                 clean_text = response.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                 display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             error_msg = str(e)
+             # Truncate very long error messages
+             if len(error_msg) > 100:
+                 error_msg = error_msg[:97] + "..."
+             print(f"\r{model:<50} {'✗':<10} Error: {error_msg}")
webscout/Provider/TextPollinationsAI.py
@@ -12,34 +12,33 @@ class TextPollinationsAI(Provider):
      """
 
      AVAILABLE_MODELS = [
-         "openai",                  # OpenAI GPT-4o-mini
-         "openai-large",            # OpenAI GPT-4o
-         "openai-reasoning",        # OpenAI o3-mini
-         "qwen-coder",              # Qwen 2.5 Coder 32B
-         "llama",                   # Llama 3.3 70B
-         "mistral",                 # Mistral Small 3
-         "unity",                   # Unity Mistral Large
-         "midijourney",             # Midijourney
-         "rtist",                   # Rtist
-         "searchgpt",               # SearchGPT
-         "evil",                    # Evil
-         "deepseek-reasoning",      # DeepSeek-R1 Distill Qwen 32B
-         "deepseek-reasoning-large",# DeepSeek R1 - Llama 70B
-         # "llamalight",            # Llama 3.1 8B Instruct # >>> NOT WORKING
-         "phi",                     # Phi-4 Instruct
-         "llama-vision",            # Llama 3.2 11B Vision
-         "pixtral",                 # Pixtral 12B
-         "gemini",                  # Gemini 2.0 Flash
-         "gemini-reasoning",        # Gemini 2.0 Flash Thinking
-         "hormoz",                  # Hormoz 8b
-         "hypnosis-tracy",          # Hypnosis Tracy 7B
-         "mistral-roblox",          # Mistral Roblox on Scaleway
-         "roblox-rp",               # Roblox Roleplay Assistant
-         "deepseek",                # DeepSeek-V3
-         "qwen-reasoning",          # Qwen QWQ 32B - Advanced Reasoning
-         "sur",                     # Sur AI Assistant (Mistral)
-         "llama-scaleway",          # Llama (Scaleway)
-         "openai-audio",            # OpenAI GPT-4o-audio-preview
+         "openai",
+         "openai-large",
+         "openai-reasoning",
+         "qwen-coder",
+         "llama",
+         "llamascout",
+         "mistral",
+         "unity",
+         "midijourney",
+         "rtist",
+         "searchgpt",
+         "evil",
+         "deepseek-reasoning",
+         "deepseek-reasoning-large",
+         "llamalight",
+         "phi",
+         "llama-vision",
+         "pixtral",
+         "gemini",
+         "hormoz",
+         "hypnosis-tracy",
+         "mistral-roblox",
+         "roblox-rp",
+         "deepseek",
+         "sur",
+         "llama-scaleway",
+         "openai-audio",
      ]
 
      def __init__(
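
The net effect of this hunk: "llamascout" is added, the previously commented-out "llamalight" is re-enabled, "gemini-reasoning" and "qwen-reasoning" are dropped, and the inline model descriptions are removed. A quick way to check which ids a given install accepts (a sketch assuming only the class attribute shown above):

    from webscout.Provider.TextPollinationsAI import TextPollinationsAI

    # Newly accepted in 8.2:
    print("llamascout" in TextPollinationsAI.AVAILABLE_MODELS)      # True
    # Removed in 8.2; passing it to the constructor would now raise:
    print("qwen-reasoning" in TextPollinationsAI.AVAILABLE_MODELS)  # False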
webscout/Provider/Writecream.py
@@ -0,0 +1,211 @@
+ import requests
+ import json
+ from typing import Any, Dict, Optional, Generator, Union
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ class Writecream(Provider):
+     """
+     A class to interact with the Writecream API.
+     """
+
+     AVAILABLE_MODELS = ["writecream-gpt"]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         system_prompt: str = "You are a helpful and informative AI assistant.",
+         base_url: str = "https://8pe3nv3qha.execute-api.us-east-1.amazonaws.com/default/llm_chat",
+         user_agent: str = "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Mobile Safari/537.36",
+         referer: str = "https://www.writecream.com/chatgpt-chat/",
+         link: str = "writecream.com",
+         model: str = "writecream-gpt"
+     ):
+         """
+         Initializes the Writecream API with given parameters.
+         """
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.base_url = base_url
+         self.timeout = timeout
+         self.last_response = {}
+         self.system_prompt = system_prompt
+         self.model = model
+         self.user_agent = user_agent
+         self.referer = referer
+         self.link = link
+
+         self.headers = {
+             "User-Agent": self.user_agent,
+             "Referer": self.referer
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         """
+         Sends a message to the Writecream API and returns the response.
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+
+         Returns:
+             Union[Dict[str, Any], Generator]: Response from the API.
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         final_query = [
+             {"role": "system", "content": self.system_prompt},
+             {"role": "user", "content": conversation_prompt}
+         ]
+
+         params = {
+             "query": json.dumps(final_query),
+             "link": self.link
+         }
+
+         def for_non_stream():
+             try:
+                 response = self.session.get(self.base_url, params=params, timeout=self.timeout)
+                 response.raise_for_status()
+                 data = response.json()
+
+                 # Extract the response content
+                 response_content = data.get("response", data.get("response_content", ""))
+
+                 # Update conversation history
+                 self.last_response = {"text": response_content}
+                 self.conversation.update_chat_history(prompt, response_content)
+
+                 return {"text": response_content}
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Failed to get response from the chat API: {e}")
+
+         # Currently, Writecream API doesn't support streaming, so we always return non-streaming response
+         return for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         """
+         Generates a response from the Writecream API.
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+
+         Returns:
+             Union[str, Generator[str, None, None]]: Response from the API.
+         """
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     stream=False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         # Currently, Writecream API doesn't support streaming
+         return for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """
+         Retrieves message only from response.
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'Model':<30} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     try:
+         test_api = Writecream(timeout=60)
+         prompt = "Say 'Hello' in one word"
+         response = test_api.chat(prompt)
+
+         if response and len(response.strip()) > 0:
+             status = "✓"
+             # Clean and truncate response
+             clean_text = response.strip().encode('utf-8', errors='ignore').decode('utf-8')
+             display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+         else:
+             status = "✗"
+             display_text = "Empty or invalid response"
+
+         print(f"{test_api.model:<30} {status:<10} {display_text}")
+     except Exception as e:
+         print(f"{Writecream.AVAILABLE_MODELS[0]:<30} {'✗':<10} {str(e)}")