webscout-8.0-py3-none-any.whl → webscout-8.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (80)
  1. inferno/__init__.py +6 -0
  2. inferno/__main__.py +9 -0
  3. inferno/cli.py +6 -0
  4. webscout/Local/__init__.py +6 -0
  5. webscout/Local/__main__.py +9 -0
  6. webscout/Local/api.py +576 -0
  7. webscout/Local/cli.py +338 -0
  8. webscout/Local/config.py +75 -0
  9. webscout/Local/llm.py +188 -0
  10. webscout/Local/model_manager.py +205 -0
  11. webscout/Local/server.py +187 -0
  12. webscout/Local/utils.py +93 -0
  13. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  14. webscout/Provider/AISEARCH/ISou.py +1 -1
  15. webscout/Provider/AISEARCH/Perplexity.py +359 -0
  16. webscout/Provider/AISEARCH/__init__.py +3 -1
  17. webscout/Provider/AISEARCH/felo_search.py +1 -1
  18. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  19. webscout/Provider/AISEARCH/hika_search.py +1 -1
  20. webscout/Provider/AISEARCH/iask_search.py +436 -0
  21. webscout/Provider/AISEARCH/scira_search.py +9 -5
  22. webscout/Provider/AISEARCH/webpilotai_search.py +1 -1
  23. webscout/Provider/ExaAI.py +1 -1
  24. webscout/Provider/ExaChat.py +18 -8
  25. webscout/Provider/GithubChat.py +5 -1
  26. webscout/Provider/Glider.py +4 -2
  27. webscout/Provider/Jadve.py +2 -2
  28. webscout/Provider/OPENAI/__init__.py +24 -0
  29. webscout/Provider/OPENAI/base.py +46 -0
  30. webscout/Provider/OPENAI/c4ai.py +347 -0
  31. webscout/Provider/OPENAI/chatgpt.py +549 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  33. webscout/Provider/OPENAI/deepinfra.py +284 -0
  34. webscout/Provider/OPENAI/exaai.py +419 -0
  35. webscout/Provider/OPENAI/exachat.py +433 -0
  36. webscout/Provider/OPENAI/freeaichat.py +355 -0
  37. webscout/Provider/OPENAI/glider.py +316 -0
  38. webscout/Provider/OPENAI/heckai.py +337 -0
  39. webscout/Provider/OPENAI/llmchatco.py +327 -0
  40. webscout/Provider/OPENAI/netwrck.py +348 -0
  41. webscout/Provider/OPENAI/opkfc.py +488 -0
  42. webscout/Provider/OPENAI/scirachat.py +463 -0
  43. webscout/Provider/OPENAI/sonus.py +294 -0
  44. webscout/Provider/OPENAI/standardinput.py +425 -0
  45. webscout/Provider/OPENAI/textpollinations.py +285 -0
  46. webscout/Provider/OPENAI/toolbaz.py +405 -0
  47. webscout/Provider/OPENAI/typegpt.py +361 -0
  48. webscout/Provider/OPENAI/uncovrAI.py +455 -0
  49. webscout/Provider/OPENAI/utils.py +211 -0
  50. webscout/Provider/OPENAI/venice.py +428 -0
  51. webscout/Provider/OPENAI/wisecat.py +381 -0
  52. webscout/Provider/OPENAI/writecream.py +158 -0
  53. webscout/Provider/OPENAI/x0gpt.py +389 -0
  54. webscout/Provider/OPENAI/yep.py +329 -0
  55. webscout/Provider/StandardInput.py +278 -0
  56. webscout/Provider/TextPollinationsAI.py +27 -28
  57. webscout/Provider/Venice.py +1 -1
  58. webscout/Provider/Writecream.py +211 -0
  59. webscout/Provider/WritingMate.py +197 -0
  60. webscout/Provider/Youchat.py +30 -26
  61. webscout/Provider/__init__.py +14 -6
  62. webscout/Provider/koala.py +2 -2
  63. webscout/Provider/llmchatco.py +5 -0
  64. webscout/Provider/scira_chat.py +18 -12
  65. webscout/Provider/scnet.py +187 -0
  66. webscout/Provider/toolbaz.py +320 -0
  67. webscout/Provider/typegpt.py +3 -184
  68. webscout/Provider/uncovr.py +3 -3
  69. webscout/conversation.py +32 -32
  70. webscout/prompt_manager.py +2 -1
  71. webscout/version.py +1 -1
  72. webscout-8.2.dist-info/METADATA +734 -0
  73. {webscout-8.0.dist-info → webscout-8.2.dist-info}/RECORD +77 -32
  74. webscout-8.2.dist-info/entry_points.txt +5 -0
  75. {webscout-8.0.dist-info → webscout-8.2.dist-info}/top_level.txt +1 -0
  76. webscout/Provider/flowith.py +0 -207
  77. webscout-8.0.dist-info/METADATA +0 -995
  78. webscout-8.0.dist-info/entry_points.txt +0 -3
  79. {webscout-8.0.dist-info → webscout-8.2.dist-info}/LICENSE.md +0 -0
  80. {webscout-8.0.dist-info → webscout-8.2.dist-info}/WHEEL +0 -0
webscout/Provider/Writecream.py (new file)
@@ -0,0 +1,211 @@
+import requests
+import json
+from typing import Any, Dict, Optional, Generator, Union
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+class Writecream(Provider):
+    """
+    A class to interact with the Writecream API.
+    """
+
+    AVAILABLE_MODELS = ["writecream-gpt"]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful and informative AI assistant.",
+        base_url: str = "https://8pe3nv3qha.execute-api.us-east-1.amazonaws.com/default/llm_chat",
+        user_agent: str = "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Mobile Safari/537.36",
+        referer: str = "https://www.writecream.com/chatgpt-chat/",
+        link: str = "writecream.com",
+        model: str = "writecream-gpt"
+    ):
+        """
+        Initializes the Writecream API with given parameters.
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.base_url = base_url
+        self.timeout = timeout
+        self.last_response = {}
+        self.system_prompt = system_prompt
+        self.model = model
+        self.user_agent = user_agent
+        self.referer = referer
+        self.link = link
+
+        self.headers = {
+            "User-Agent": self.user_agent,
+            "Referer": self.referer
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        """
+        Sends a message to the Writecream API and returns the response.
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+
+        Returns:
+            Union[Dict[str, Any], Generator]: Response from the API.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        final_query = [
+            {"role": "system", "content": self.system_prompt},
+            {"role": "user", "content": conversation_prompt}
+        ]
+
+        params = {
+            "query": json.dumps(final_query),
+            "link": self.link
+        }
+
+        def for_non_stream():
+            try:
+                response = self.session.get(self.base_url, params=params, timeout=self.timeout)
+                response.raise_for_status()
+                data = response.json()
+
+                # Extract the response content
+                response_content = data.get("response", data.get("response_content", ""))
+
+                # Update conversation history
+                self.last_response = {"text": response_content}
+                self.conversation.update_chat_history(prompt, response_content)
+
+                return {"text": response_content}
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Failed to get response from the chat API: {e}")
+
+        # Currently, Writecream API doesn't support streaming, so we always return non-streaming response
+        return for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        """
+        Generates a response from the Writecream API.
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+
+        Returns:
+            Union[str, Generator[str, None, None]]: Response from the API.
+        """
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    stream=False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        # Currently, Writecream API doesn't support streaming
+        return for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """
+        Retrieves message only from response.
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+
+if __name__ == "__main__":
+    print("-" * 80)
+    print(f"{'Model':<30} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    try:
+        test_api = Writecream(timeout=60)
+        prompt = "Say 'Hello' in one word"
+        response = test_api.chat(prompt)
+
+        if response and len(response.strip()) > 0:
+            status = "✓"
+            # Clean and truncate response
+            clean_text = response.strip().encode('utf-8', errors='ignore').decode('utf-8')
+            display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+        else:
+            status = "✗"
+            display_text = "Empty or invalid response"
+
+        print(f"{test_api.model:<30} {status:<10} {display_text}")
+    except Exception as e:
+        print(f"{Writecream.AVAILABLE_MODELS[0]:<30} {'✗':<10} {str(e)}")
webscout/Provider/WritingMate.py (new file)
@@ -0,0 +1,197 @@
+import re
+import requests, json
+from typing import Union, Any, Dict, Generator, Optional
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+class WritingMate(Provider):
+    AVAILABLE_MODELS = [
+        "claude-3-haiku-20240307",
+        "gemini-1.5-flash-latest",
+        "llama3-8b-8192",
+        "llama3-70b-8192",
+        "google/gemini-flash-1.5-8b-exp",
+        "gpt-4o-mini"
+    ]
+    """
+    Provider for WritingMate streaming API.
+    """
+    api_endpoint = "https://chat.writingmate.ai/api/chat/tools-stream"
+
+    def __init__(
+        self,
+        cookies_path: str = "cookies.json",
+        is_conversation: bool = True,
+        max_tokens: int = 4096,
+        timeout: int = 60,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        act: str = None,
+        system_prompt: str = "You are a friendly, helpful AI assistant.",
+        model: str = "gpt-4o-mini"
+    ):
+        self.cookies_path = cookies_path
+        self.cookies = self._load_cookies(cookies_path)
+        self.session = requests.Session()
+        self.timeout = timeout
+        self.system_prompt = system_prompt
+        self.model = model
+        if self.model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Unknown model: {self.model}. Choose from {self.AVAILABLE_MODELS}")
+        self.last_response = {}
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "Content-Type": "text/plain;charset=UTF-8",
+            "Origin": "https://chat.writingmate.ai",
+            "Referer": "https://chat.writingmate.ai/chat",
+            "Cookie": self.cookies,
+            "DNT": "1",
+            "sec-ch-ua": "\"Microsoft Edge\";v=\"135\", \"Not-A.Brand\";v=\"8\", \"Chromium\";v=\"135\"",
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": "\"Windows\"",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Sec-GPC": "1",
+            "User-Agent": LitAgent().random()
+        }
+        self.session.headers.update(self.headers)
+        self.__available_optimizers = (
+            m for m in dir(Optimizers)
+            if callable(getattr(Optimizers, m)) and not m.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(act, raise_not_found=True, default=None, case_insensitive=True)
+            if act else intro or Conversation.intro
+        )
+        self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
+        self.conversation.history_offset = 10250
+
+    def _load_cookies(self, path: str) -> str:
+        try:
+            with open(path, 'r') as f:
+                data = json.load(f)
+            return '; '.join(f"{c['name']}={c['value']}" for c in data)
+        except (FileNotFoundError, json.JSONDecodeError):
+            raise RuntimeError(f"Failed to load cookies from {path}")
+
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = True,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise exceptions.FailedToGenerateResponseError(f"Unknown optimizer: {optimizer}")
+
+        body = {
+            "chatSettings": {
+                "model": self.model,
+                "prompt": self.system_prompt,
+                "temperature": 0.5,
+                "contextLength": 4096,
+                "includeProfileContext": True,
+                "includeWorkspaceInstructions": True,
+                "embeddingsProvider": "openai"
+            },
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ],
+            "selectedTools": []
+        }
+
+        def for_stream():
+            response = self.session.post(self.api_endpoint, headers=self.headers, json=body, stream=True, timeout=self.timeout)
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+            streaming_response = ""
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    match = re.search(r'0:"(.*?)"', line)
+                    if match:
+                        content = match.group(1)
+                        streaming_response += content
+                        yield content if raw else dict(text=content)
+            self.last_response.update(dict(text=streaming_response))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False
+    ) -> Union[str, Generator[str, None, None]]:
+        if stream:
+            # yield raw SSE lines
+            def raw_stream():
+                for line in self.ask(
+                    prompt, stream=True, raw=True,
+                    optimizer=optimizer, conversationally=conversationally
+                ):
+                    yield line
+            return raw_stream()
+        # non-stream: return aggregated text
+        return self.get_message(
+            self.ask(
+                prompt,
+                False,
+                raw=False,
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+        )
+
+    def get_message(self, response: dict) -> str:
+        """
+        Extracts the message from the API response.
+
+        Args:
+            response (dict): The API response.
+
+        Returns:
+            str: The message content.
+
+        Examples:
+            >>> ai = WritingMate()
+            >>> response = ai.ask("Tell me a joke!")
+            >>> message = ai.get_message(response)
+            >>> print(message)
+            'Why did the scarecrow win an award? Because he was outstanding in his field!'
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        formatted_text = response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+        return formatted_text
+
+if __name__ == "__main__":
+    from rich import print
+    ai = WritingMate(cookies_path="cookies.json")
+    response = ai.chat(input(">>> "), stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
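Note: WritingMate authenticates with browser cookies rather than an API key. From _load_cookies above, cookies.json must be a JSON array of objects with "name" and "value" keys (the shape most cookie-export extensions produce). A sketch under that assumption; the cookie names below are placeholders, not the real ones the service requires:

import json

# Hypothetical export for illustration; capture real cookies from a logged-in
# chat.writingmate.ai session with a cookie-export extension.
with open("cookies.json", "w") as f:
    json.dump([
        {"name": "session", "value": "<session-cookie>"},
        {"name": "auth-token", "value": "<auth-cookie>"},
    ], f)

from webscout.Provider.WritingMate import WritingMate

ai = WritingMate(cookies_path="cookies.json", model="gpt-4o-mini")
for chunk in ai.chat("Hello!", stream=True):  # streaming yields raw text fragments
    print(chunk, end="", flush=True)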
webscout/Provider/Youchat.py
@@ -53,11 +53,11 @@ class YouChat(Provider):
         "command_r_plus",

         # Free models not enabled for user chat modes
-        # "llama3_3_70b", # isAllowedForUserChatModes: false
-        # "llama3_2_90b", # isAllowedForUserChatModes: false
-        # "databricks_dbrx_instruct", # isAllowedForUserChatModes: false
-        # "solar_1_mini", # isAllowedForUserChatModes: false
-        # "dolphin_2_5", # isAllowedForUserChatModes: false, isUncensoredModel: true
+        "llama3_3_70b", # isAllowedForUserChatModes: false
+        "llama3_2_90b", # isAllowedForUserChatModes: false
+        "databricks_dbrx_instruct", # isAllowedForUserChatModes: false
+        "solar_1_mini", # isAllowedForUserChatModes: false
+        "dolphin_2_5", # isAllowedForUserChatModes: false, isUncensoredModel: true
     ]

     def __init__(
@@ -108,6 +108,7 @@ class YouChat(Provider):
             "Content-Type": "text/plain;charset=UTF-8",
         }
         self.cookies = {
+            "uuid_guest": uuid4().hex,
             "uuid_guest_backup": uuid4().hex,
             "youchat_personalization": "true",
             "youchat_smart_learn": "true",
@@ -188,9 +189,10 @@ class YouChat(Provider):
            "queryTraceId": trace_id,
            "chatId": trace_id,
            "conversationTurnId": conversation_turn_id,
-           "pastChatLength": 0,
-           "selectedChatMode": "smart_routing", # Updated from custom to smart_routing
-           "enable_agent_clarification_questions": "true",
+           "pastChatLength": len(self.conversation.history) if hasattr(self.conversation, "history") else 0,
+           "selectedChatMode": "custom",
+           "selectedAiModel": self.model,
+           # "enable_agent_clarification_questions": "true",
            "traceId": f"{trace_id}|{conversation_turn_id}|{current_time}",
            "use_nested_youchat_updates": "true"
        }
@@ -217,29 +219,31 @@ class YouChat(Provider):
        )

        streaming_text = ""
-       found_marker = False # Flag to track if we've passed the '####' marker
-
+       # New SSE event-based parsing
+       event_type = None
        for value in response.iter_lines(
            decode_unicode=True,
            chunk_size=self.stream_chunk_size,
            delimiter="\n",
        ):
-           try:
-               if bool(value) and value.startswith('data: ') and 'youChatToken' in value:
-                   data = json.loads(value[6:])
-                   token = data.get('youChatToken', '')
-
-                   # Check if this is the marker with '####'
-                   if token == '####':
-                       found_marker = True
-                       continue # Skip the marker itself
-
-                   # Only process tokens after the marker has been found
-                   if found_marker and token:
-                       streaming_text += token
-                       yield token if raw else dict(text=token)
-           except json.decoder.JSONDecodeError:
-               pass
+           if not value:
+               continue
+           if value.startswith("event: "):
+               event_type = value[7:].strip()
+               continue
+           if value.startswith("data: "):
+               data_str = value[6:]
+               if event_type == "youChatToken":
+                   try:
+                       data = json.loads(data_str)
+                       token = data.get("youChatToken", "")
+                       if token:
+                           streaming_text += token
+                           yield token if raw else dict(text=token)
+                   except Exception:
+                       pass
+               # Reset event_type after processing
+               event_type = None

        self.last_response.update(dict(text=streaming_text))
        self.conversation.update_chat_history(
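Note: the rewrite replaces the old '####' sentinel-token heuristic with proper SSE event handling: each "event:" line names the event and the following "data:" line carries its JSON payload. A standalone sketch of the same state machine over made-up frames:

import json

frames = [
    'event: youChatToken',
    'data: {"youChatToken": "Hel"}',
    'event: youChatToken',
    'data: {"youChatToken": "lo"}',
    'event: done',
    'data: {}',
]

event_type, text = None, ""
for line in frames:
    if line.startswith("event: "):
        event_type = line[7:].strip()   # remember the current event name
    elif line.startswith("data: "):
        if event_type == "youChatToken":
            text += json.loads(line[6:]).get("youChatToken", "")
        event_type = None               # consume the event, as in the diff
print(text)  # -> Hello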
webscout/Provider/__init__.py
@@ -9,8 +9,8 @@ from .Openai import OPENAI
 from .Openai import AsyncOPENAI
 from .Koboldai import KOBOLDAI
 from .Koboldai import AsyncKOBOLDAI
-from .Blackboxai import BLACKBOXAI
-from .Phind import PhindSearch
+from .Blackboxai import BLACKBOXAI
+from .Phind import PhindSearch
 from .Phind import Phindv2
 from .ai4chat import *
 from .Gemini import GEMINI
@@ -77,7 +77,6 @@ from .HuggingFaceChat import *
 from .GithubChat import *
 from .copilot import *
 from .C4ai import *
-from .flowith import *
 from .sonus import *
 from .uncovr import *
 from .labyrinth import *
@@ -92,12 +91,18 @@ from .searchchat import *
 from .ExaAI import ExaAI
 from .OpenGPT import OpenGPT
 from .scira_chat import *
+from .StandardInput import *
+from .Writecream import Writecream
+from .toolbaz import Toolbaz
+from .scnet import SCNet
+from .WritingMate import WritingMate
 __all__ = [
     'LLAMA',
+    'SCNet',
     'SciraAI',
+    'StandardInputAI',
     'LabyrinthAI',
     'OpenGPT',
-    'Flowith',
     'C4ai',
     'Venice',
     'ExaAI',
@@ -109,6 +114,7 @@ __all__ = [
     'PerplexityLabs',
     'AkashGPT',
     'DeepSeek',
+    'WritingMate',
     'WiseCat',
     'IBMGranite',
     'QwenLM',
@@ -124,8 +130,8 @@ __all__ = [
     'AsyncOPENAI',
     'KOBOLDAI',
     'AsyncKOBOLDAI',
-    'BLACKBOXAI',
-    'PhindSearch',
+    'BLACKBOXAI',
+    'PhindSearch',
     'GEMINI',
     'DeepInfra',
     'AI4Chat',
@@ -187,4 +193,6 @@ __all__ = [
     'AskSteve',
     'Aitopia',
     'SearchChatAI',
+    'Writecream',
+    'Toolbaz'
 ]
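Note: with these export changes, the providers added in 8.2 resolve from the package root, while Flowith no longer does. Assuming a standard webscout 8.2 install:

from webscout.Provider import Writecream, WritingMate, Toolbaz, SCNet  # new in 8.2
# from webscout.Provider import Flowith  # removed in 8.2; now raises ImportError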
webscout/Provider/koala.py
@@ -13,8 +13,8 @@ class KOALA(Provider):
     """

     AVAILABLE_MODELS = [
-        "gpt-4o-mini",
-        "gpt-4o",
+        "gpt-4.1-mini",
+        "gpt-4.1",
     ]

     def __init__(
webscout/Provider/llmchatco.py
@@ -20,6 +20,11 @@ class LLMChatCo(Provider):
         "gemini-flash-2.0", # Default model
         "llama-4-scout",
         "gpt-4o-mini",
+        "gpt-4.1-nano",
+
+
+        # "gpt-4.1",
+        # "gpt-4.1-mini",
         # "o3-mini",
         # "claude-3-5-sonnet",
         # "deepseek-r1",
webscout/Provider/scira_chat.py
@@ -1,3 +1,4 @@
+from os import system
 import requests
 import json
 import uuid
@@ -19,8 +20,11 @@ class SciraAI(Provider):
         "scira-default": "Grok3",
         "scira-grok-3-mini": "Grok3-mini", # thinking model
         "scira-vision" : "Grok2-Vision", # vision model
-        "scira-claude": "Sonnet-3.7",
-        "scira-optimus": "optimus",
+        "scira-4.1-mini": "GPT4.1-mini",
+        "scira-qwq": "QWQ-32B",
+        "scira-o4-mini": "o4-mini",
+        "scira-google": "gemini 2.5 flash"
+

     }

@@ -38,7 +42,8 @@ class SciraAI(Provider):
         model: str = "scira-default",
         chat_id: str = None,
         user_id: str = None,
-        browser: str = "chrome"
+        browser: str = "chrome",
+        system_prompt: str = "You are a helpful assistant.",
     ):
         """Initializes the Scira AI API client.

@@ -56,6 +61,7 @@ class SciraAI(Provider):
             chat_id (str): Unique identifier for the chat session.
             user_id (str): Unique identifier for the user.
             browser (str): Browser to emulate in requests.
+            system_prompt (str): System prompt for the AI.

         """
         if model not in self.AVAILABLE_MODELS:
@@ -67,7 +73,8 @@ class SciraAI(Provider):
         self.agent = LitAgent()
         # Use fingerprinting to create a consistent browser identity
         self.fingerprint = self.agent.generate_fingerprint(browser)
-
+        self.system_prompt = system_prompt
+
         # Use the fingerprint for headers
         self.headers = {
             "Accept": self.fingerprint["accept"],
@@ -158,18 +165,17 @@ class SciraAI(Provider):
             else:
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

+        messages = [
+            {"role": "system", "content": self.system_prompt},
+            {"role": "user", "content": conversation_prompt, "parts": [{"type": "text", "text": conversation_prompt}]}
+        ]
+
         # Prepare the request payload
         payload = {
             "id": self.chat_id,
-            "messages": [
-                {
-                    "role": "user",
-                    "content": conversation_prompt,
-                    "parts": [{"type": "text", "text": conversation_prompt}]
-                }
-            ],
+            "messages": messages,
             "model": self.model,
-            "group": "chat", # Always use chat mode (no web search)
+            "group": self.search_mode,
             "user_id": self.user_id,
             "timezone": "Asia/Calcutta"
         }
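Note: after this hunk, Scira requests carry an explicit system turn and a configurable group instead of the hard-coded "chat". A representative request body, assuming self.search_mode resolves to the plain "chat" group; the IDs are placeholders:

payload = {
    "id": "<chat_id>",
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hi!", "parts": [{"type": "text", "text": "Hi!"}]},
    ],
    "model": "scira-default",
    "group": "chat",  # taken from self.search_mode in the diff
    "user_id": "<user_id>",
    "timezone": "Asia/Calcutta",
}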