webscout 6.0 (py3-none-any.whl) → 6.2 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (63)
  1. webscout/AIauto.py +77 -259
  2. webscout/Agents/Onlinesearcher.py +22 -10
  3. webscout/Agents/functioncall.py +2 -2
  4. webscout/Bard.py +21 -21
  5. webscout/Extra/autollama.py +37 -20
  6. webscout/Local/__init__.py +6 -7
  7. webscout/Local/formats.py +406 -194
  8. webscout/Local/model.py +1074 -477
  9. webscout/Local/samplers.py +108 -144
  10. webscout/Local/thread.py +251 -410
  11. webscout/Local/ui.py +401 -0
  12. webscout/Local/utils.py +338 -136
  13. webscout/Provider/Amigo.py +51 -38
  14. webscout/Provider/Deepseek.py +7 -6
  15. webscout/Provider/EDITEE.py +2 -2
  16. webscout/Provider/GPTWeb.py +1 -1
  17. webscout/Provider/Llama3.py +1 -1
  18. webscout/Provider/NinjaChat.py +200 -0
  19. webscout/Provider/OLLAMA.py +1 -1
  20. webscout/Provider/Perplexity.py +1 -1
  21. webscout/Provider/Reka.py +12 -5
  22. webscout/Provider/TTI/AIuncensored.py +103 -0
  23. webscout/Provider/TTI/Nexra.py +3 -3
  24. webscout/Provider/TTI/__init__.py +4 -2
  25. webscout/Provider/TTI/aiforce.py +2 -2
  26. webscout/Provider/TTI/imgninza.py +136 -0
  27. webscout/Provider/TTI/talkai.py +116 -0
  28. webscout/Provider/TeachAnything.py +0 -3
  29. webscout/Provider/Youchat.py +1 -1
  30. webscout/Provider/__init__.py +16 -12
  31. webscout/Provider/{ChatHub.py → aimathgpt.py} +72 -88
  32. webscout/Provider/cerebras.py +143 -123
  33. webscout/Provider/cleeai.py +1 -1
  34. webscout/Provider/felo_search.py +1 -1
  35. webscout/Provider/gaurish.py +207 -0
  36. webscout/Provider/geminiprorealtime.py +160 -0
  37. webscout/Provider/genspark.py +1 -1
  38. webscout/Provider/julius.py +8 -3
  39. webscout/Provider/learnfastai.py +1 -1
  40. webscout/Provider/{aigames.py → llmchat.py} +74 -84
  41. webscout/Provider/promptrefine.py +3 -1
  42. webscout/Provider/talkai.py +196 -0
  43. webscout/Provider/turboseek.py +3 -8
  44. webscout/Provider/tutorai.py +1 -1
  45. webscout/__init__.py +2 -43
  46. webscout/exceptions.py +5 -1
  47. webscout/tempid.py +4 -73
  48. webscout/utils.py +3 -0
  49. webscout/version.py +1 -1
  50. webscout/webai.py +1 -1
  51. webscout/webscout_search.py +154 -123
  52. {webscout-6.0.dist-info → webscout-6.2.dist-info}/METADATA +164 -245
  53. {webscout-6.0.dist-info → webscout-6.2.dist-info}/RECORD +57 -55
  54. webscout/Local/rawdog.py +0 -946
  55. webscout/Provider/BasedGPT.py +0 -214
  56. webscout/Provider/TTI/amigo.py +0 -148
  57. webscout/Provider/bixin.py +0 -264
  58. webscout/Provider/xdash.py +0 -182
  59. webscout/websx_search.py +0 -19
  60. {webscout-6.0.dist-info → webscout-6.2.dist-info}/LICENSE.md +0 -0
  61. {webscout-6.0.dist-info → webscout-6.2.dist-info}/WHEEL +0 -0
  62. {webscout-6.0.dist-info → webscout-6.2.dist-info}/entry_points.txt +0 -0
  63. {webscout-6.0.dist-info → webscout-6.2.dist-info}/top_level.txt +0 -0
webscout/Provider/geminiprorealtime.py
@@ -0,0 +1,160 @@
+ import requests
+ import json
+ import os
+ import secrets
+ from typing import Any, Dict, Optional, Generator, Union
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+
+
+ class GeminiPro(Provider):
+     """
+     A class to interact with the Minitool AI API.
+     """
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2049,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+     ):
+         """Initializes the Minitool AI API client."""
+         self.url = "https://minitoolai.com/test_python/"
+         self.headers = {
+             'authority': 'minitoolai.com',
+             'method': 'POST',
+             'path': '/test_python/',
+             'scheme': 'https',
+             'accept': '*/*',
+             'content-type': 'application/json',
+             'dnt': '1',
+             'origin': 'https://minitoolai.com',
+             'priority': 'u=1, i',
+             'referer': 'https://minitoolai.com/Gemini-Pro/',
+             'sec-ch-ua': '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
+             'sec-ch-ua-mobile': '?0',
+             'sec-ch-ua-platform': '"Windows"',
+             'sec-fetch-dest': 'empty',
+             'sec-fetch-mode': 'cors',
+             'sec-fetch-site': 'same-origin',
+             'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0',
+             'x-requested-with': 'XMLHttpRequest'
+         }
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+         self.timeout = timeout
+         self.last_response = {}
+
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict, Generator]:
+         """Sends a chat completion request to the Minitool AI API."""
+
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+
+         payload = {"utoken": secrets.token_hex(32), "message": conversation_prompt}
+
+         def for_stream():
+             # MinitoolAI doesn't support streaming; emulate with a single yield
+             try:
+                 response = self.session.post(self.url, json=payload, timeout=self.timeout)
+                 response.raise_for_status()
+                 data = response.json()
+                 text = data.get("response", "")  # Get response, default to "" if missing
+                 self.last_response.update({"text": text})
+                 yield {"text": text}  # Yield the entire response
+             except requests.exceptions.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+             self.conversation.update_chat_history(prompt, text)  # Update chat history
+
+
+         def for_non_stream():
+             for _ in for_stream(): pass  # Update last_response
+             return self.last_response
+
+
+         return for_stream() if stream else for_non_stream()
+
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator]:
+         """Generate response `str`"""
+         def for_stream():
+             for response in self.ask(
+                 prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response"""
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response.get("text", "")  # Handle missing keys
+
+
+ if __name__ == "__main__":
+     from rich import print
+     bot = GeminiPro()
+     try:
+         response = bot.chat("hi", stream=True)
+         for chunk in response:
+             print(chunk, end="", flush=True)
+     except Exception as e:
+         print(f"An error occurred: {e}")
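A minimal usage sketch for the new provider above. The module path `webscout.Provider.geminiprorealtime` is assumed from the files-changed list, and note that streaming is emulated: the backend returns one complete response, so the generator yields a single chunk.

```python
# Hypothetical quick-start for the GeminiPro provider added in 6.2.
# Assumes webscout>=6.2 and the import path below (inferred from the file list).
from webscout.Provider.geminiprorealtime import GeminiPro

bot = GeminiPro(timeout=30)

# chat() without stream returns a plain string.
print(bot.chat("hi"))

# stream=True yields strings, but the whole reply arrives as one chunk,
# because for_stream() performs a single POST and a single yield.
for chunk in bot.chat("hi", stream=True):
    print(chunk, end="", flush=True)
```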
webscout/Provider/genspark.py
@@ -220,6 +220,6 @@ class Genspark(Provider):
  if __name__ == '__main__':
      from rich import print
      ai = Genspark()
-     response = ai.chat(input(">>> "))
+     response = ai.chat("tell me about Abhay koul, HelpingAI", stream=True)
      for chunk in response:
          print(chunk, end="", flush=True)
webscout/Provider/julius.py
@@ -25,9 +25,14 @@ class Julius(Provider):
          "GPT-4",
          "GPT-4o mini",
          "Command R+",
+         "o1-mini",
+         "o1-preview",
+
+
      ]
      def __init__(
          self,
+         api_key: str,
          is_conversation: bool = True,
          max_tokens: int = 600,
          timeout: int = 30,
@@ -65,12 +70,12 @@ class Julius(Provider):
          self.timeout = timeout
          self.last_response = {}
          self.model = model
+         self.api_key = api_key
          self.headers = {
-             "authorization": "Bearer",
+             "authorization": f"Bearer {self.api_key}",
              "content-type": "application/json",
              "conversation-id": str(uuid.uuid4()),
              "interactive-charts": "true",
-             "is-demo": "temp_14aabbb1-95bc-4203-a678-596258d6fdf3",
              "is-native": "false",
              "orient-split": "true",
              "request-id": str(uuid.uuid4()),
@@ -212,7 +217,7 @@ class Julius(Provider):
          return response["text"]
  if __name__ == '__main__':
      from rich import print
-     ai = Julius(timeout=5000)
+     ai = Julius(api_key="", timeout=5000)
      response = ai.chat("write a poem about AI", stream=True)
      for chunk in response:
          print(chunk, end="", flush=True)
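The Julius change above makes `api_key` a required constructor argument, sent as a Bearer token in place of the removed hard-coded demo token. A sketch of the new call pattern, with the import path assumed from `webscout/Provider/julius.py`:

```python
# Julius in 6.2: api_key is now required and sent as "authorization: Bearer <key>".
from webscout.Provider.julius import Julius  # import path assumed from the file list

ai = Julius(api_key="YOUR_JULIUS_API_KEY", timeout=30)  # placeholder key
for chunk in ai.chat("write a poem about AI", stream=True):
    print(chunk, end="", flush=True)
```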
webscout/Provider/learnfastai.py
@@ -248,6 +248,6 @@ class LearnFast(Provider):
  if __name__ == "__main__":
      from rich import print
      ai = LearnFast()
-     response = ai.chat(input(">>> "), image_path="photo_2024-07-06_22-19-42.jpg")
+     response = ai.chat(input(">>> "), image_path=None)
      for chunk in response:
          print(chunk, end="", flush=True)
webscout/Provider/{aigames.py → llmchat.py}
@@ -1,21 +1,29 @@
  import requests
- import uuid
  import json
+ from typing import Any, Dict, Optional, Generator, List
 
  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
  from webscout.AIutel import AwesomePrompts
  from webscout.AIbase import Provider
+ from webscout import exceptions
 
- class AIGameIO(Provider):
+ class LLMChat(Provider):
      """
-     A class to interact with the AI-Game.io API.
+     A class to interact with the LLMChat API.
      """
 
+     AVAILABLE_MODELS = [
+         "@cf/meta/llama-3.1-70b-instruct",
+         "@cf/meta/llama-3.1-8b-instruct",
+         "@cf/meta/llama-3.2-3b-instruct",
+         "@cf/meta/llama-3.2-1b-instruct"
+     ]
+
      def __init__(
          self,
          is_conversation: bool = True,
-         max_tokens: int = 600,
+         max_tokens: int = 2048,
          timeout: int = 30,
          intro: str = None,
          filepath: str = None,
@@ -23,49 +31,35 @@ class AIGameIO(Provider):
          proxies: dict = {},
          history_offset: int = 10250,
          act: str = None,
-         system_prompt: str = "You are a Helpful ai"
+         model: str = "@cf/meta/llama-3.1-70b-instruct",  # Default model
+         system_prompt: str = "You are a helpful assistant.",
      ):
          """
-         Initializes the AI-Game.io API with given parameters.
-
-         Args:
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             system_prompt (str, optional): System prompt for AI-Game.io.
-                 Defaults to "You are a Helpful ai".
+         Initializes the LLMChat API with given parameters.
          """
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
          self.session = requests.Session()
          self.is_conversation = is_conversation
          self.max_tokens_to_sample = max_tokens
-         self.api_endpoint = 'https://stream-chat-blmeirpipa-uc.a.run.app/streamChat'
-         self.stream_chunk_size = 64
+         self.api_endpoint = "https://llmchat.in/inference/stream"
          self.timeout = timeout
          self.last_response = {}
+         self.model = model
          self.system_prompt = system_prompt
          self.headers = {
-             'authority': 'stream-chat-blmeirpipa-uc.a.run.app',
-             'method': 'POST',
-             'path': '/streamChat',
-             'accept': 'text/event-stream',
-             'content-type': 'application/json',
-             'origin': 'https://www.ai-game.io',
-             'priority': 'u=1, i',
-             'referer': 'https://www.ai-game.io/',
+             "Content-Type": "application/json",
+             "Accept": "*/*",
+             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0",
+             "Origin": "https://llmchat.in",
+             "Referer": "https://llmchat.in/"
          }
-
          self.__available_optimizers = (
              method
              for method in dir(Optimizers)
              if callable(getattr(Optimizers, method)) and not method.startswith("__")
          )
-         self.session.headers.update(self.headers)
          Conversation.intro = (
              AwesomePrompts().get_act(
                  act, raise_not_found=True, default=None, case_insensitive=True
@@ -86,22 +80,17 @@ class AIGameIO(Provider):
          raw: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
+     ) -> Dict[str, Any]:
+         """Chat with LLMChat
 
          Args:
-             prompt (str): Prompt to be send.
+             prompt (str): Prompt to be sent.
              stream (bool, optional): Flag for streaming response. Defaults to False.
              raw (bool, optional): Stream back raw response as received. Defaults to False.
              optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
              conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
          Returns:
-             dict : {}
-             ```json
-             {
-                 "text" : "How may I assist you today?"
-             }
-             ```
+             dict: Response dictionary.
          """
          conversation_prompt = self.conversation.gen_complete_prompt(prompt)
          if optimizer:
@@ -110,49 +99,51 @@ class AIGameIO(Provider):
                      conversation_prompt if conversationally else prompt
                  )
              else:
-                 raise Exception(
+                 raise exceptions.FailedToGenerateResponseError(
                      f"Optimizer is not one of {self.__available_optimizers}"
                  )
-
+
+         url = f"{self.api_endpoint}?model={self.model}"
          payload = {
-             "history": [
-                 {
-                     "role": "system",
-                     "content": self.system_prompt
-                 },
-                 {
-                     "role": "user",
-                     "content": conversation_prompt
-                 }
-             ]
+             "messages": [
+                 {"role": "system", "content": self.system_prompt},
+                 {"role": "user", "content": conversation_prompt}
+             ],
+             "max_tokens": self.max_tokens_to_sample,
+             "stream": stream
          }
-         def for_stream():
-             response = self.session.post(
-                 self.api_endpoint, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout
-             )
-             if not response.ok:
-                 raise Exception(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
 
-             full_response = ''
-             for line in response.iter_lines(decode_unicode=True):
-                 if line.startswith("data: "):
-                     try:
-                         event_data = json.loads(line[6:])
-                         if event_data['event'] == 'text-chunk':
-                             full_response += event_data['data']['text']
-                             yield event_data['data']['text'] if raw else dict(text=full_response)
-                     except json.JSONDecodeError:
-                         pass
-             self.last_response.update(dict(text=full_response))
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
+         def for_stream():
+             try:
+                 with requests.post(url, json=payload, headers=self.headers, stream=True, timeout=self.timeout) as response:
+                     response.raise_for_status()
+                     full_response = ""
+                     for line in response.iter_lines():
+                         if line:
+                             line = line.decode('utf-8')
+                             if line.startswith('data: '):
+                                 try:
+                                     data = json.loads(line[6:])
+                                     if data.get('response'):
+                                         response_text = data['response']
+                                         full_response += response_text
+                                         yield response_text if raw else dict(text=response_text)
+                                 except json.JSONDecodeError:
+                                     if line.strip() != 'data: [DONE]':
+                                         print(f"Failed to parse line: {line}")
+                                     continue
+                     self.last_response.update(dict(text=full_response))
+                     self.conversation.update_chat_history(
+                         prompt, self.get_message(self.last_response)
+                     )
+             except requests.exceptions.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
          def for_non_stream():
-             for _ in for_stream():
-                 pass
-             return self.last_response
+             full_response = ""
+             for line in for_stream():
+                 full_response += line['text'] if not raw else line
+             return dict(text=full_response)
 
          return for_stream() if stream else for_non_stream()
 
@@ -162,7 +153,7 @@ class AIGameIO(Provider):
          stream: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
-     ) -> str:
+     ) -> str | Generator[str, None, None]:
          """Generate response `str`
          Args:
              prompt (str): Prompt to be send.
@@ -191,14 +182,14 @@ class AIGameIO(Provider):
 
          return for_stream() if stream else for_non_stream()
 
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
+     def get_message(self, response: Dict[str, Any]) -> str:
+         """Retrieves message only from response.
 
          Args:
              response (dict): Response generated by `self.ask`
 
          Returns:
-             str: Message extracted
+             str: Message extracted.
          """
          assert isinstance(response, dict), "Response should be of dict data-type only"
          return response["text"]
@@ -206,8 +197,7 @@ class AIGameIO(Provider):
 
  if __name__ == "__main__":
      from rich import print
-
-     ai = AIGameIO()
-     response = ai.chat("hi")
+     ai = LLMChat(model='@cf/meta/llama-3.1-70b-instruct')
+     response = ai.chat("What's the meaning of life?", stream=True)
      for chunk in response:
          print(chunk, end="", flush=True)
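Taken together, the rename turns the old AIGameIO provider into LLMChat, backed by Cloudflare Workers AI model IDs served through llmchat.in. A minimal sketch, assuming the import path follows the new file name:

```python
# LLMChat (formerly AIGameIO) in 6.2: model must be in AVAILABLE_MODELS.
from webscout.Provider.llmchat import LLMChat  # path assumed from the rename above

ai = LLMChat(model="@cf/meta/llama-3.2-1b-instruct", max_tokens=512)

# Invalid model IDs fail fast in __init__:
# LLMChat(model="gpt-4")  -> ValueError: Invalid model: gpt-4. Choose from: [...]

for chunk in ai.chat("What's the meaning of life?", stream=True):
    print(chunk, end="", flush=True)
```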
webscout/Provider/promptrefine.py
@@ -12,7 +12,7 @@ class PromptRefine(Provider):
      """
      A class to interact with the PromptRefine API.
      """
-
+     AVAILABLE_MODELS = ["openai/gpt-4", "openai/gpt-4o", "openai/gpt-4-1106-preview"]
      def __init__(
          self,
          is_conversation: bool = True,
@@ -76,6 +76,8 @@ class PromptRefine(Provider):
          )
          self.conversation.history_offset = history_offset
          self.session.proxies = proxies
+         if self.model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {self.model}. Available models: {', '.join(self.AVAILABLE_MODELS)}")
 
      def ask(
          self,
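PromptRefine now validates `self.model` against the new whitelist at the end of `__init__`. A sketch of the resulting behavior, assuming the constructor accepts a `model` keyword as the `self.model` check implies:

```python
# PromptRefine model whitelist added in 6.2 (import path assumed from the file list).
from webscout.Provider.promptrefine import PromptRefine

ai = PromptRefine(model="openai/gpt-4o")  # accepted: listed in AVAILABLE_MODELS

try:
    PromptRefine(model="openai/gpt-3.5-turbo")  # rejected: not in the whitelist
except ValueError as e:
    print(e)  # Invalid model: openai/gpt-3.5-turbo. Available models: ...
```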
webscout/Provider/talkai.py
@@ -0,0 +1,196 @@
+ import uuid
+ import requests
+ import json
+ from typing import Any, Dict, Optional, Generator
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+
+ class Talkai(Provider):
+     """
+     A class to interact with the Talkai.info API.
+     """
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2048,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "gpt-4o-mini",  # Default model
+     ):
+         """
+         Initializes the Talkai.info API with given parameters.
+         """
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = "https://talkai.info/chat/send/"
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.headers = {
+             'Accept': 'application/json, text/event-stream',
+             'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
+             'Content-Type': 'application/json',
+             'Origin': 'https://talkai.info',
+             'Referer': 'https://talkai.info/chat/',
+             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0',
+             'sec-ch-ua': '"Chromium";v="130", "Microsoft Edge";v="130", "Not?A_Brand";v="99"',
+             'sec-ch-ua-platform': '"Windows"'
+         }
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Dict[str, Any]:
+         """Chat with Talkai
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict: Response dictionary.
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         payload = {
+             "type": "chat",
+             "messagesHistory": [
+                 {
+                     "id": str(uuid.uuid4()),
+                     "from": "you",
+                     "content": conversation_prompt
+                 }
+             ],
+             "settings": {
+                 "model": self.model
+             }
+         }
+
+         def for_stream():
+             try:
+                 with requests.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
+                     response.raise_for_status()
+
+                     full_response = ""
+                     for line in response.iter_lines():
+                         if line:
+                             decoded_line = line.decode('utf-8')
+                             if 'event: trylimit' in decoded_line:
+                                 break  # Stop if trylimit event is encountered
+                             if decoded_line.startswith('data: '):
+                                 data = decoded_line[6:]  # Remove 'data: ' prefix
+                                 full_response += data
+                                 yield data if raw else dict(text=data)
+
+                     self.last_response.update(dict(text=full_response))
+                     self.conversation.update_chat_history(
+                         prompt, self.get_message(self.last_response)
+                     )
+
+             except requests.exceptions.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+         def for_non_stream():
+             full_response = ""
+             for line in for_stream():
+                 full_response += line['text'] if not raw else line
+             return dict(text=full_response)
+
+         return for_stream() if stream else for_non_stream()
+
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str | Generator[str, None, None]:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: Dict[str, Any]) -> str:
+         """Retrieves message only from response.
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted.
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+ if __name__ == "__main__":
+     t = Talkai()
+     resp = t.chat("write me about AI", stream=True)
+     for chunk in resp:
+         print(chunk, end="", flush=True)
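Finally, a usage sketch for the new Talkai provider. The import path is assumed from the files-changed list; note that non-streaming calls are built by draining the same SSE stream the streaming path uses.

```python
# Talkai provider added in 6.2 (path assumed from webscout/Provider/talkai.py).
from webscout.Provider.talkai import Talkai

ai = Talkai(model="gpt-4o-mini", timeout=30)

# Non-streaming: for_non_stream() concatenates every SSE "data:" chunk.
print(ai.chat("Explain server-sent events in two sentences."))

# Streaming: each "data:" line is yielded as it arrives, until an
# "event: trylimit" line ends the stream.
for chunk in ai.chat("write me about AI", stream=True):
    print(chunk, end="", flush=True)
```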