webscout-4.8-py3-none-any.whl → webscout-5.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

@@ -0,0 +1,286 @@
+ import time
+ import uuid
+ from selenium import webdriver
+ from selenium.webdriver.chrome.options import Options
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support import expected_conditions as EC
+ from selenium.webdriver.support.ui import WebDriverWait
+ import click
+ import requests
+ from requests import get
+ from uuid import uuid4
+ from re import findall
+ from requests.exceptions import RequestException
+ from curl_cffi.requests import get, RequestsError
+ import g4f
+ from random import randint
+ from PIL import Image
+ import io
+ import re
+ import json
+ import yaml
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from typing import Any, AsyncGenerator, Dict
+ import logging
+ import httpx
+ import cloudscraper
+
+ class Cloudflare(Provider):
+     """
+     This class provides methods for interacting with the Playground AI API
+     (Cloudflare) in a consistent provider structure for webscout.
+     """
+
+     AVAILABLE_MODELS = [
+         "@cf/llava-hf/llava-1.5-7b-hf",
+         "@cf/unum/uform-gen2-qwen-500m",
+         "@cf/facebook/detr-resnet-50",
+         "@cf/facebook/bart-large-cnn",
+         "@hf/thebloke/deepseek-coder-6.7b-base-awq",
+         "@hf/thebloke/deepseek-coder-6.7b-instruct-awq",
+         "@cf/deepseek-ai/deepseek-math-7b-base",
+         "@cf/deepseek-ai/deepseek-math-7b-instruct",
+         "@cf/thebloke/discolm-german-7b-v1-awq",
+         "@cf/tiiuae/falcon-7b-instruct",
+         "@cf/google/gemma-2b-it-lora",
+         "@hf/google/gemma-7b-it",
+         "@cf/google/gemma-7b-it-lora",
+         "@hf/nousresearch/hermes-2-pro-mistral-7b",
+         "@hf/thebloke/llama-2-13b-chat-awq",
+         "@cf/meta-llama/llama-2-7b-chat-hf-lora",
+         "@cf/meta/llama-3-8b-instruct",
+         "@cf/meta/llama-3-8b-instruct-awq",
+         "@cf/meta/llama-3.1-8b-instruct",
+         "@hf/thebloke/llamaguard-7b-awq",
+         "@hf/thebloke/mistral-7b-instruct-v0.1-awq",
+         "@hf/mistral/mistral-7b-instruct-v0.2",
+         "@cf/mistral/mistral-7b-instruct-v0.2-lora",
+         "@hf/thebloke/neural-chat-7b-v3-1-awq",
+         "@cf/openchat/openchat-3.5-0106",
+         "@hf/thebloke/openhermes-2.5-mistral-7b-awq",
+         "@cf/microsoft/phi-2",
+         "@cf/qwen/qwen1.5-0.5b-chat",
+         "@cf/qwen/qwen1.5-1.8b-chat",
+         "@cf/qwen/qwen1.5-14b-chat-awq",
+         "@cf/qwen/qwen1.5-7b-chat-awq",
+         "@cf/defog/sqlcoder-7b-2",
+         "@hf/nexusflow/starling-lm-7b-beta",
+         "@cf/tinyllama/tinyllama-1.1b-chat-v1.0",
+         "@cf/fblgit/una-cybertron-7b-v2-bf16",
+         "@hf/thebloke/zephyr-7b-beta-awq"
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "@cf/meta/llama-3.1-8b-instruct",
+         system_prompt: str = "You are a helpful assistant."
+     ):
+         """Instantiates Cloudflare
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             model (str, optional): Model to use for generating text.
+                 Defaults to "@cf/meta/llama-3.1-8b-instruct".
+                 Choose from AVAILABLE_MODELS.
+             system_prompt (str, optional): System prompt for Cloudflare.
+                 Defaults to "You are a helpful assistant.".
+         """
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.scraper = cloudscraper.create_scraper()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.chat_endpoint = "https://playground.ai.cloudflare.com/api/inference"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.system_prompt = system_prompt
+         self.headers = {
+             'Accept': 'text/event-stream',
+             'Accept-Encoding': 'gzip, deflate, br, zstd',
+             'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
+             'Content-Type': 'application/json',
+             'DNT': '1',
+             'Origin': 'https://playground.ai.cloudflare.com',
+             'Referer': 'https://playground.ai.cloudflare.com/',
+             'Sec-CH-UA': '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             'Sec-CH-UA-Mobile': '?0',
+             'Sec-CH-UA-Platform': '"Windows"',
+             'Sec-Fetch-Dest': 'empty',
+             'Sec-Fetch-Mode': 'cors',
+             'Sec-Fetch-Site': 'same-origin',
+             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0',
+         }
+
+         self.cookies = {
+             'cfzs_amplitude': uuid4().hex,
+             'cfz_amplitude': uuid4().hex,
+             '__cf_bm': uuid4().hex,
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         # FIX: Initialize the session here
+         self.session = cloudscraper.create_scraper()
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Whether to stream the response. Defaults to False.
+             raw (bool, optional): Whether to return the raw response. Defaults to False.
+             optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+             conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
+         Returns:
+             The response from the API.
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         payload = {
+             "messages": [
+                 {"role": "system", "content": self.system_prompt},
+                 {"role": "user", "content": conversation_prompt}
+             ],
+             "lora": None,
+             "model": self.model,
+             "max_tokens": 512,
+             "stream": True
+         }
+
+         def for_stream():
+             response = self.scraper.post(
+                 self.chat_endpoint, headers=self.headers, cookies=self.cookies, data=json.dumps(payload), stream=True, timeout=self.timeout
+             )
+
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason})"
+                 )
+             streaming_response = ""
+             for line in response.iter_lines(decode_unicode=True):
+                 if line.startswith('data: ') and line != 'data: [DONE]':
+                     data = json.loads(line[6:])
+                     content = data.get('response', '')
+                     streaming_response += content
+                     yield content if raw else dict(text=streaming_response)
+             self.last_response.update(dict(text=streaming_response))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+ if __name__ == '__main__':
+     from rich import print
+     ai = Cloudflare()
+     response = ai.chat(input(">>> "))
+     for chunk in response:
+         print(chunk, end="", flush=True)
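
The new Cloudflare provider streams Server-Sent Events from the playground endpoint, appending each `data:` payload's `response` field to the running text. A minimal usage sketch, assuming the class is exported from `webscout.Provider` like the package's other providers (the import path is not part of this diff):

```python
# Minimal usage sketch for the new Cloudflare provider.
# Assumption: the class is importable from webscout.Provider; only the
# class body appears in this diff, not where the package exports it.
from webscout.Provider import Cloudflare

ai = Cloudflare(model="@cf/meta/llama-3.1-8b-instruct", timeout=30)

# Non-streaming call: ask() drains the SSE generator internally and
# chat() returns the final accumulated text via get_message().
print(ai.chat("Say hello in five words."))
```

Note that in streaming mode `ask` yields the accumulated `streaming_response` so far rather than the per-event delta (unless `raw=True`), so a consumer printing each chunk with `end=""` will repeat earlier text; the `__main__` demo above sidesteps this by taking the non-streaming path and iterating over the final string.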
@@ -14,7 +14,7 @@ class DiscordRocks(Provider):
      A class to interact with the DiscordRocks API.
      """

-     available_models = [
+     AVAILABLE_MODELS = [
          "claude-3-haiku-20240307",
          "claude-3-sonnet-20240229",
          "claude-3-5-sonnet-20240620",
@@ -122,7 +122,7 @@ class DiscordRocks(Provider):

      def __init__(
          self,
-         model: str = "gpt-4o",
+         model: str = "llama-3.1-405b-turbo",
          max_tokens: int = 4096,
          temperature: float = 1,
          top_p: float = 1,
@@ -155,8 +155,8 @@ class DiscordRocks(Provider):
              act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
              system_prompt (str, optional): System prompt to guide the AI's behavior. Defaults to None.
          """
-         if model not in self.available_models:
-             raise ValueError(f"Invalid model name. Available models are: {self.available_models}")
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model name. Available models are: {self.AVAILABLE_MODELS}")


          self.model = model
@@ -176,6 +176,7 @@ class DiscordRocks(Provider):
              "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
              "content-type": "application/json",
              "dnt": "1",
+             "authorization": "Bearer missing api key",
              "origin": "https://llmplayground.net",
              "priority": "u=1, i",
              "referer": "https://llmplayground.net/",
@@ -14,7 +14,7 @@ class Farfalle(Provider):
      A class to interact with the Farfalle.dev API.
      """

-     available_models = [
+     AVAILABLE_MODELS = [
          "gpt-3.5-turbo",

      ]
@@ -50,8 +50,8 @@ class Farfalle(Provider):
              model (str, optional): AI model to use. Defaults to "gpt-3.5-turbo".
                  Options: "gpt-3.5-turbo", "gpt-4"
          """
-         if model not in self.available_models:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.available_models}")
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

          self.session = requests.Session()
          self.is_conversation = is_conversation
@@ -7,7 +7,7 @@ from webscout.AIbase import Provider

  class LLAMA3(Provider):

-     available_models = ["llama3-70b", "llama3-8b", "llama3-405b"]
+     AVAILABLE_MODELS = ["llama3-70b", "llama3-8b", "llama3-405b"]

      def __init__(
          self,
@@ -38,8 +38,8 @@ class LLAMA3(Provider):
              model (str, optional): Snova model name. Defaults to "llama3-70b".
              system (str, optional): System prompt for Snova. Defaults to "Answer as concisely as possible.".
          """
-         if model not in self.available_models:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.available_models}")
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

          self.session = requests.Session()
          self.is_conversation = is_conversation
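
The `available_models` → `AVAILABLE_MODELS` rename is applied uniformly to DiscordRocks, Farfalle, and LLAMA3, matching the constant naming the new Cloudflare provider uses, so the model list now reads as a public class-level constant. A short sketch of the lookup this enables, checking a model name before constructing the provider (import path assumed):

```python
# With the list exposed as an upper-case class constant, callers can
# validate a model name without instantiating the provider first.
from webscout.Provider import LLAMA3  # import path assumed

model = "llama3-70b"
if model not in LLAMA3.AVAILABLE_MODELS:
    raise ValueError(f"Choose from: {LLAMA3.AVAILABLE_MODELS}")
ai = LLAMA3(model=model)
```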
@@ -0,0 +1,208 @@
+ import cloudscraper
+ import json
+
+ import requests
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+
+ class PiAI(Provider):
+     def __init__(
+         self,
+         conversation_id: str,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+     ):
+         """Instantiates PiAI
+
+         Args:
+             conversation_id (str): The conversation ID for the Pi.ai chat.
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+         """
+         self.conversation_id = conversation_id
+         self.scraper = cloudscraper.create_scraper()
+         self.url = 'https://pi.ai/api/chat'
+         self.headers = {
+             'Accept': 'text/event-stream',
+             'Accept-Encoding': 'gzip, deflate, br, zstd',
+             'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
+             'Content-Type': 'application/json',
+             'DNT': '1',
+             'Origin': 'https://pi.ai',
+             'Referer': 'https://pi.ai/talk',
+             'Sec-CH-UA': '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             'Sec-CH-UA-Mobile': '?0',
+             'Sec-CH-UA-Platform': '"Windows"',
+             'Sec-Fetch-Dest': 'empty',
+             'Sec-Fetch-Mode': 'cors',
+             'Sec-Fetch-Site': 'same-origin',
+             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0',
+             'X-Api-Version': '3'
+         }
+         self.cookies = {
+             '__Host-session': 'Ca5SoyAMJEaaB79jj1T69',
+             '__cf_bm': 'g07oaL0jcstNfKDyZv7_YFjN0jnuBZjbMiXOWhy7V7A-1723536536-1.0.1.1-xwukd03L7oIAUqPG.OHbFNatDdHGZ28mRGsbsqfjBlpuy.b8w6UZIk8F3knMhhtNzwo4JQhBVdtYOlG0MvAw8A'
+         }
+
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict : {}
+         ```json
+         {
+             "text" : "How may I assist you today?"
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         data = {
+             'text': conversation_prompt,
+             'conversation': self.conversation_id
+         }
+
+         def for_stream():
+             response = self.scraper.post(self.url, headers=self.headers, cookies=self.cookies, json=data, stream=True, timeout=self.timeout)
+
+             streaming_text = ""
+             for line in response.iter_lines(decode_unicode=True):
+                 if line.startswith("data: "):
+                     json_data = line[6:]
+                     try:
+                         parsed_data = json.loads(json_data)
+                         if 'text' in parsed_data:
+                             streaming_text += parsed_data['text']
+                             resp = dict(text=streaming_text)
+                             self.last_response.update(resp)
+                             yield parsed_data if raw else resp
+                     except json.JSONDecodeError:
+                         continue
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response).encode('utf-8').decode('utf-8')
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             ).encode('utf-8').decode('utf-8')
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == '__main__':
+     from rich import print
+     ai = PiAI(conversation_id="6kti6HPbUKKWUAEpeD7vQ")
+     response = ai.chat(input(">>> "))
+     for chunk in response:
+         print(chunk, end="", flush=True)
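
PiAI follows the same provider template as the Cloudflare class above, but requires a `conversation_id` and ships with hard-coded `__Host-session` and `__cf_bm` cookies captured from a single browser session; the `__cf_bm` value embeds what looks like an August 2024 Unix timestamp, so these can be expected to stop working once Cloudflare rotates them. A usage sketch mirroring the file's own `__main__` block; the conversation ID is the placeholder from the diff, not a live session:

```python
# Usage sketch for the new PiAI provider. The conversation_id below is
# the placeholder from the diff's own __main__ block, not a live
# session; a real ID has to come from an active pi.ai conversation.
from webscout.Provider import PiAI  # import path assumed

ai = PiAI(conversation_id="6kti6HPbUKKWUAEpeD7vQ")
print(ai.chat("What can you do?"))  # non-streaming; returns the final text
```

As in the Cloudflare provider, streaming `chat` yields the accumulated text per event rather than deltas, and the `.encode('utf-8').decode('utf-8')` round-trip on the returned strings is a no-op.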