webscout 6.0__py3-none-any.whl → 6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

@@ -0,0 +1,200 @@
+import requests
+import json
+import os
+from typing import Any, Dict, Optional, Generator, List, Union
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+
+
+class NinjaChat(Provider):
+    """
+    A class to interact with the NinjaChat API.
+    """
+
+    AVAILABLE_MODELS = {
+        "mistral": "https://www.ninjachat.ai/api/mistral",
+        "perplexity": "https://www.ninjachat.ai/api/perplexity",
+        "claude-3.5": "https://www.ninjachat.ai/api/claude-pro",
+        "gemini-1.5-pro": "https://www.ninjachat.ai/api/gemini",
+        "llama": "https://www.ninjachat.ai/api/llama-pro",
+        "o1-mini": "https://www.ninjachat.ai/api/o1-mini",
+    }
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,  # System message/intro prompt
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "perplexity",  # Default model
+        system_message: str = "You are a helpful AI assistant.",  # Default system message
+    ):
+        """Initializes the NinjaChat API client."""
+
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "Content-Type": "application/json",
+            "Cookie": "_ga=GA1.1.298084589.1727859540; _ga_11N4NZX9WP=GS1.1.1727859539.1.0.1727859552.0.0.0; __stripe_mid=4f63db68-c41d-45b4-9111-2457a6cf1b538696a9; __Host-next-auth.csrf-token=a5cb5a40c73df3e808ebc072dcb116fe7dd4b9b8d39d8002ef7e54153e6aa665%7Cbffe3f934f2db43330d281453af2cd0b4757f439b958f2d1a06a36cea63e9cc8; __stripe_sid=118678d1-403a-43f9-b3b9-d80ed9392a0d2ac131; __Secure-next-auth.callback-url=https%3A%2F%2Fwww.ninjachat.ai%2Fdashboard; __Secure-next-auth.session-token=eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..l34CIFGTJCtstUqU.VjEYgaUUPpgp-49wueXFlFYvbm8csuyX0HichHrPNH45nX4s_LeZX2VhK1ZvwmUpfdlsMD4bi8VzFfQUEgs8FLPhkbKnoZDP939vobV7K_2Q9CA8PgC0oXEsQf_azWmILZ8rOE37uYzTu1evCnOjCucDYrC1ONXzl9NbGNPVa8AQr7hXvatuqtqe-lBUQXWdrw3QLulbqxvh6yLoxJj04gqC-nPudGciU-_-3TZJYr98u8o7KtUGio1ZX9vHNFfv8djWM1NCkji3Kl9eUhiyMj71.6uhUS39UcCVRa6tFzHxz2g; ph_phc_wWUtqcGWqyyochfPvwKlXMkMjIoIQKUwcnHE3KMKm8K_posthog=%7B%22distinct_id%22%3A%2201924c74-2926-7042-a1fb-5b5debdbcd1c%22%2C%22%24sesid%22%3A%5B1727966419499%2C%22019252bb-9de4-75db-9f85-a389fb401670%22%2C1727964880355%5D%7D",
+            "DNT": "1",
+            "Origin": "https://www.ninjachat.ai",
+            "Priority": "u=1, i",
+            "Referer": "https://www.ninjachat.ai/dashboard",
+            "Sec-CH-UA": '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": '"Windows"',
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "User-Agent": (
+                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
+                "AppleWebKit/537.36 (KHTML, like Gecko) "
+                "Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0"
+            )
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.last_response = {}
+        self.system_message = system_message
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        # Set the intro/system message.
+        # Priority: act > intro > system_message > Conversation.intro
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or system_message or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}")
+        self.model_url = self.AVAILABLE_MODELS[model]
+        self.headers["Referer"] = self.model_url  # Set initial referer
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict, Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt, intro=Conversation.intro)
+
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        # Include the system message in the payload.
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_message},
+                {"role": "user", "content": conversation_prompt},
+            ],
+            "stream": stream  # Now passed dynamically
+        }
+
+        def for_stream():
+            try:
+                with requests.post(self.model_url, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
+                    response.raise_for_status()
+                    streaming_text = ""
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line:
+                            if line.startswith("0:"):
+                                try:
+                                    text = json.loads(line[2:])  # Extract streaming text
+                                    streaming_text += text  # Accumulate for history
+                                    resp = dict(text=text)
+                                    yield text if raw else resp  # raw mode yields the plain text chunk
+                                except json.JSONDecodeError:
+                                    print("\n[Error] Failed to decode JSON content.")
+                            elif line.startswith("d:"):
+                                break  # End of stream
+                    self.conversation.update_chat_history(prompt, streaming_text)
+                    self.last_response.update({"text": streaming_text})
+            except requests.exceptions.RequestException as e:
+                print("An error occurred:", e)
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator]:
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt, False, optimizer=optimizer, conversationally=conversationally
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+
+if __name__ == "__main__":
+    from rich import print
+    bot = NinjaChat(model="gemini-1.5-pro", system_message="You are a creative writer.")
+
+    response = bot.chat("write a poem about a robot learning to love", stream=True)
+
+    for chunk in response:
+        print(chunk, end="", flush=True)
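
The ask/chat pair above follows the Provider pattern used elsewhere in webscout: ask yields {"text": ...} dicts (or plain text chunks when raw=True), and chat unwraps them via get_message. A minimal usage sketch, assuming the import path below and a still-valid session cookie (both are assumptions, not shown in this diff):

    from webscout.Provider.NinjaChat import NinjaChat  # assumed module location

    bot = NinjaChat(model="claude-3.5")

    # Non-streaming: ask() drains the stream internally and returns the
    # accumulated {"text": ...} dict stored in last_response.
    print(bot.ask("Summarize Hamlet in two lines.")["text"])

    # Streaming with raw=True yields plain text chunks instead of dicts.
    for chunk in bot.ask("Now as a limerick.", stream=True, raw=True):
        print(chunk, end="", flush=True)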
@@ -11,7 +11,7 @@ class NexraImager(ImageProvider):
     """Image provider for Nexra API"""
 
     AVAILABLE_MODELS = {
-        "standard": ["emi", "stablediffusion-1.5", "stablediffusion-2.1", "sdxl-lora", "dalle", "dalle2", "dalle-mini"],
+        "standard": ["emi", "stablediffusion-1.5", "stablediffusion-2.1", "sdxl-lora", "dalle", "dalle2", "dalle-mini", "flux", "midjourney"],
         "prodia": [
             "dreamshaperXL10_alpha2.safetensors [c8afe2ef]",
             "dynavisionXL_0411.safetensors [c39cc051]",
@@ -37,7 +37,7 @@ class NexraImager(ImageProvider):
         self.image_extension: str = "png"
 
     def generate(
-        self, prompt: str, model: str = "emi", amount: int = 1,
+        self, prompt: str, model: str = "flux", amount: int = 1,
         max_retries: int = 3, retry_delay: int = 5,
         additional_params: Optional[dict] = None
     ) -> List[bytes]:
@@ -116,5 +116,5 @@ class NexraImager(ImageProvider):
 
 if __name__ == "__main__":
     bot = NexraImager()
-    resp_standard = bot.generate("AI-generated image - webscout", "emi", 1)
+    resp_standard = bot.generate("AI-generated image - webscout", "midjourney", 1)
     print(bot.save(resp_standard))
@@ -6,4 +6,5 @@ from .Nexra import *
 from .huggingface import *
 from .artbit import *
 from .amigo import *
-from .WebSimAI import *
+from .WebSimAI import *
+from .imgninza import *
@@ -8,7 +8,7 @@ from requests.exceptions import RequestException
 
 from webscout.AIbase import ImageProvider
 
-class AiForceimagger(ImageProvider):
+class AiForceimager(ImageProvider):
     """Image provider for Airforce API"""
 
     AVAILABLE_MODELS = [
@@ -152,7 +152,7 @@ class AiForceimagger(ImageProvider):
         return filenames
 
 if __name__ == "__main__":
-    bot = AiForceimagger()
+    bot = AiForceimager()
     try:
         resp = bot.generate("A shiny red sports car speeding down a scenic mountain road", 1, model="flux-realism")
         print(bot.save(resp))
@@ -0,0 +1,136 @@
+import requests
+import json
+import os
+from typing import List, Dict, Optional
+
+from webscout.AIbase import ImageProvider
+from webscout import exceptions  # Import exceptions module
+
+
+class NinjaImager(ImageProvider):
+    """
+    Image provider for NinjaChat.ai.
+    """
+
+    AVAILABLE_MODELS = {
+        "stable-diffusion": "https://www.ninjachat.ai/api/image-generator",
+        "flux-dev": "https://www.ninjachat.ai/api/flux-image-generator",
+    }
+
+    def __init__(self, timeout: int = 60, proxies: dict = {}):
+        """Initializes the NinjaImager class."""
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "Content-Type": "application/json",
+            "DNT": "1",
+            "Origin": "https://www.ninjachat.ai",
+            "Priority": "u=1, i",
+            "Referer": "https://www.ninjachat.ai/image-generation",
+            "Sec-CH-UA": '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": '"Windows"',
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0",
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.prompt = "AI-generated image - webscout"
+        self.image_extension = "png"  # Default extension
+
+    def generate(self, prompt: str, amount: int = 1, model: str = "flux-dev") -> List[str]:
+        """Generate images from a prompt."""
+
+        assert bool(prompt), "Prompt cannot be null"
+        assert isinstance(amount, int) and amount > 0, "Amount should be a positive integer"
+
+        if model not in self.AVAILABLE_MODELS:
+            raise exceptions.ModelNotFoundError(f"Model '{model}' not found. Available models: {', '.join(self.AVAILABLE_MODELS)}")
+
+        self.prompt = prompt  # Store the prompt
+        url = self.AVAILABLE_MODELS[model]
+
+        payload = {
+            "prompt": prompt,
+            "model": model if model == "flux-dev" else "stable-diffusion",  # Pass the model name through to the API
+            "negativePrompt": "",  # Negative prompt field expected by the API (left empty)
+            "cfg": 7,
+            "aspectRatio": "1:1",
+            "outputFormat": self.image_extension,
+            "numOutputs": amount,
+            "outputQuality": 90
+        }
+
+        image_urls = []
+        try:
+            with requests.post(url, headers=self.headers, json=payload, timeout=self.timeout) as response:
+                if response.status_code != 200:
+                    raise exceptions.FailedToGenerateResponseError(f"Request failed with status code: {response.status_code}, {response.text}")  # Raise webscout exception
+
+                data = response.json()
+
+                if 'output' not in data:
+                    raise exceptions.InvalidResponseError("Invalid API response format: 'output' key missing.")
+
+                for img_url in data['output']:
+                    image_urls.append(img_url)
+
+        except requests.exceptions.RequestException as e:
+            raise exceptions.APIConnectionError(f"An error occurred during the request: {e}")  # More specific exception
+        except json.JSONDecodeError as e:
+            raise exceptions.InvalidResponseError(f"Failed to parse JSON response: {e}")
+
+        return image_urls
+
+    def save(
+        self,
+        response: List[str],
+        name: str = None,
+        dir: str = os.getcwd(),
+        filenames_prefix: str = "",
+    ) -> List[str]:
+        """Saves generated images."""
+
+        assert isinstance(response, list), f"Response should be a list, not {type(response)}"
+        name = self.prompt if name is None else name
+
+        filenames = []
+        count = 0
+        for img_url in response:
+            def complete_path():
+                count_value = "" if count == 0 else f"_{count}"
+                return os.path.join(dir, name + count_value + "." + self.image_extension)
+
+            while os.path.isfile(complete_path()):
+                count += 1
+
+            absolute_path_to_file = complete_path()
+            filenames.append(filenames_prefix + os.path.split(absolute_path_to_file)[1])
+            try:
+                with requests.get(img_url, stream=True, timeout=self.timeout) as img_response:
+                    img_response.raise_for_status()
+                    with open(absolute_path_to_file, "wb") as f:
+                        for chunk in img_response.iter_content(chunk_size=8192):
+                            f.write(chunk)
+
+            except requests.exceptions.RequestException as e:
+                raise exceptions.FailedToSaveImageError(f"An error occurred while downloading/saving image: {e}")
+
+        return filenames
+
+
+if __name__ == "__main__":
+    bot = NinjaImager()
+    try:
+        resp = bot.generate("A shiny red sports car speeding down a scenic mountain road", 1)
+        print(bot.save(resp))
+    except Exception as e:
+        print(f"An error occurred: {e}")
@@ -224,6 +224,6 @@ class YouChat(Provider):
 if __name__ == '__main__':
     from rich import print
     ai = YouChat(timeout=5000)
-    response = ai.chat("write a poem about AI", stream=True)
+    response = ai.chat("Who is Abhay Koul in AI?", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
@@ -62,6 +62,10 @@ from .Amigo import *
 from .prefind import *
 from .bagoodex import *
 from .ChatHub import *
+from .aimathgpt import *
+from .gaurish import *
+from .geminiprorealtime import *
+from .NinjaChat import *
 __all__ = [
     'Farfalle',
     'LLAMA',
@@ -126,7 +130,10 @@ __all__ = [
     'PrefindAI',
     'Bagoodex',
     'ChatHub',
-    # 'LearnFast',
+    'AIMathGPT',
+    'GaurishCerebras',
+    'GeminiPro',
+    'NinjaChat',
 
 
 ]
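
These wildcard imports plus the new __all__ entries re-export the added providers from the provider package. A minimal sketch of the resulting import surface (the webscout.Provider package path is an assumption; the diff only shows package-relative imports):

    # Assumed top-level path for the provider package.
    from webscout.Provider import NinjaChat, AIMathGPT, GaurishCerebras, GeminiPro

    # Any of the re-exported providers share the same chat() interface.
    for chunk in AIMathGPT().chat("Differentiate x**2 * sin(x).", stream=True):
        print(chunk, end="", flush=True)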
@@ -0,0 +1,193 @@
+import requests
+import json
+import os
+from typing import Any, Dict, Optional, Generator, List, Union
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+
+
+class AIMathGPT(Provider):
+    """
+    A class to interact with the AIMathGPT API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "llama3",  # Default model
+        system_prompt: str = "You are a helpful AI assistant.",
+    ):
+        """
+        Initializes the AIMathGPT API with the given parameters.
+        """
+        self.url = "https://aimathgpt.forit.ai/api/ai"
+        self.headers = {
+            "authority": "aimathgpt.forit.ai",
+            "method": "POST",
+            "path": "/api/ai",
+            "scheme": "https",
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "cookie": (
+                "NEXT_LOCALE=en; _ga=GA1.1.1515823701.1726936796; "
+                "_ga_1F3ZVN96B1=GS1.1.1726936795.1.1.1726936833.0.0.0"
+            ),
+            "dnt": "1",
+            "origin": "https://aimathgpt.forit.ai",
+            "priority": "u=1, i",
+            "referer": "https://aimathgpt.forit.ai/?ref=taaft&utm_source=taaft&utm_medium=referral",
+            "sec-ch-ua": (
+                "\"Microsoft Edge\";v=\"129\", \"Not=A?Brand\";v=\"8\", \"Chromium\";v=\"129\""
+            ),
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": "\"Windows\"",
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": (
+                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
+                "AppleWebKit/537.36 (KHTML, like Gecko) "
+                "Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0"
+            ),
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict, Generator]:
+        """Sends a chat completion request to the AIMathGPT API."""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt},
+            ],
+            "model": self.model,
+        }
+
+        def for_stream():
+            try:
+                with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
+                    if response.status_code != 200:
+                        raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}: {response.text}")
+
+                    streaming_text = ""
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line:
+                            try:
+                                data = json.loads(line)
+                                if 'result' in data and 'response' in data['result']:
+                                    content = data['result']['response']
+                                    streaming_text += content
+ resp = dict(text=content) # Yield only the new content
137
+ yield resp if raw else resp
138
+ else:
139
+ pass
140
+ except json.JSONDecodeError:
141
+ pass
142
+ self.conversation.update_chat_history(prompt, streaming_text)
143
+ self.last_response.update({"text": streaming_text})
144
+ except requests.exceptions.RequestException as e:
145
+ raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
146
+
147
+ def for_non_stream():
148
+ for _ in for_stream():
149
+ pass
150
+ return self.last_response
151
+
152
+ return for_stream() if stream else for_non_stream()
153
+
154
+
155
+
156
+ def chat(
157
+ self,
158
+ prompt: str,
159
+ stream: bool = False,
160
+ optimizer: str = None,
161
+ conversationally: bool = False,
162
+ ) -> Union[str, Generator]:
163
+
164
+ def for_stream():
165
+ for response in self.ask(
166
+ prompt, stream=True, optimizer=optimizer, conversationally=conversationally
167
+ ):
168
+ yield self.get_message(response)
169
+
170
+ def for_non_stream():
171
+ return self.get_message(
172
+ self.ask(
173
+ prompt, stream=False, optimizer=optimizer, conversationally=conversationally
174
+ )
175
+ )
176
+
177
+ return for_stream() if stream else for_non_stream()
178
+
179
+ def get_message(self, response: dict) -> str:
180
+ """Retrieves message only from response"""
181
+ assert isinstance(response, dict), "Response should be of dict data-type only"
182
+ return response["text"]
183
+
184
+
185
+ if __name__ == "__main__":
186
+ from rich import print
187
+ bot = AIMathGPT()
188
+ try:
189
+ response = bot.chat("What is the capital of France?", stream=True)
190
+ for chunk in response:
191
+ print(chunk, end="", flush=True)
192
+ except Exception as e:
193
+ print(f"An error occurred: {e}")
@@ -175,6 +175,6 @@ class Felo(Provider):
 if __name__ == '__main__':
     from rich import print
     ai = Felo()
-    response = ai.chat("hi")
+    response = ai.chat("tell me about HelpingAI-9B", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)