webscout-5.5-py3-none-any.whl → webscout-5.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. webscout/Agents/Onlinesearcher.py +3 -3
  2. webscout/Agents/__init__.py +0 -1
  3. webscout/Agents/functioncall.py +3 -3
  4. webscout/Provider/Bing.py +243 -0
  5. webscout/Provider/Chatify.py +1 -1
  6. webscout/Provider/Cloudflare.py +1 -1
  7. webscout/Provider/DARKAI.py +1 -1
  8. webscout/Provider/DiscordRocks.py +109 -246
  9. webscout/Provider/Farfalle.py +1 -1
  10. webscout/Provider/Free2GPT.py +234 -0
  11. webscout/{Agents/ai.py → Provider/GPTWeb.py} +40 -33
  12. webscout/Provider/Llama3.py +65 -62
  13. webscout/Provider/OLLAMA.py +1 -1
  14. webscout/Provider/PizzaGPT.py +1 -1
  15. webscout/Provider/RUBIKSAI.py +13 -3
  16. webscout/Provider/TTI/Nexra.py +120 -0
  17. webscout/Provider/TTI/__init__.py +3 -1
  18. webscout/Provider/TTI/blackboximage.py +153 -0
  19. webscout/Provider/TTI/deepinfra.py +2 -2
  20. webscout/Provider/TeachAnything.py +1 -1
  21. webscout/Provider/Youchat.py +1 -1
  22. webscout/Provider/__init__.py +11 -6
  23. webscout/Provider/{NetFly.py → aigames.py} +76 -79
  24. webscout/Provider/cleeai.py +1 -1
  25. webscout/Provider/elmo.py +1 -1
  26. webscout/Provider/felo_search.py +1 -1
  27. webscout/Provider/genspark.py +1 -1
  28. webscout/Provider/julius.py +7 -1
  29. webscout/Provider/lepton.py +1 -1
  30. webscout/Provider/meta.py +1 -1
  31. webscout/Provider/turboseek.py +1 -1
  32. webscout/Provider/upstage.py +230 -0
  33. webscout/Provider/x0gpt.py +1 -1
  34. webscout/Provider/xdash.py +1 -1
  35. webscout/Provider/yep.py +2 -2
  36. webscout/version.py +1 -1
  37. webscout/webai.py +1 -1
  38. {webscout-5.5.dist-info → webscout-5.6.dist-info}/METADATA +5 -29
  39. {webscout-5.5.dist-info → webscout-5.6.dist-info}/RECORD +43 -39
  40. webscout/Provider/ThinkAnyAI.py +0 -219
  41. {webscout-5.5.dist-info → webscout-5.6.dist-info}/LICENSE.md +0 -0
  42. {webscout-5.5.dist-info → webscout-5.6.dist-info}/WHEEL +0 -0
  43. {webscout-5.5.dist-info → webscout-5.6.dist-info}/entry_points.txt +0 -0
  44. {webscout-5.5.dist-info → webscout-5.6.dist-info}/top_level.txt +0 -0
webscout/Provider/TTI/Nexra.py ADDED
@@ -0,0 +1,120 @@
+ import requests
+ import json
+ import os
+ import time
+ from typing import List, Optional
+ from requests.exceptions import RequestException
+
+ from webscout.AIbase import ImageProvider
+
+ class NexraImager(ImageProvider):
+     """Image provider for Nexra API"""
+
+     AVAILABLE_MODELS = {
+         "standard": ["emi", "stablediffusion-1.5", "stablediffusion-2.1", "sdxl-lora", "dalle", "dalle2", "dalle-mini"],
+         "prodia": [
+             "dreamshaperXL10_alpha2.safetensors [c8afe2ef]",
+             "dynavisionXL_0411.safetensors [c39cc051]",
+             "juggernautXL_v45.safetensors [e75f5471]",
+             "realismEngineSDXL_v10.safetensors [af771c3f]",
+             "sd_xl_base_1.0.safetensors [be9edd61]",
+             "animagineXLV3_v30.safetensors [75f2f05b]",
+             "sd_xl_base_1.0_inpainting_0.1.safetensors [5679a81a]",
+             "turbovisionXL_v431.safetensors [78890989]",
+             "devlishphotorealism_sdxl15.safetensors [77cba69f]",
+             "realvisxlV40.safetensors [f7fdcb51]"
+         ]
+     }
+
+     def __init__(self, timeout: int = 60, proxies: dict = {}):
+         self.url = "https://nexra.aryahcr.cc/api/image/complements"
+         self.headers = {"Content-Type": "application/json"}
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+         self.timeout = timeout
+         self.prompt: str = "AI-generated image - webscout"
+         self.image_extension: str = "png"
+
+     def generate(
+         self, prompt: str, model: str = "emi", amount: int = 1,
+         max_retries: int = 3, retry_delay: int = 5,
+         additional_params: Optional[dict] = None
+     ) -> List[bytes]:
+         assert bool(prompt), "Prompt cannot be null"
+         assert isinstance(amount, int) and amount > 0, "Amount should be a positive integer"
+
+         self.prompt = prompt
+         response = []
+
+         payload = {
+             "prompt": prompt,
+             "model": "prodia" if model in self.AVAILABLE_MODELS["prodia"] else model,
+         }
+
+         if model in self.AVAILABLE_MODELS["prodia"]:
+             payload["data"] = {
+                 "model": model,
+                 "steps": 25,
+                 "cfg_scale": 7,
+                 "sampler": "DPM++ 2M Karras",
+                 "negative_prompt": ""
+             }
+         if additional_params:
+             payload.update(additional_params)
+
+         for _ in range(max_retries):
+             try:
+                 resp = self.session.post(self.url, json=payload, timeout=self.timeout)
+                 resp.raise_for_status()
+
+                 # Remove leading underscores and then parse JSON
+                 response_data = json.loads(resp.text.lstrip("_"))
+
+                 if response_data.get("status") and "images" in response_data:
+                     for image_url in response_data["images"]:
+                         img_resp = requests.get(image_url)
+                         img_resp.raise_for_status()
+                         response.append(img_resp.content)
+                     break
+                 else:
+                     raise Exception("Failed to generate image: " + str(response_data))
+             except json.JSONDecodeError as json_err:
+                 print(f"JSON Decode Error: {json_err}")
+                 print(f"Raw response: {resp.text}")
+                 if _ == max_retries - 1:
+                     raise
+             except RequestException as e:
+                 print(f"Request Exception: {e}")
+                 if _ == max_retries - 1:
+                     raise
+             print(f"Retrying in {retry_delay} seconds...")
+             time.sleep(retry_delay)
+
+         return response
+
+     def save(
+         self,
+         response: List[bytes],
+         name: str = None,
+         dir: str = os.getcwd(),
+         filenames_prefix: str = "",
+     ) -> List[str]:
+         assert isinstance(response, list), f"Response should be a list, not {type(response)}"
+         name = self.prompt if name is None else name
+
+         filenames = []
+         for i, image in enumerate(response):
+             filename = f"{filenames_prefix}{name}_{i}.{self.image_extension}"
+             filepath = os.path.join(dir, filename)
+
+             with open(filepath, "wb") as fh:
+                 fh.write(image)
+             filenames.append(filename)
+
+         return filenames
+
+ if __name__ == "__main__":
+     bot = NexraImager()
+     resp_standard = bot.generate("AI-generated image - webscout", "emi", 1)
+     print(bot.save(resp_standard))
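For context on the new provider: NexraImager posts a JSON payload to the Nexra `complements` endpoint, strips the underscore padding that Nexra prepends to its JSON, and downloads each returned image URL. A minimal usage sketch based on the `__main__` block above; the prompts, filenames, and `/tmp` target directory are illustrative only:

```python
from webscout.Provider.TTI import NexraImager

bot = NexraImager(timeout=120)  # generous timeout; some models are slow

# Standard model, as in the module's own __main__ example
images = bot.generate("a red panda in the snow", model="emi", amount=1)
print(bot.save(images, name="red_panda"))

# A Prodia checkpoint: generate() rewrites the payload to
# {"model": "prodia", "data": {"model": <checkpoint>, ...}}
checkpoint = NexraImager.AVAILABLE_MODELS["prodia"][0]
images = bot.generate("a red panda in the snow", model=checkpoint)
print(bot.save(images, dir="/tmp"))
```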
webscout/Provider/TTI/__init__.py CHANGED
@@ -1,3 +1,5 @@
  from .deepinfra import *
  from .PollinationsAI import *
- from .aiforce import *
+ from .aiforce import *
+ from .blackboximage import *
+ from .Nexra import *
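With these two re-exports in place, the new imagers should be importable straight from the TTI package (assuming, as with the existing wildcard exports, that the class names are public):

```python
from webscout.Provider.TTI import NexraImager, BlackboxAIImager
```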
webscout/Provider/TTI/blackboximage.py ADDED
@@ -0,0 +1,153 @@
+ import requests
+ import json
+ import uuid
+ import os
+ import time
+ from typing import List
+ from requests.exceptions import RequestException
+
+ from webscout.AIbase import ImageProvider
+
+ class BlackboxAIImager(ImageProvider):
+     """Image provider for Blackbox AI"""
+
+     def __init__(self, timeout: int = 60, proxies: dict = {}):
+         """Initializes the BlackboxAIImager class.
+
+         Args:
+             timeout (int, optional): HTTP request timeout in seconds. Defaults to 60.
+             proxies (dict, optional): HTTP request proxies. Defaults to {}.
+         """
+         self.url = "https://www.blackbox.ai/api/chat"
+         self.headers = {
+             "Content-Type": "application/json",
+             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0",
+             "Origin": "https://www.blackbox.ai",
+             "Referer": "https://www.blackbox.ai/agent/ImageGenerationLV45LJp"
+         }
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+         self.timeout = timeout
+         self.prompt: str = "AI-generated image - webscout"
+         self.image_extension: str = "jpg"
+
+     def generate(
+         self, prompt: str, amount: int = 1,
+         max_retries: int = 3, retry_delay: int = 5
+     ) -> List[bytes]:
+         """Generate image from prompt
+
+         Args:
+             prompt (str): Image description.
+             amount (int): Total images to be generated. Defaults to 1.
+             max_retries (int, optional): Maximum number of retry attempts. Defaults to 3.
+             retry_delay (int, optional): Delay between retries in seconds. Defaults to 5.
+
+         Returns:
+             List[bytes]: List of generated images as bytes.
+         """
+         assert bool(prompt), "Prompt cannot be null"
+         assert isinstance(amount, int), f"Amount should be an integer only not {type(amount)}"
+         assert amount > 0, "Amount should be greater than 0"
+
+         self.prompt = prompt
+         response = []
+
+         for _ in range(amount):
+             message_id = str(uuid.uuid4())
+             payload = {
+                 "messages": [
+                     {
+                         "id": message_id,
+                         "content": prompt,
+                         "role": "user"
+                     }
+                 ],
+                 "id": message_id,
+                 "previewToken": None,
+                 "userId": None,
+                 "codeModelMode": True,
+                 "agentMode": {
+                     "mode": True,
+                     "id": "ImageGenerationLV45LJp",
+                     "name": "Image Generation"
+                 },
+                 "trendingAgentMode": {},
+                 "isMicMode": False,
+                 "maxTokens": 1024,
+                 "isChromeExt": False,
+                 "githubToken": None,
+                 "clickedAnswer2": False,
+                 "clickedAnswer3": False,
+                 "clickedForceWebSearch": False,
+                 "visitFromDelta": False,
+                 "mobileClient": False
+             }
+
+             for attempt in range(max_retries):
+                 try:
+                     resp = self.session.post(self.url, json=payload, timeout=self.timeout)
+                     resp.raise_for_status()
+                     response_data = resp.text
+                     image_url = response_data.split("(")[1].split(")")[0]
+                     image_response = requests.get(image_url)
+                     image_response.raise_for_status()
+                     response.append(image_response.content)
+                     break
+                 except RequestException as e:
+                     if attempt == max_retries - 1:
+                         print(f"Failed to generate image after {max_retries} attempts: {e}")
+                         raise
+                     else:
+                         print(f"Attempt {attempt + 1} failed. Retrying in {retry_delay} seconds...")
+                         time.sleep(retry_delay)
+
+         return response
+
+     def save(
+         self,
+         response: List[bytes],
+         name: str = None,
+         dir: str = os.getcwd(),
+         filenames_prefix: str = "",
+     ) -> List[str]:
+         """Save generated images
+
+         Args:
+             response (List[bytes]): List of generated images as bytes.
+             name (str): Filename for the images. Defaults to the last prompt.
+             dir (str, optional): Directory for saving images. Defaults to os.getcwd().
+             filenames_prefix (str, optional): String to be prefixed at each filename to be returned.
+
+         Returns:
+             List[str]: List of saved filenames.
+         """
+         assert isinstance(response, list), f"Response should be of {list} not {type(response)}"
+         name = self.prompt if name is None else name
+
+         filenames = []
+         count = 0
+         for image in response:
+             def complete_path():
+                 count_value = "" if count == 0 else f"_{count}"
+                 return os.path.join(dir, name + count_value + "." + self.image_extension)
+
+             while os.path.isfile(complete_path()):
+                 count += 1
+
+             absolute_path_to_file = complete_path()
+             filenames.append(filenames_prefix + os.path.split(absolute_path_to_file)[1])
+
+             with open(absolute_path_to_file, "wb") as fh:
+                 fh.write(image)
+
+         return filenames
+
+ if __name__ == "__main__":
+     bot = BlackboxAIImager()
+     try:
+         resp = bot.generate("AI-generated image - webscout", 1)
+         print(bot.save(resp))
+     except Exception as e:
+         print(f"An error occurred: {e}")
webscout/Provider/TTI/deepinfra.py CHANGED
@@ -13,7 +13,7 @@ class DeepInfraImager(ImageProvider):
 
      def __init__(
          self,
-         model: str = "black-forest-labs/FLUX-1-dev",
+         model: str = "black-forest-labs/FLUX-1-schnell",
          timeout: int = 60,
          proxies: dict = {},
      ):
@@ -21,7 +21,7 @@ class DeepInfraImager(ImageProvider):
 
          Args:
              model (str, optional): The name of the DeepInfra model to use.
-                 Defaults to "black-forest-labs/FLUX-1-dev".
+                 Defaults to "black-forest-labs/FLUX-1-schnell".
              timeout (int, optional): Http request timeout. Defaults to 60 seconds.
              proxies (dict, optional): Http request proxies (socks). Defaults to {}.
          """
webscout/Provider/TeachAnything.py CHANGED
@@ -172,6 +172,6 @@ class TeachAnything:
  if __name__ == '__main__':
      from rich import print
      ai = TeachAnything()
-     response = ai.chat(input(">>> "))
+     response = ai.chat("hi")
      for chunk in response:
          print(chunk, end="", flush=True)
webscout/Provider/Youchat.py CHANGED
@@ -225,6 +225,6 @@ class YouChat(Provider):
  if __name__ == '__main__':
      from rich import print
      ai = YouChat()
-     response = ai.chat(input(">>> "))
+     response = ai.chat("hi")
      for chunk in response:
          print(chunk, end="", flush=True)
webscout/Provider/__init__.py CHANGED
@@ -1,6 +1,4 @@
  # webscout/providers/__init__.py
- 
- from .ThinkAnyAI import ThinkAnyAI
  from .PI import *
  from .Llama import LLAMA
  from .Cohere import Cohere
@@ -40,7 +38,7 @@ from .Youchat import *
  from .yep import *
  from .Cloudflare import *
  from .turboseek import *
- from .NetFly import *
+ from .Free2GPT import *
  from .EDITEE import *
  from .TeachAnything import *
  from .AI21 import *
@@ -51,8 +49,11 @@ from .lepton import *
  from .geminiapi import *
  from .elmo import *
  from .genspark import *
+ from .upstage import *
+ from .Bing import *
+ from .GPTWeb import *
+ from .aigames import *
  __all__ = [
-     'ThinkAnyAI',
      'Farfalle',
      'LLAMA',
      'Cohere',
@@ -93,7 +94,6 @@ __all__ = [
      'YEPCHAT',
      'Cloudflare',
      'TurboSeek',
-     'NetFly',
      'Editee',
      'TeachAnything',
      'AI21',
@@ -104,6 +104,11 @@ __all__ = [
      'GEMINIAPI',
      'Cleeai',
      'Elmo',
-     'Genspark'
+     'Genspark',
+     'Upstage',
+     'Free2GPT',
+     'Bing',
+     'GPTWeb',
+     'AIGameIO',
 
  ]
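Net effect on the public API: `ThinkAnyAI` and `NetFly` are removed, while `Upstage`, `Free2GPT`, `Bing`, `GPTWeb`, and `AIGameIO` are added to `__all__`. Assuming each new module exports the class named in `__all__`, imports in 5.6 look like:

```python
# New in 5.6
from webscout.Provider import Upstage, Free2GPT, Bing, GPTWeb, AIGameIO

# Removed in 5.6; these imports now raise ImportError:
# from webscout.Provider import ThinkAnyAI, NetFly
```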
webscout/Provider/{NetFly.py → aigames.py} RENAMED
@@ -1,24 +1,17 @@
  import requests
- 
- from random import randint
- 
+ import uuid
  import json
 
  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts, sanitize_stream
- from webscout.AIbase import Provider, AsyncProvider
- from webscout import exceptions
- from typing import Any, AsyncGenerator, Dict
- 
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
 
- class NetFly(Provider):
+ class AIGameIO(Provider):
      """
-     A class to interact with the NetFly API.
+     A class to interact with the AI-Game.io API.
      """
 
-     AVAILABLE_MODELS = ["gpt-3.5-turbo"]
- 
      def __init__(
          self,
          is_conversation: bool = True,
@@ -30,11 +23,10 @@ class NetFly(Provider):
          proxies: dict = {},
          history_offset: int = 10250,
          act: str = None,
-         model: str = "gpt-3.5-turbo",
-         system_prompt: str = "You are a helpful and friendly AI assistant.",
+         system_prompt: str = "You are a Helpful ai"
      ):
          """
-         Initializes the NetFly API with given parameters.
+         Initializes the AI-Game.io API with given parameters.
 
          Args:
              is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
@@ -46,36 +38,26 @@ class NetFly(Provider):
              proxies (dict, optional): Http request proxies. Defaults to {}.
              history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
              act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             model (str, optional): AI model to use for text generation. Defaults to "gpt-3.5-turbo".
-             system_prompt (str, optional): System prompt for NetFly. Defaults to the provided string.
+             system_prompt (str, optional): System prompt for AI-Game.io.
+                 Defaults to "You are a Helpful ai".
          """
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Available model is: {self.AVAILABLE_MODELS[0]}")
- 
          self.session = requests.Session()
          self.is_conversation = is_conversation
          self.max_tokens_to_sample = max_tokens
-         self.api_endpoint = "https://free.netfly.top/api/openai/v1/chat/completions"
+         self.api_endpoint = 'https://stream-chat-blmeirpipa-uc.a.run.app/streamChat'
          self.stream_chunk_size = 64
          self.timeout = timeout
          self.last_response = {}
-         self.model = model
          self.system_prompt = system_prompt
          self.headers = {
-             "accept": "application/json, text/event-stream",
-             "accept-encoding": "gzip, deflate, br, zstd",
-             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-             "content-type": "application/json",
-             "dnt": "1",
-             "origin": "https://free.netfly.top",
-             "referer": "https://free.netfly.top/",
-             "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-             "sec-ch-ua-mobile": "?0",
-             "sec-ch-ua-platform": '"Windows"',
-             "sec-fetch-dest": "empty",
-             "sec-fetch-mode": "cors",
-             "sec-fetch-site": "same-origin",
-             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
+             'authority': 'stream-chat-blmeirpipa-uc.a.run.app',
+             'method': 'POST',
+             'path': '/streamChat',
+             'accept': 'text/event-stream',
+             'content-type': 'application/json',
+             'origin': 'https://www.ai-game.io',
+             'priority': 'u=1, i',
+             'referer': 'https://www.ai-game.io/',
          }
 
          self.__available_optimizers = (
@@ -105,6 +87,22 @@
          optimizer: str = None,
          conversationally: bool = False,
      ) -> dict:
+         """Chat with AI
+ 
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict : {}
+         ```json
+         {
+             "text" : "How may I assist you today?"
+         }
+         ```
+         """
          conversation_prompt = self.conversation.gen_complete_prompt(prompt)
          if optimizer:
              if optimizer in self.__available_optimizers:
@@ -115,58 +113,46 @@
                  raise Exception(
                      f"Optimizer is not one of {self.__available_optimizers}"
                  )
- 
+ 
          payload = {
-             "messages": [
-                 {"role": "system", "content": self.system_prompt},
-                 {"role": "user", "content": conversation_prompt},
-             ],
-             "stream": True,
-             "model": self.model,
-             "temperature": 0.5,
-             "presence_penalty": 0,
-             "frequency_penalty": 0,
-             "top_p": 1
+             "history": [
+                 {
+                     "role": "system",
+                     "content": self.system_prompt
+                 },
+                 {
+                     "role": "user",
+                     "content": conversation_prompt
+                 }
+             ]
          }
- 
          def for_stream():
              response = self.session.post(
-                 self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
+                 self.api_endpoint, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout
             )
- 
             if not response.ok:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason})"
+                 raise Exception(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
 
-             full_response = ""
+             full_response = ''
             for line in response.iter_lines(decode_unicode=True):
-                 if line:
-                     if line.startswith("data: "):
-                         json_data = line[6:]
-                         if json_data == "[DONE]":
-                             break
-                         try:
-                             data = json.loads(json_data)
-                             content = data["choices"][0]["delta"].get("content", "")
-                             full_response += content
-                             yield content if raw else dict(text=content)
-                         except json.decoder.JSONDecodeError:
-                             continue
- 
+                 if line.startswith("data: "):
+                     try:
+                         event_data = json.loads(line[6:])
+                         if event_data['event'] == 'text-chunk':
+                             full_response += event_data['data']['text']
+                             yield event_data['data']['text'] if raw else dict(text=full_response)
+                     except json.JSONDecodeError:
+                         pass
             self.last_response.update(dict(text=full_response))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )
- 
          def for_non_stream():
-             full_response = ""
-             for chunk in for_stream():
-                 if isinstance(chunk, dict):
-                     full_response += chunk['text']
-                 else:
-                     full_response += chunk
-             return dict(text=full_response)
+             for _ in for_stream():
+                 pass
+             return self.last_response
 
          return for_stream() if stream else for_non_stream()
@@ -177,6 +163,16 @@
          optimizer: str = None,
          conversationally: bool = False,
      ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+ 
          def for_stream():
              for response in self.ask(
                  prompt, True, optimizer=optimizer, conversationally=conversationally
@@ -207,10 +203,11 @@
          assert isinstance(response, dict), "Response should be of dict data-type only"
          return response["text"]
 
- if __name__ == '__main__':
+ 
+ if __name__ == "__main__":
      from rich import print
-     ai = NetFly()
-     response = ai.chat("tell me about india", stream=True)
+ 
+     ai = AIGameIO()
+     response = ai.chat("hi")
      for chunk in response:
-         print(chunk, end="", flush=True)
-         print() # Add a newline at the end
+         print(chunk, end="", flush=True)
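The renamed provider keeps the standard webscout `Provider` interface (`ask`/`chat`/`get_message`). One behavioral detail worth noting from `for_stream` above: each non-raw chunk carries the accumulated text so far, not just the new delta, so `raw=True` is the cleaner way to stream. A short sketch; the prompts are illustrative:

```python
from webscout.Provider import AIGameIO

ai = AIGameIO()

# Non-streaming: drains the SSE stream internally, returns the final text
print(ai.chat("hi"))

# Streaming deltas: raw=True yields each 'text-chunk' event's text on its own
for delta in ai.ask("tell me a joke", stream=True, raw=True):
    print(delta, end="", flush=True)
```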
webscout/Provider/cleeai.py CHANGED
@@ -207,6 +207,6 @@ class Cleeai(Provider):
  if __name__ == "__main__":
      from rich import print
      ai = Cleeai()
-     response = ai.chat(input(">>> "))
+     response = ai.chat("hi")
      for chunk in response:
          print(chunk, end="", flush=True)
webscout/Provider/elmo.py CHANGED
@@ -232,6 +232,6 @@ class Elmo(Provider):
  if __name__ == "__main__":
      from rich import print
      ai = Elmo()
-     response = ai.chat(input(">>> "))
+     response = ai.chat("hi")
      for chunk in response:
          print(chunk, end="", flush=True)
webscout/Provider/felo_search.py CHANGED
@@ -175,6 +175,6 @@ class Felo(Provider):
  if __name__ == '__main__':
      from rich import print
      ai = Felo()
-     response = ai.chat(input(">>> "))
+     response = ai.chat("hi")
      for chunk in response:
          print(chunk, end="", flush=True)
webscout/Provider/genspark.py CHANGED
@@ -217,6 +217,6 @@ class Genspark(Provider):
  if __name__ == "__main__":
      from rich import print
      ai = Genspark()
-     response = ai.chat(input(">>> "))
+     response = ai.chat("hi")
      for chunk in response:
          print(chunk, end="", flush=True)
webscout/Provider/julius.py CHANGED
@@ -19,6 +19,12 @@ class Julius(Provider):
          "Command R",
          "Gemini Flash",
          "Gemini 1.5",
+         "Claude Sonnet",
+         "Claude Opus",
+         "Claude Haiku",
+         "GPT-4",
+         "GPT-4o mini",
+         "Command R+",
      ]
      def __init__(
          self,
@@ -241,6 +247,6 @@ class Julius(Provider):
  if __name__ == '__main__':
      from rich import print
      ai = Julius()
-     response = ai.chat(input(">>> "))
+     response = ai.chat("hi")
      for chunk in response:
          print(chunk, end="", flush=True)
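Six models join Julius's AVAILABLE_MODELS. The `__init__` signature is truncated in this diff, so whether the model is selected via a `model` keyword is an assumption here, but following the pattern of the other providers it would look like:

```python
from webscout.Provider import Julius

# Hypothetical: the 'model' keyword is assumed, since __init__'s
# parameters are not shown in this diff
ai = Julius(model="Claude Sonnet")
response = ai.chat("hi")
for chunk in response:
    print(chunk, end="", flush=True)
```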
webscout/Provider/lepton.py CHANGED
@@ -189,6 +189,6 @@ class Lepton(Provider):
  if __name__ == '__main__':
      from rich import print
      ai = Lepton()
-     response = ai.chat("tell me about india")
+     response = ai.chat("hi")
      for chunk in response:
          print(chunk, end="", flush=True)