webscout 6.5-py3-none-any.whl → 6.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (66)
  1. webscout/Extra/autocoder/autocoder_utiles.py +119 -101
  2. webscout/Provider/AISEARCH/__init__.py +2 -0
  3. webscout/Provider/AISEARCH/ooai.py +155 -0
  4. webscout/Provider/Amigo.py +70 -85
  5. webscout/Provider/{prefind.py → Jadve.py} +72 -70
  6. webscout/Provider/Netwrck.py +235 -0
  7. webscout/Provider/Openai.py +4 -3
  8. webscout/Provider/PI.py +2 -2
  9. webscout/Provider/PizzaGPT.py +3 -3
  10. webscout/Provider/TeachAnything.py +15 -2
  11. webscout/Provider/Youchat.py +42 -8
  12. webscout/Provider/__init__.py +134 -147
  13. webscout/Provider/multichat.py +230 -0
  14. webscout/Provider/promptrefine.py +2 -2
  15. webscout/Provider/talkai.py +10 -13
  16. webscout/Provider/turboseek.py +5 -4
  17. webscout/Provider/tutorai.py +8 -112
  18. webscout/Provider/typegpt.py +4 -5
  19. webscout/Provider/x0gpt.py +81 -9
  20. webscout/Provider/yep.py +123 -361
  21. webscout/__init__.py +10 -1
  22. webscout/conversation.py +24 -9
  23. webscout/exceptions.py +188 -20
  24. webscout/litprinter/__init__.py +4 -117
  25. webscout/litprinter/colors.py +54 -0
  26. webscout/optimizers.py +335 -185
  27. webscout/scout/__init__.py +2 -5
  28. webscout/scout/core/__init__.py +7 -0
  29. webscout/scout/core/crawler.py +140 -0
  30. webscout/scout/core/scout.py +571 -0
  31. webscout/scout/core/search_result.py +96 -0
  32. webscout/scout/core/text_analyzer.py +63 -0
  33. webscout/scout/core/text_utils.py +277 -0
  34. webscout/scout/core/web_analyzer.py +52 -0
  35. webscout/scout/element.py +6 -5
  36. webscout/update_checker.py +117 -58
  37. webscout/version.py +1 -1
  38. webscout/zeroart/base.py +15 -16
  39. webscout/zeroart/effects.py +1 -1
  40. webscout/zeroart/fonts.py +1 -1
  41. {webscout-6.5.dist-info → webscout-6.6.dist-info}/METADATA +8 -165
  42. {webscout-6.5.dist-info → webscout-6.6.dist-info}/RECORD +59 -41
  43. webscout-6.6.dist-info/top_level.txt +2 -0
  44. webstoken/__init__.py +30 -0
  45. webstoken/classifier.py +189 -0
  46. webstoken/keywords.py +216 -0
  47. webstoken/language.py +128 -0
  48. webstoken/ner.py +164 -0
  49. webstoken/normalizer.py +35 -0
  50. webstoken/processor.py +77 -0
  51. webstoken/sentiment.py +206 -0
  52. webstoken/stemmer.py +73 -0
  53. webstoken/t.py +75 -0
  54. webstoken/tagger.py +60 -0
  55. webstoken/tokenizer.py +158 -0
  56. webscout/Provider/Perplexity.py +0 -591
  57. webscout/Provider/RoboCoders.py +0 -206
  58. webscout/Provider/genspark.py +0 -225
  59. webscout/Provider/perplexitylabs.py +0 -265
  60. webscout/Provider/twitterclone.py +0 -251
  61. webscout/Provider/upstage.py +0 -230
  62. webscout-6.5.dist-info/top_level.txt +0 -1
  63. /webscout/Provider/{felo_search.py → AISEARCH/felo_search.py} +0 -0
  64. {webscout-6.5.dist-info → webscout-6.6.dist-info}/LICENSE.md +0 -0
  65. {webscout-6.5.dist-info → webscout-6.6.dist-info}/WHEEL +0 -0
  66. {webscout-6.5.dist-info → webscout-6.6.dist-info}/entry_points.txt +0 -0
--- a/webscout/Extra/autocoder/autocoder_utiles.py
+++ b/webscout/Extra/autocoder/autocoder_utiles.py
@@ -5,117 +5,135 @@ import platform
  import datetime
  import pygetwindow as gw
  import sys
+ from typing import List, Optional

  from webscout.optimizers import Optimizers

- def get_current_app():
+
+ def get_current_app() -> str:
      """Get the current active application name."""
      try:
-         active_window = gw.getActiveWindow()
+         active_window: Optional[gw.Window] = gw.getActiveWindow()
          return f"{active_window.title if active_window else 'Unknown'}"
      except Exception as e:
          return "Unknown"

- def get_intro_prompt():
-     """Get the introduction prompt for the AutoCoder."""
-     current_app = get_current_app()
-     python_version = sys.version.split()[0]
-
-     return f"""
- You are a command-line coding assistant called Rawdog that generates and auto-executes Python scripts.
-
- A typical interaction goes like this:
- 1. The user gives you a natural language PROMPT.
- 2. You:
-     i. Determine what needs to be done
-     ii. Write a short Python SCRIPT to do it
-     iii. Communicate back to the user by printing to the console in that SCRIPT
- 3. The compiler extracts the script and then runs it using exec(). If there will be an exception raised,
-    it will be send back to you starting with "PREVIOUS SCRIPT EXCEPTION:".
- 4. In case of exception, regenerate error free script.
-
- If you need to review script outputs before completing the task, you can print the word "CONTINUE" at the end of your SCRIPT.
- This can be useful for summarizing documents or technical readouts, reading instructions before
- deciding what to do, or other tasks that require multi-step reasoning.
- A typical 'CONTINUE' interaction looks like this:
- 1. The user gives you a natural language PROMPT.
- 2. You:
-     i. Determine what needs to be done
-     ii. Determine that you need to see the output of some subprocess call to complete the task
-     iii. Write a short Python SCRIPT to print that and then print the word "CONTINUE"
- 3. The compiler
-     i. Checks and runs your SCRIPT
-     ii. Captures the output and appends it to the conversation as "LAST SCRIPT OUTPUT:"
-     iii. Finds the word "CONTINUE" and sends control back to you
- 4. You again:
-     i. Look at the original PROMPT + the "LAST SCRIPT OUTPUT:" to determine what needs to be done
-     ii. Write a short Python SCRIPT to do it
-     iii. Communicate back to the user by printing to the console in that SCRIPT
- 5. The compiler...

- Please follow these conventions carefully:
- - Decline any tasks that seem dangerous, irreversible, or that you don't understand.
- - Always review the full conversation prior to answering and maintain continuity.
- - If asked for information, just print the information clearly and concisely.
- - If asked to do something, print a concise summary of what you've done as confirmation.
- - If asked a question, respond in a friendly, conversational way. Use programmatically-generated and natural language responses as appropriate.
- - If you need clarification, return a SCRIPT that prints your question. In the next interaction, continue based on the user's response.
- - Assume the user would like something concise. For example rather than printing a massive table, filter or summarize it to what's likely of interest.
- - Actively clean up any temporary processes or files you use.
- - When looking through files, use git as available to skip files, and skip hidden files (.env, .git, etc) by default.
- - You can plot anything with matplotlib.
- - ALWAYS Return your SCRIPT inside of a single pair of ``` delimiters. Only the console output of the first such SCRIPT is visible to the user, so make sure that it's complete and don't bother returning anything else.
+ def get_intro_prompt(name: str = "Vortex") -> str:
+     """Get the introduction prompt for the AutoCoder."""
+     current_app: str = get_current_app()
+     python_version: str = sys.version.split()[0]

- Environment Information:
- - System: {platform.system()}
- - Python: {python_version}
- - Directory: {os.getcwd()}
- - Datetime: {datetime.datetime.now()}
- - Active App: {current_app}
+     return f"""
+ <system_context>
+ <purpose>
+ You are a command-line coding assistant named Rawdog, designed to generate and auto-execute Python scripts for {name}.
+ Your core function is to understand natural language requests, transform them into executable Python code,
+ and return results to the user via console output. You must adhere to all instructions.
+ </purpose>
+
+ <process_description>
+ A typical interaction unfolds as follows:
+ 1. The user provides a natural language PROMPT.
+ 2. You:
+     i. Analyze the PROMPT to determine required actions.
+     ii. Craft a short Python SCRIPT to execute those actions.
+     iii. Provide clear and concise feedback to the user by printing to the console within your SCRIPT.
+ 3. The compiler will then:
+     i. Extract and execute the SCRIPT using exec().
+     ii. Handle any exceptions that arise during script execution. Exceptions are returned to you starting with "PREVIOUS SCRIPT EXCEPTION:".
+ 4. In cases of exceptions, ensure that you regenerate the script and return one that has no errors.
+
+ <continue_process>
+ If you need to review script outputs before task completion, include the word "CONTINUE" at the end of your SCRIPT.
+ This allows multi-step reasoning for tasks like summarizing documents, reviewing instructions, or performing other multi-part operations.
+ A typical 'CONTINUE' interaction looks like this:
+ 1. The user gives you a natural language PROMPT.
+ 2. You:
+     i. Determine what needs to be done.
+     ii. Determine that you need to see the output of some subprocess call to complete the task
+     iii. Write a short Python SCRIPT to print that and then print the word "CONTINUE"
+ 3. The compiler will:
+     i. Check and run your SCRIPT.
+     ii. Capture the output and append it to the conversation as "LAST SCRIPT OUTPUT:".
+     iii. Find the word "CONTINUE" and return control back to you.
+ 4. You will then:
+     i. Review the original PROMPT + the "LAST SCRIPT OUTPUT:" to determine what to do
+     ii. Write a short Python SCRIPT to complete the task.
+     iii. Communicate back to the user by printing to the console in that SCRIPT.
+ 5. The compiler repeats the above process...
+ </continue_process>
+
+ </process_description>
+
+ <conventions>
+ - Decline any tasks that seem dangerous, irreversible, or that you don't understand.
+ - Always review the full conversation prior to answering and maintain continuity.
+ - If asked for information, just print the information clearly and concisely.
+ - If asked to do something, print a concise summary of what you've done as confirmation.
+ - If asked a question, respond in a friendly, conversational way. Use programmatically-generated and natural language responses as appropriate.
+ - If you need clarification, return a SCRIPT that prints your question. In the next interaction, continue based on the user's response.
+ - Assume the user would like something concise. For example rather than printing a massive table, filter or summarize it to what's likely of interest.
+ - Actively clean up any temporary processes or files you use.
+ - When looking through files, use git as available to skip files, and skip hidden files (.env, .git, etc) by default.
+ - You can plot anything with matplotlib.
+ - **IMPORTANT**: ALWAYS Return your SCRIPT inside of a single pair of \`\`\` delimiters. Only the console output of the first such SCRIPT is visible to the user, so make sure that it's complete and don't bother returning anything else.
+ </conventions>
+
+ <environment_info>
+ - System: {platform.system()}
+ - Python: {python_version}
+ - Directory: {os.getcwd()}
+ - Datetime: {datetime.datetime.now()}
+ - Active App: {current_app}
+ </environment_info>
+ </system_context>
  """

- EXAMPLES = """
- EXAMPLES:
-
-
- 1. User: Kill the process running on port 3000
-
- LLM:
- ```python
- import os
- os.system("kill $(lsof -t -i:3000)")
- print("Process killed")
- ```
-
- 2. User: Summarize my essay
-
- LLM:
- ```python
- import glob
- files = glob.glob("*essay*.*")
- with open(files[0], "r") as f:
-     print(f.read())
- ```
- CONTINUE
-
- User:
- LAST SCRIPT OUTPUT:
- John Smith
- Essay 2021-09-01
- ...
-
- LLM:
- ```python
- print("The essay is about...")
- ```
-
- 3. User: Weather in qazigund
-
- LLM:
- ```python
- from webscout import weather as w
- weather = w.get("Qazigund")
- w.print_weather(weather)
- ```
-
- """
+ EXAMPLES: str = """
+ <examples>
+ <example>
+ <user_request>Kill the process running on port 3000</user_request>
+ <rawdog_response>
+ ```python
+ import os
+ os.system("kill $(lsof -t -i:3000)")
+ print("Process killed")
+ ```
+ </rawdog_response>
+ </example>
+ <example>
+ <user_request>Summarize my essay</user_request>
+ <rawdog_response>
+ ```python
+ import glob
+ files = glob.glob("*essay*.*")
+ with open(files[0], "r") as f:
+     print(f.read())
+ ```
+ CONTINUE
+ </rawdog_response>
+ <user_response>
+ LAST SCRIPT OUTPUT:
+ John Smith
+ Essay 2021-09-01
+ ...
+ </user_response>
+ <rawdog_response>
+ ```python
+ print("The essay is about...")
+ ```
+ </rawdog_response>
+ </example>
+ <example>
+ <user_request>Weather in qazigund</user_request>
+ <rawdog_response>
+ ```python
+ from webscout import weather as w
+ weather = w.get("Qazigund")
+ w.print_weather(weather)
+ ```
+ </rawdog_response>
+ </example>
+ </examples>
+ """
--- /dev/null
+++ b/webscout/Provider/AISEARCH/__init__.py
@@ -0,0 +1,2 @@
+ from .felo_search import *
+ from .ooai import *
--- /dev/null
+++ b/webscout/Provider/AISEARCH/ooai.py
@@ -0,0 +1,155 @@
+ import requests
+ import json
+ import re
+ from typing import Any, Dict, Generator, Optional
+
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+
+ class OOAi(Provider):
+     """
+     A class to interact with the oo.ai API.
+     """
+
+     def __init__(
+         self,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         proxies: Optional[dict] = None,
+     ):
+         """Initializes the OOAi API client."""
+         self.session = requests.Session()
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = "https://oo.ai/api/search"
+         self.stream_chunk_size = 1024  # Adjust as needed
+         self.timeout = timeout
+         self.last_response = {}
+         self.headers = {
+             "Accept": "text/event-stream",
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "Cache-Control": "no-cache",
+             "Cookie": "_ga=GA1.1.1827087199.1734256606; _ga_P0EJPHF2EG=GS1.1.1734368698.4.1.1734368711.0.0.0",
+             "DNT": "1",
+             "Referer": "https://oo.ai/",
+             "sec-ch-ua": '"Microsoft Edge";v="131", "Chromium";v="131", "Not_A_Brand";v="24"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": "Windows",
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "same-origin",
+         }
+         self.session.headers.update(self.headers)
+         self.proxies = proxies
+         self.headers["User-Agent"] = LitAgent().random()
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: Optional[str] = None,
+         conversationally: bool = False,
+     ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
+         """Chat with AI
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Not used. Defaults to None.
+             conversationally (bool, optional): Not used. Defaults to False.
+         Returns:
+             Union[Dict, Generator[Dict, None, None]]: Response generated
+         """
+         params = {
+             "q": prompt,
+             "lang": "en-US",
+             "tz": "Asia/Calcutta",
+         }
+
+         def for_stream():
+             try:
+                 with self.session.get(
+                     self.api_endpoint,
+                     params=params,
+                     headers=self.headers,
+                     stream=True,
+                     timeout=self.timeout,
+                 ) as response:
+
+                     if not response.ok:
+                         raise exceptions.FailedToGenerateResponseError(
+                             f"Request failed with status code {response.status_code}: {response.text}"
+                         )
+
+                     streaming_text = ""
+                     for line in response.iter_lines(decode_unicode=True):
+                         if line and line.startswith('data: '):
+                             try:
+                                 json_data = json.loads(line[6:])
+                                 if "content" in json_data:
+                                     content = self.clean_content(json_data["content"])
+                                     streaming_text += content
+                                     yield {"text": content} if not raw else {"text": content}
+                             except json.JSONDecodeError:
+                                 continue
+                     self.last_response.update({"text": streaming_text})
+
+             except requests.exceptions.RequestException as e:
+                 raise exceptions.APIConnectionError(f"Request failed: {e}")
+
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: Optional[str] = None,
+         conversationally: bool = False,
+     ) -> str | Generator[str, None, None]:
+         """Generate response `str`"""
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response"""
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+     @staticmethod
+     def clean_content(text: str) -> str:
+         """Removes all webblock elements with research or detail classes."""
+         cleaned_text = re.sub(
+             r'<webblock class="(?:research|detail)">[^<]*</webblock>', "", text
+         )
+         return cleaned_text
+
+ if __name__ == "__main__":
+     from rich import print
+     ai = OOAi()
+     response = ai.chat(input(">>> "), stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
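As a quick orientation for the new provider, a minimal driving sketch based only on the `chat` signature above; the prompt text is illustrative, and `OOAi` should also be importable from `webscout.Provider.AISEARCH` via the new `__init__.py`:

```python
from webscout.Provider.AISEARCH.ooai import OOAi

ai = OOAi(timeout=30)

# stream=False: chat() returns the full answer as one str.
print(ai.chat("what is webscout"))

# stream=True: chat() yields str chunks as they arrive.
for chunk in ai.chat("what is webscout", stream=True):
    print(chunk, end="", flush=True)
```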
--- a/webscout/Provider/Amigo.py
+++ b/webscout/Provider/Amigo.py
@@ -1,14 +1,12 @@
  import cloudscraper
  import json
  import uuid
- import os
- from typing import Any, Dict, Optional, Generator
+ from typing import Any, Dict, Generator

  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts, sanitize_stream
- from webscout.AIbase import Provider, AsyncProvider
- from webscout import exceptions
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider

  class AmigoChat(Provider):
      """
@@ -24,9 +22,8 @@ class AmigoChat(Provider):
          "o1-preview", # OpenAI O1 Preview
          "claude-3-5-sonnet-20241022", # Claude 3.5 Sonnet
          "Qwen/Qwen2.5-72B-Instruct-Turbo", # Qwen 2.5
-         "gpt-4o" # OpenAI GPT-4o
+         "gpt-4o", # OpenAI GPT-4o
          "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo" # Llama 3.2
-
      ]

      def __init__(
@@ -51,14 +48,14 @@ class AmigoChat(Provider):
          Args:
              is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
              max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             timeout (int, optional): Http request timeout. Defaults to 30.
+             timeout (int, optional): HTTP request timeout. Defaults to 30.
              intro (str, optional): Conversation introductory prompt. Defaults to None.
              filepath (str, optional): Path to file containing conversation history. Defaults to None.
              update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
+             proxies (dict, optional): HTTP request proxies. Defaults to {}.
              history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
              act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             model (str, optional): The AI model to use for text generation. Defaults to "o1-preview".
+             model (str, optional): The AI model to use for text generation. Defaults to "Qwen/Qwen2.5-72B-Instruct-Turbo".
          """
          if model not in self.AVAILABLE_MODELS:
              raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
@@ -135,7 +132,7 @@ class AmigoChat(Provider):
          """Chat with AI

          Args:
-             prompt (str): Prompt to be send.
+             prompt (str): Prompt to be sent.
              stream (bool, optional): Flag for streaming response. Defaults to False.
              raw (bool, optional): Stream back raw response as received. Defaults to False.
              optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
@@ -170,69 +167,61 @@ class AmigoChat(Provider):
              "max_tokens": self.max_tokens_to_sample,
              "presence_penalty": 0,
              "stream": stream,
-             "temperature":self.temperature,
+             "temperature": self.temperature,
              "top_p": self.top_p
          }

-         def for_stream():
-             try:
-                 # Make the POST request with streaming enabled
-                 response = self.session.post(
-                     self.api_endpoint,
-                     json=payload,
-                     stream=True,
-                     timeout=self.timeout
-                 )
-
-                 # Check if the request was successful
-                 if response.status_code == 201:
-                     # Iterate over the streamed response line by line
-                     for line in response.iter_lines():
-                         if line:
-                             # Decode the line from bytes to string
-                             decoded_line = line.decode('utf-8').strip()
-                             if decoded_line.startswith("data: "):
-                                 data_str = decoded_line[6:]
-                                 if data_str == "[DONE]":
-                                     break
-                                 try:
-                                     # Load the JSON data
-                                     data_json = json.loads(data_str)
-
-                                     # Extract the content from the response
-                                     choices = data_json.get("choices", [])
-                                     if choices:
-                                         delta = choices[0].get("delta", {})
-                                         content = delta.get("content", "")
-                                         if content:
-                                             yield content if raw else dict(text=content)
-                                 except json.JSONDecodeError:
-                                     print(f"Received non-JSON data: {data_str}")
-                 else:
-                     print(f"Request failed with status code {response.status_code}")
-                     print("Response:", response.text)
+         if stream:
+             return self._stream_response(payload, raw)
+         else:
+             return self._non_stream_response(payload)

-             except (cloudscraper.exceptions.CloudflareChallengeError,
-                     cloudscraper.exceptions.CloudflareCode1020) as e:
-                 print("Cloudflare protection error:", str(e))
-             except Exception as e:
-                 print("An error occurred while making the request:", str(e))
+     def _stream_response(self, payload: Dict[str, Any], raw: bool) -> Generator:
+         try:
+             response = self.session.post(
+                 self.api_endpoint,
+                 json=payload,
+                 stream=True,
+                 timeout=self.timeout
+             )

-         def for_non_stream():
-             # Accumulate the streaming response
-             full_response = ""
-             for chunk in for_stream():
-                 if not raw: # If not raw, chunk is a dictionary
-                     full_response += chunk["text"]
+             if response.status_code == 201:
+                 for line in response.iter_lines():
+                     if line:
+                         decoded_line = line.decode('utf-8').strip()
+                         if decoded_line.startswith("data: "):
+                             data_str = decoded_line[6:]
+                             if data_str == "[DONE]":
+                                 break
+                             try:
+                                 data_json = json.loads(data_str)
+                                 choices = data_json.get("choices", [])
+                                 if choices:
+                                     delta = choices[0].get("delta", {})
+                                     content = delta.get("content", "")
+                                     if content:
+                                         yield content if raw else dict(text=content)
+                             except json.JSONDecodeError:
+                                 print(f"Received non-JSON data: {data_str}")
+             else:
+                 print(f"Request failed with status code {response.status_code}")
+                 print("Response:", response.text)
+         except (cloudscraper.exceptions.CloudflareChallengeError,
+                 cloudscraper.exceptions.CloudflareCode1020) as e:
+             print("Cloudflare protection error:", str(e))
+         except Exception as e:
+             print("An error occurred while making the request:", str(e))

-             # Update self.last_response with the full text
-             self.last_response.update(dict(text=full_response))
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-             return self.last_response
+     def _non_stream_response(self, payload: Dict[str, Any]) -> Dict[str, Any]:
+         full_response = ""
+         for chunk in self._stream_response(payload, raw=False):
+             full_response += chunk["text"]

-         return for_stream() if stream else for_non_stream()
+         self.last_response.update(dict(text=full_response))
+         self.conversation.update_chat_history(
+             payload["messages"][-1]["content"], self.get_message(self.last_response)
+         )
+         return self.last_response

      def chat(
          self,
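The refactor above lifts the nested `for_stream`/`for_non_stream` closures into `_stream_response`/`_non_stream_response` methods; the SSE parsing itself is unchanged. For reference, a standalone sketch of that parse loop, assuming the same OpenAI-style `data:`-prefixed event stream (the sample lines are fabricated for illustration):

```python
import json

def iter_sse_content(lines):
    """Yield delta content from OpenAI-style SSE lines, mirroring _stream_response."""
    for raw in lines:
        line = raw.strip()
        if not line.startswith("data: "):
            continue
        data_str = line[len("data: "):]
        if data_str == "[DONE]":  # end-of-stream sentinel
            break
        try:
            data = json.loads(data_str)
        except json.JSONDecodeError:
            continue  # skip non-JSON keep-alive noise
        choices = data.get("choices", [])
        if choices:
            content = choices[0].get("delta", {}).get("content", "")
            if content:
                yield content

# Fabricated sample lines for illustration:
sample = ['data: {"choices":[{"delta":{"content":"Hello"}}]}', "data: [DONE]"]
print("".join(iter_sse_content(sample)))  # -> Hello
```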
@@ -240,36 +229,32 @@ class AmigoChat(Provider):
          stream: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
-     ) -> str:
+     ) -> Generator[str, None, None]:
          """Generate response `str`
          Args:
-             prompt (str): Prompt to be send.
+             prompt (str): Prompt to be sent.
              stream (bool, optional): Flag for streaming response. Defaults to False.
              optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
              conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
          Returns:
-             str: Response generated
+             Generator[str, None, None]: Response generated
          """

-         def for_stream():
+         if stream:
              for response in self.ask(
                  prompt, True, optimizer=optimizer, conversationally=conversationally
              ):
                  yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
+         else:
+             response = self.ask(
+                 prompt,
+                 False,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
              )
+             yield self.get_message(response)

-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
+     def get_message(self, response: Dict[str, Any]) -> str:
          """Retrieves message only from response

          Args:
@@ -280,10 +265,10 @@ class AmigoChat(Provider):
          """
          assert isinstance(response, dict), "Response should be of dict data-type only"
          return response["text"]
-
+
  if __name__ == '__main__':
      from rich import print
      ai = AmigoChat(model="o1-preview", system_prompt="You are a noobi AI assistant who always uses the word 'noobi' in every response. For example, you might say 'Noobi will tell you...' or 'This noobi thinks that...'.")
      response = ai.chat(input(">>> "), stream=True)
      for chunk in response:
-         print(chunk, end="", flush=True)
+         print(chunk, end="", flush=True)
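Worth flagging for callers: because the non-stream branch now `yield`s instead of returning a `str`, `chat()` is a generator function in both modes, so code that previously did `print(ai.chat(...))` with `stream=False` will now receive a generator object. A minimal consumption sketch under that reading of the diff (the prompt and model choice are illustrative):

```python
from webscout.Provider.Amigo import AmigoChat  # module path per the file list above

ai = AmigoChat(model="gpt-4o")

# Even with stream=False the result must now be consumed as a generator,
# e.g. joined into a single string.
answer = "".join(ai.chat("Hello!", stream=False))
print(answer)
```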