webscout 6.5__py3-none-any.whl → 6.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as potentially problematic. See the registry's advisory page for this release for details.
- webscout/Extra/autocoder/autocoder_utiles.py +119 -101
- webscout/Extra/weather.py +5 -5
- webscout/Provider/AISEARCH/__init__.py +2 -0
- webscout/Provider/AISEARCH/ooai.py +155 -0
- webscout/Provider/Amigo.py +70 -85
- webscout/Provider/{prefind.py → Jadve.py} +72 -70
- webscout/Provider/Netwrck.py +239 -0
- webscout/Provider/Openai.py +4 -3
- webscout/Provider/PI.py +2 -2
- webscout/Provider/PizzaGPT.py +3 -3
- webscout/Provider/TeachAnything.py +15 -2
- webscout/Provider/Youchat.py +42 -8
- webscout/Provider/__init__.py +134 -147
- webscout/Provider/meta.py +1 -1
- webscout/Provider/multichat.py +230 -0
- webscout/Provider/promptrefine.py +2 -2
- webscout/Provider/talkai.py +10 -13
- webscout/Provider/turboseek.py +5 -4
- webscout/Provider/tutorai.py +8 -112
- webscout/Provider/typegpt.py +4 -5
- webscout/Provider/x0gpt.py +81 -9
- webscout/Provider/yep.py +123 -361
- webscout/__init__.py +10 -1
- webscout/cli.py +31 -39
- webscout/conversation.py +24 -9
- webscout/exceptions.py +188 -20
- webscout/litprinter/__init__.py +19 -123
- webscout/litprinter/colors.py +54 -0
- webscout/optimizers.py +335 -185
- webscout/scout/__init__.py +2 -5
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +140 -0
- webscout/scout/core/scout.py +571 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +6 -5
- webscout/update_checker.py +117 -58
- webscout/version.py +1 -1
- webscout/webscout_search.py +1 -1
- webscout/zeroart/base.py +15 -16
- webscout/zeroart/effects.py +1 -1
- webscout/zeroart/fonts.py +1 -1
- {webscout-6.5.dist-info → webscout-6.7.dist-info}/METADATA +9 -172
- {webscout-6.5.dist-info → webscout-6.7.dist-info}/RECORD +63 -45
- {webscout-6.5.dist-info → webscout-6.7.dist-info}/entry_points.txt +1 -1
- webscout-6.7.dist-info/top_level.txt +2 -0
- webstoken/__init__.py +30 -0
- webstoken/classifier.py +189 -0
- webstoken/keywords.py +216 -0
- webstoken/language.py +128 -0
- webstoken/ner.py +164 -0
- webstoken/normalizer.py +35 -0
- webstoken/processor.py +77 -0
- webstoken/sentiment.py +206 -0
- webstoken/stemmer.py +73 -0
- webstoken/t.py +75 -0
- webstoken/tagger.py +60 -0
- webstoken/tokenizer.py +158 -0
- webscout/Provider/Perplexity.py +0 -591
- webscout/Provider/RoboCoders.py +0 -206
- webscout/Provider/genspark.py +0 -225
- webscout/Provider/perplexitylabs.py +0 -265
- webscout/Provider/twitterclone.py +0 -251
- webscout/Provider/upstage.py +0 -230
- webscout-6.5.dist-info/top_level.txt +0 -1
- /webscout/Provider/{felo_search.py → AISEARCH/felo_search.py} +0 -0
- {webscout-6.5.dist-info → webscout-6.7.dist-info}/LICENSE.md +0 -0
- {webscout-6.5.dist-info → webscout-6.7.dist-info}/WHEEL +0 -0
|
@@ -5,117 +5,135 @@ import platform
|
|
|
5
5
|
import datetime
|
|
6
6
|
import pygetwindow as gw
|
|
7
7
|
import sys
|
|
8
|
+
from typing import List, Optional
|
|
8
9
|
|
|
9
10
|
from webscout.optimizers import Optimizers
|
|
10
11
|
|
|
11
|
-
|
|
12
|
+
|
|
13
|
+
def get_current_app() -> str:
    """Get the current active application name.

    Returns:
        str: Title of the currently active window, or ``"Unknown"`` when no
        window is active or the lookup fails.
    """
    try:
        active_window: Optional[gw.Window] = gw.getActiveWindow()
        # f-string coerces a non-str title to str; fall back when no window is active.
        return f"{active_window.title if active_window else 'Unknown'}"
    except Exception:
        # pygetwindow can raise on some platforms/window managers — degrade gracefully.
        return "Unknown"
|
|
18
20
|
|
|
19
|
-
def get_intro_prompt():
|
|
20
|
-
"""Get the introduction prompt for the AutoCoder."""
|
|
21
|
-
current_app = get_current_app()
|
|
22
|
-
python_version = sys.version.split()[0]
|
|
23
|
-
|
|
24
|
-
return f"""
|
|
25
|
-
You are a command-line coding assistant called Rawdog that generates and auto-executes Python scripts.
|
|
26
|
-
|
|
27
|
-
A typical interaction goes like this:
|
|
28
|
-
1. The user gives you a natural language PROMPT.
|
|
29
|
-
2. You:
|
|
30
|
-
i. Determine what needs to be done
|
|
31
|
-
ii. Write a short Python SCRIPT to do it
|
|
32
|
-
iii. Communicate back to the user by printing to the console in that SCRIPT
|
|
33
|
-
3. The compiler extracts the script and then runs it using exec(). If there will be an exception raised,
|
|
34
|
-
it will be send back to you starting with "PREVIOUS SCRIPT EXCEPTION:".
|
|
35
|
-
4. In case of exception, regenerate error free script.
|
|
36
|
-
|
|
37
|
-
If you need to review script outputs before completing the task, you can print the word "CONTINUE" at the end of your SCRIPT.
|
|
38
|
-
This can be useful for summarizing documents or technical readouts, reading instructions before
|
|
39
|
-
deciding what to do, or other tasks that require multi-step reasoning.
|
|
40
|
-
A typical 'CONTINUE' interaction looks like this:
|
|
41
|
-
1. The user gives you a natural language PROMPT.
|
|
42
|
-
2. You:
|
|
43
|
-
i. Determine what needs to be done
|
|
44
|
-
ii. Determine that you need to see the output of some subprocess call to complete the task
|
|
45
|
-
iii. Write a short Python SCRIPT to print that and then print the word "CONTINUE"
|
|
46
|
-
3. The compiler
|
|
47
|
-
i. Checks and runs your SCRIPT
|
|
48
|
-
ii. Captures the output and appends it to the conversation as "LAST SCRIPT OUTPUT:"
|
|
49
|
-
iii. Finds the word "CONTINUE" and sends control back to you
|
|
50
|
-
4. You again:
|
|
51
|
-
i. Look at the original PROMPT + the "LAST SCRIPT OUTPUT:" to determine what needs to be done
|
|
52
|
-
ii. Write a short Python SCRIPT to do it
|
|
53
|
-
iii. Communicate back to the user by printing to the console in that SCRIPT
|
|
54
|
-
5. The compiler...
|
|
55
21
|
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
- If asked to do something, print a concise summary of what you've done as confirmation.
|
|
61
|
-
- If asked a question, respond in a friendly, conversational way. Use programmatically-generated and natural language responses as appropriate.
|
|
62
|
-
- If you need clarification, return a SCRIPT that prints your question. In the next interaction, continue based on the user's response.
|
|
63
|
-
- Assume the user would like something concise. For example rather than printing a massive table, filter or summarize it to what's likely of interest.
|
|
64
|
-
- Actively clean up any temporary processes or files you use.
|
|
65
|
-
- When looking through files, use git as available to skip files, and skip hidden files (.env, .git, etc) by default.
|
|
66
|
-
- You can plot anything with matplotlib.
|
|
67
|
-
- ALWAYS Return your SCRIPT inside of a single pair of ``` delimiters. Only the console output of the first such SCRIPT is visible to the user, so make sure that it's complete and don't bother returning anything else.
|
|
22
|
+
def get_intro_prompt(name: str = "Vortex") -> str:
    """Build the system/introduction prompt for the AutoCoder.

    Args:
        name (str): Name of the user/agent the assistant generates scripts for.

    Returns:
        str: The fully rendered prompt, including live environment details
        (OS, Python version, working directory, datetime, active app).
    """
    active_app: str = get_current_app()
    py_version: str = sys.version.split()[0]

    return f"""
<system_context>
<purpose>
You are a command-line coding assistant named Rawdog, designed to generate and auto-execute Python scripts for {name}.
Your core function is to understand natural language requests, transform them into executable Python code,
and return results to the user via console output. You must adhere to all instructions.
</purpose>

<process_description>
A typical interaction unfolds as follows:
1. The user provides a natural language PROMPT.
2. You:
    i. Analyze the PROMPT to determine required actions.
    ii. Craft a short Python SCRIPT to execute those actions.
    iii. Provide clear and concise feedback to the user by printing to the console within your SCRIPT.
3. The compiler will then:
    i. Extract and execute the SCRIPT using exec().
    ii. Handle any exceptions that arise during script execution. Exceptions are returned to you starting with "PREVIOUS SCRIPT EXCEPTION:".
4. In cases of exceptions, ensure that you regenerate the script and return one that has no errors.

<continue_process>
If you need to review script outputs before task completion, include the word "CONTINUE" at the end of your SCRIPT.
This allows multi-step reasoning for tasks like summarizing documents, reviewing instructions, or performing other multi-part operations.
A typical 'CONTINUE' interaction looks like this:
1. The user gives you a natural language PROMPT.
2. You:
    i. Determine what needs to be done.
    ii. Determine that you need to see the output of some subprocess call to complete the task
    iii. Write a short Python SCRIPT to print that and then print the word "CONTINUE"
3. The compiler will:
    i. Check and run your SCRIPT.
    ii. Capture the output and append it to the conversation as "LAST SCRIPT OUTPUT:".
    iii. Find the word "CONTINUE" and return control back to you.
4. You will then:
    i. Review the original PROMPT + the "LAST SCRIPT OUTPUT:" to determine what to do
    ii. Write a short Python SCRIPT to complete the task.
    iii. Communicate back to the user by printing to the console in that SCRIPT.
5. The compiler repeats the above process...
</continue_process>

</process_description>

<conventions>
- Decline any tasks that seem dangerous, irreversible, or that you don't understand.
- Always review the full conversation prior to answering and maintain continuity.
- If asked for information, just print the information clearly and concisely.
- If asked to do something, print a concise summary of what you've done as confirmation.
- If asked a question, respond in a friendly, conversational way. Use programmatically-generated and natural language responses as appropriate.
- If you need clarification, return a SCRIPT that prints your question. In the next interaction, continue based on the user's response.
- Assume the user would like something concise. For example rather than printing a massive table, filter or summarize it to what's likely of interest.
- Actively clean up any temporary processes or files you use.
- When looking through files, use git as available to skip files, and skip hidden files (.env, .git, etc) by default.
- You can plot anything with matplotlib.
- **IMPORTANT**: ALWAYS Return your SCRIPT inside of a single pair of \`\`\` delimiters. Only the console output of the first such SCRIPT is visible to the user, so make sure that it's complete and don't bother returning anything else.
</conventions>

<environment_info>
- System: {platform.system()}
- Python: {py_version}
- Directory: {os.getcwd()}
- Datetime: {datetime.datetime.now()}
- Active App: {active_app}
</environment_info>
</system_context>
"""
|
|
76
92
|
|
|
77
|
-
EXAMPLES = """
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
files
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
```python
|
|
116
|
-
from webscout import weather as w
|
|
117
|
-
weather = w.get("Qazigund")
|
|
118
|
-
w.print_weather(weather)
|
|
119
|
-
```
|
|
120
|
-
|
|
121
|
-
|
|
93
|
+
# Few-shot examples of the expected request/response protocol, appended to the
# system prompt so the model mirrors the SCRIPT / CONTINUE conventions.
EXAMPLES: str = """
<examples>
<example>
<user_request>Kill the process running on port 3000</user_request>
<rawdog_response>
```python
import os
os.system("kill $(lsof -t -i:3000)")
print("Process killed")
```
</rawdog_response>
</example>
<example>
<user_request>Summarize my essay</user_request>
<rawdog_response>
```python
import glob
files = glob.glob("*essay*.*")
with open(files[0], "r") as f:
    print(f.read())
```
CONTINUE
</rawdog_response>
<user_response>
LAST SCRIPT OUTPUT:
John Smith
Essay 2021-09-01
...
</user_response>
<rawdog_response>
```python
print("The essay is about...")
```
</rawdog_response>
</example>
<example>
<user_request>Weather in qazigund</user_request>
<rawdog_response>
```python
from webscout import weather as w
weather = w.get("Qazigund")
w.print_weather(weather)
```
</rawdog_response>
</example>
</examples>
"""
|
webscout/Extra/weather.py
CHANGED
|
@@ -16,7 +16,7 @@ from rich.columns import Columns
|
|
|
16
16
|
# Initialize Rich console with force terminal
|
|
17
17
|
console = Console(force_terminal=True)
|
|
18
18
|
|
|
19
|
-
def
|
|
19
|
+
def get_emoji(condition: str) -> str:
|
|
20
20
|
"""Get appropriate emoji for weather condition"""
|
|
21
21
|
conditions = {
|
|
22
22
|
'sunny': '*', 'clear': '*',
|
|
@@ -61,7 +61,7 @@ def create_current_weather_panel(data):
|
|
|
61
61
|
location_name = f"{location['areaName'][0]['value']}, {location['country'][0]['value']}"
|
|
62
62
|
|
|
63
63
|
weather_desc = current['weatherDesc'][0]['value']
|
|
64
|
-
symbol =
|
|
64
|
+
symbol = get_emoji(weather_desc)
|
|
65
65
|
|
|
66
66
|
# Create weather info table
|
|
67
67
|
table = Table(show_header=False, box=box.ROUNDED, expand=True)
|
|
@@ -98,7 +98,7 @@ def create_forecast_panel(data):
|
|
|
98
98
|
# Get mid-day conditions (noon)
|
|
99
99
|
noon = day['hourly'][4]
|
|
100
100
|
condition = noon['weatherDesc'][0]['value']
|
|
101
|
-
symbol =
|
|
101
|
+
symbol = get_emoji(condition)
|
|
102
102
|
temp_range = f"{day['mintempC']}° - {day['maxtempC']}°"
|
|
103
103
|
rain_chance = f"v {noon['chanceofrain']}%"
|
|
104
104
|
wind = f"> {noon['windspeedKmph']} km/h"
|
|
@@ -113,7 +113,7 @@ def create_forecast_panel(data):
|
|
|
113
113
|
|
|
114
114
|
return Panel(table, title="[bold]3-Day Forecast[/]", border_style="blue")
|
|
115
115
|
|
|
116
|
-
def
|
|
116
|
+
def get(location: str):
|
|
117
117
|
"""Get weather data with progress indicator"""
|
|
118
118
|
with Progress(
|
|
119
119
|
SpinnerColumn(),
|
|
@@ -159,7 +159,7 @@ def main():
|
|
|
159
159
|
console.print("\n[bold cyan]* Weather Information[/]\n")
|
|
160
160
|
location = console.input("[cyan]Enter location: [/]")
|
|
161
161
|
|
|
162
|
-
weather_data =
|
|
162
|
+
weather_data = get(location)
|
|
163
163
|
if weather_data:
|
|
164
164
|
display_weather(weather_data)
|
|
165
165
|
|
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
import requests
|
|
2
|
+
import json
|
|
3
|
+
import re
|
|
4
|
+
from typing import Any, Dict, Generator, Optional
|
|
5
|
+
|
|
6
|
+
from webscout.AIbase import Provider
|
|
7
|
+
from webscout import exceptions
|
|
8
|
+
from webscout.litagent import LitAgent
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class OOAi(Provider):
    """
    A class to interact with the oo.ai API.

    Streams search responses from the oo.ai SSE endpoint and strips the
    internal ``<webblock>`` research/detail markup from the returned text.
    """

    def __init__(
        self,
        max_tokens: int = 600,
        timeout: int = 30,
        proxies: Optional[dict] = None,
    ):
        """Initializes the OOAi API client.

        Args:
            max_tokens (int): Advisory token cap (stored; not enforced by this client).
            timeout (int): Per-request timeout in seconds.
            proxies (dict, optional): Proxy mapping applied to the underlying session.
        """
        self.session = requests.Session()
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://oo.ai/api/search"
        self.stream_chunk_size = 1024  # Adjust as needed
        self.timeout = timeout
        self.last_response = {}
        self.headers = {
            "Accept": "text/event-stream",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
            "Cache-Control": "no-cache",
            "Cookie": "_ga=GA1.1.1827087199.1734256606; _ga_P0EJPHF2EG=GS1.1.1734368698.4.1.1734368711.0.0.0",
            "DNT": "1",
            "Referer": "https://oo.ai/",
            "sec-ch-ua": '"Microsoft Edge";v="131", "Chromium";v="131", "Not_A_Brand";v="24"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "Windows",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
        }
        # BUGFIX: set the User-Agent BEFORE syncing onto the session so the
        # session's default headers carry it too (previously it was added only
        # after session.headers.update, leaving the session without a UA).
        self.headers["User-Agent"] = LitAgent().random()
        self.session.headers.update(self.headers)
        # BUGFIX: proxies were stored but never used; apply them to the session.
        self.proxies = proxies
        if proxies:
            self.session.proxies.update(proxies)

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
    ) -> Dict[str, Any] | Generator[Any, None, None]:
        """Chat with AI

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw text chunks instead of dicts. Defaults to False.
            optimizer (str, optional): Not used. Defaults to None.
            conversationally (bool, optional): Not used. Defaults to False.

        Returns:
            Union[Dict, Generator]: Response generated (dicts, or plain strings when raw=True).

        Raises:
            exceptions.FailedToGenerateResponseError: On non-2xx HTTP status.
            exceptions.APIConnectionError: On network-level request failure.
        """
        params = {
            "q": prompt,
            "lang": "en-US",
            "tz": "Asia/Calcutta",
        }

        def for_stream():
            try:
                with self.session.get(
                    self.api_endpoint,
                    params=params,
                    headers=self.headers,
                    stream=True,
                    timeout=self.timeout,
                ) as response:

                    if not response.ok:
                        raise exceptions.FailedToGenerateResponseError(
                            f"Request failed with status code {response.status_code}: {response.text}"
                        )

                    streaming_text = ""
                    # SSE lines look like "data: {json}"; anything else is skipped.
                    for line in response.iter_lines(decode_unicode=True):
                        if not line or not line.startswith('data: '):
                            continue
                        try:
                            json_data = json.loads(line[6:])
                        except json.JSONDecodeError:
                            continue
                        if "content" in json_data:
                            content = self.clean_content(json_data["content"])
                            streaming_text += content
                            # BUGFIX: the original yielded the identical dict in
                            # both branches, making the `raw` flag a no-op.
                            yield content if raw else {"text": content}
                    self.last_response.update({"text": streaming_text})

            except requests.exceptions.RequestException as e:
                raise exceptions.APIConnectionError(f"Request failed: {e}") from e

        def for_non_stream():
            # Drain the stream to accumulate the full text into last_response.
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
    ) -> str | Generator[str, None, None]:
        """Generate response `str`"""

        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response"""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]

    @staticmethod
    def clean_content(text: str) -> str:
        """Removes all webblock elements with research or detail classes.

        BUGFIX: the original pattern used ``[^<]*`` which failed to match any
        webblock whose body itself contained a ``<`` character; non-greedy
        ``.*?`` with DOTALL handles multi-line and markup-bearing bodies.
        """
        cleaned_text = re.sub(
            r'<webblock class="(?:research|detail)">.*?</webblock>',
            "",
            text,
            flags=re.DOTALL,
        )
        return cleaned_text
|
|
149
|
+
|
|
150
|
+
if __name__ == "__main__":
    # Ad-hoc interactive smoke test: stream a single reply to the console.
    from rich import print

    bot = OOAi()
    for piece in bot.chat(input(">>> "), stream=True):
        print(piece, end="", flush=True)
|