webscout 7.3-py3-none-any.whl → 7.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/Provider/AISEARCH/__init__.py +4 -3
- webscout/Provider/AISEARCH/genspark_search.py +208 -0
- webscout/Provider/AllenAI.py +282 -0
- webscout/Provider/C4ai.py +414 -0
- webscout/Provider/Cloudflare.py +18 -21
- webscout/Provider/DeepSeek.py +3 -32
- webscout/Provider/Deepinfra.py +52 -44
- webscout/Provider/ElectronHub.py +634 -0
- webscout/Provider/GithubChat.py +362 -0
- webscout/Provider/Glider.py +7 -41
- webscout/Provider/HeckAI.py +217 -0
- webscout/Provider/HuggingFaceChat.py +462 -0
- webscout/Provider/Jadve.py +49 -63
- webscout/Provider/Marcus.py +7 -50
- webscout/Provider/Netwrck.py +6 -53
- webscout/Provider/PI.py +106 -93
- webscout/Provider/Perplexitylabs.py +395 -0
- webscout/Provider/Phind.py +29 -3
- webscout/Provider/QwenLM.py +7 -61
- webscout/Provider/TTI/__init__.py +1 -0
- webscout/Provider/TTI/aiarta/__init__.py +2 -0
- webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
- webscout/Provider/TTI/aiarta/sync_aiarta.py +409 -0
- webscout/Provider/TTI/piclumen/__init__.py +23 -0
- webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
- webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
- webscout/Provider/TextPollinationsAI.py +3 -2
- webscout/Provider/TwoAI.py +200 -0
- webscout/Provider/Venice.py +200 -0
- webscout/Provider/WiseCat.py +1 -18
- webscout/Provider/Youchat.py +1 -1
- webscout/Provider/__init__.py +25 -2
- webscout/Provider/akashgpt.py +315 -0
- webscout/Provider/chatglm.py +5 -5
- webscout/Provider/copilot.py +416 -0
- webscout/Provider/flowith.py +181 -0
- webscout/Provider/freeaichat.py +251 -221
- webscout/Provider/granite.py +17 -53
- webscout/Provider/koala.py +9 -1
- webscout/Provider/llamatutor.py +6 -46
- webscout/Provider/llmchat.py +7 -46
- webscout/Provider/multichat.py +29 -91
- webscout/Provider/yep.py +4 -24
- webscout/exceptions.py +19 -9
- webscout/update_checker.py +55 -93
- webscout/version.py +1 -1
- webscout-7.5.dist-info/LICENSE.md +146 -0
- {webscout-7.3.dist-info → webscout-7.5.dist-info}/METADATA +46 -172
- {webscout-7.3.dist-info → webscout-7.5.dist-info}/RECORD +52 -42
- webscout/Local/__init__.py +0 -10
- webscout/Local/_version.py +0 -3
- webscout/Local/formats.py +0 -747
- webscout/Local/model.py +0 -1368
- webscout/Local/samplers.py +0 -125
- webscout/Local/thread.py +0 -539
- webscout/Local/ui.py +0 -401
- webscout/Local/utils.py +0 -388
- webscout/Provider/dgaf.py +0 -214
- webscout-7.3.dist-info/LICENSE.md +0 -211
- {webscout-7.3.dist-info → webscout-7.5.dist-info}/WHEEL +0 -0
- {webscout-7.3.dist-info → webscout-7.5.dist-info}/entry_points.txt +0 -0
- {webscout-7.3.dist-info → webscout-7.5.dist-info}/top_level.txt +0 -0
webscout/Provider/AISEARCH/__init__.py

@@ -1,3 +1,4 @@
-from .felo_search import *
-from .DeepFind import *
-from .ISou import *
+from .felo_search import *
+from .DeepFind import *
+from .ISou import *
+from .genspark_search import *
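With the added wildcard import, the new Genspark engine is re-exported alongside the existing AISEARCH engines. A minimal sketch of what this enables (the top-level `from webscout import Genspark` path is taken from the "Basic Usage" docstring in the new module below, not verified here):

    from webscout.Provider.AISEARCH import Genspark
    from webscout import Genspark  # per genspark_search.py's own docstring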
webscout/Provider/AISEARCH/genspark_search.py

@@ -0,0 +1,208 @@
+import cloudscraper
+import requests
+from uuid import uuid4
+import json
+import re
+from typing import Dict, Optional, Generator, Union, Any
+
+from webscout.AIbase import AISearch
+from webscout import exceptions
+from webscout import LitAgent
+
+
+class Response:
+    """A wrapper class for Genspark API responses.
+
+    This class automatically converts response objects to their text representation
+    when printed or converted to a string.
+
+    Attributes:
+        text (str): The text content of the response
+
+    Example:
+        >>> response = Response("Hello, world!")
+        >>> print(response)
+        Hello, world!
+        >>> str(response)
+        'Hello, world!'
+    """
+
+    def __init__(self, text: str):
+        self.text = text
+
+    def __str__(self):
+        return self.text
+
+    def __repr__(self):
+        return self.text
+
+
+class Genspark(AISearch):
+    """A class to interact with the Genspark AI search API.
+
+    Genspark provides a search interface that returns AI-generated responses
+    based on web content. It supports both streaming and non-streaming responses.
+
+    Basic Usage:
+        >>> from webscout import Genspark
+        >>> ai = Genspark()
+        >>> # Non-streaming example
+        >>> response = ai.search("What is Python?")
+        >>> print(response)
+        Python is a high-level programming language...
+
+        >>> # Streaming example
+        >>> for chunk in ai.search("Tell me about AI", stream=True):
+        ...     print(chunk, end="", flush=True)
+        Artificial Intelligence is...
+
+        >>> # Raw response format
+        >>> for chunk in ai.search("Hello", stream=True, raw=True):
+        ...     print(chunk)
+        {'text': 'Hello'}
+        {'text': ' there!'}
+
+    Args:
+        timeout (int, optional): Request timeout in seconds. Defaults to 30.
+        proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+        max_tokens (int, optional): Maximum tokens to generate. Defaults to 600.
+    """
+
+    def __init__(
+        self,
+        timeout: int = 30,
+        proxies: Optional[dict] = None,
+        max_tokens: int = 600,
+    ):
+        """Initialize the Genspark API client.
+
+        Args:
+            timeout (int, optional): Request timeout in seconds. Defaults to 30.
+            proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+            max_tokens (int, optional): Maximum tokens to generate. Defaults to 600.
+        """
+        self.session = cloudscraper.create_scraper()
+        self.max_tokens = max_tokens
+        self.chat_endpoint = "https://www.genspark.ai/api/search/stream"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "Content-Type": "application/json",
+            "DNT": "1",
+            "Origin": "https://www.genspark.ai",
+            "Priority": "u=1, i",
+            "Sec-CH-UA": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": '"Windows"',
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "User-Agent": LitAgent().random(),
+        }
+
+        self.cookies = {
+            "i18n_redirected": "en-US",
+            "agree_terms": "0",
+            "session_id": uuid4().hex,
+        }
+
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies or {}
+
+    def search(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+    ) -> Union[Dict[str, Any], Generator[Union[Dict[str, Any], str], None, None]]:
+        """Search using the Genspark API and get AI-generated responses.
+
+        Args:
+            prompt (str): The search query or prompt to send to the API.
+            stream (bool, optional): If True, yields response chunks as they arrive.
+                If False, returns the complete response. Defaults to False.
+            raw (bool, optional): If True, returns raw response dictionaries with a 'text' key.
+                If False, returns Response objects that convert to text automatically.
+                Defaults to False.
+
+        Returns:
+            Union[Dict[str, Any], Generator[Union[Dict[str, Any], str], None, None]]:
+                - If stream=False: Returns the complete response
+                - If stream=True: Yields response chunks as they arrive
+
+        Raises:
+            APIConnectionError: If the API request fails
+        """
+        url = f"https://www.genspark.ai/api/search/stream?query={prompt.replace(' ', '%20')}"
+
+        def for_stream():
+            try:
+                with self.session.post(
+                    url,
+                    headers=self.headers,
+                    cookies=self.cookies,
+                    json={},  # Empty payload as query is in URL
+                    stream=True,
+                    timeout=self.timeout,
+                ) as response:
+                    if not response.ok:
+                        raise exceptions.APIConnectionError(
+                            f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                        )
+
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line and line.startswith("data: "):
+                            try:
+                                data = json.loads(line[6:])
+                                if "field_name" in data and "delta" in data:
+                                    if data["field_name"].startswith("streaming_detail_answer"):
+                                        delta_text = data.get("delta", "")
+
+                                        # Clean up markdown links in text
+                                        delta_text = re.sub(r"\[.*?\]\(.*?\)", "", delta_text)
+
+                                        if raw:
+                                            yield {"text": delta_text}
+                                        else:
+                                            yield Response(delta_text)
+                            except json.JSONDecodeError:
+                                continue
+
+            except (requests.RequestException, cloudscraper.exceptions.CloudflareException) as e:
+                raise exceptions.APIConnectionError(f"Request failed: {e}")
+
+        def for_non_stream():
+            # Accumulate the streamed chunks; a `yield` here would turn this
+            # helper into a generator and break the stream=False contract.
+            full_response = ""
+            for chunk in for_stream():
+                full_response += chunk["text"] if raw else str(chunk)
+
+            # Process the full response to clean up any JSON structures
+            try:
+                text_json = json.loads(full_response)
+                if isinstance(text_json, dict) and "detailAnswer" in text_json:
+                    full_response = text_json.get("detailAnswer", full_response)
+            except (json.JSONDecodeError, TypeError):
+                # Not valid JSON or not a dictionary, keep as is
+                pass
+
+            self.last_response = Response(full_response)
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+
+if __name__ == "__main__":
+
+    from rich import print
+
+    ai = Genspark()
+    response = ai.search(input(">>> "), stream=True, raw=False)
+    for chunk in response:
+        print(chunk, end="", flush=True)
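For reference, a minimal non-streaming sketch against the class above (the query and timeout are illustrative). With stream=False the method returns the accumulated Response wrapper and also stores it on last_response:

    from webscout.Provider.AISEARCH.genspark_search import Genspark

    ai = Genspark(timeout=60)
    answer = ai.search("What is Python?")  # stream=False returns a Response
    print(answer)                  # Response.__str__ gives the text
    print(ai.last_response.text)   # same text, kept on the instance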
webscout/Provider/AllenAI.py

@@ -0,0 +1,282 @@
+import requests
+import json
+import os
+from uuid import uuid4
+from typing import Any, Dict, Optional, Generator, Union
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from webscout import LitAgent
+
+class AllenAI(Provider):
+    """
+    A class to interact with the AllenAI (Ai2 Playground) API.
+    """
+
+    AVAILABLE_MODELS = [
+        'tulu3-405b',
+        'OLMo-2-1124-13B-Instruct',
+        'tulu-3-1-8b',
+        'Llama-3-1-Tulu-3-70B',
+        'olmoe-0125'
+    ]
+
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2048,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: Optional[dict] = None,
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "tulu3-405b",
+        system_prompt: str = "You are a helpful AI assistant.",
+    ):
+        """Initializes the AllenAI API client."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.url = "https://playground.allenai.org"
+        self.api_endpoint = "https://olmo-api.allen.ai/v4/message/stream"
+
+        # Use LitAgent for the user agent
+        self.headers = {
+            'User-Agent': LitAgent().random(),
+            'Accept': '*/*',
+            'Accept-Language': 'en-US,en;q=0.9',
+            'Origin': self.url,
+            'Referer': f"{self.url}/",
+            'Connection': 'keep-alive',
+            'Cache-Control': 'no-cache',
+            'Pragma': 'no-cache',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-site',
+            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Windows"'
+        }
+
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies or {})
+        self.model = model
+        self.system_prompt = system_prompt
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        # Generate an anonymous user ID
+        self.x_anonymous_user_id = str(uuid4())
+        # ID of the latest assistant message; used to thread follow-up turns
+        self.parent = None
+
+        self.__available_optimizers = tuple(
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def format_prompt(self, messages):
+        """Format messages into a prompt string."""
+        formatted = []
+        for msg in messages:
+            role = msg.get("role", "")
+            content = msg.get("content", "")
+            if role == "system":
+                formatted.append(f"System: {content}")
+            elif role == "user":
+                formatted.append(f"User: {content}")
+            elif role == "assistant":
+                formatted.append(f"Assistant: {content}")
+        return "\n".join(formatted)
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        host: str = "inferd",
+        private: bool = True,
+        top_p: float = None,
+        temperature: float = None,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        # Generate a boundary for the multipart form
+        boundary = f"----WebKitFormBoundary{uuid4().hex}"
+
+        # Set content-type header for this specific request
+        self.session.headers.update({
+            "content-type": f"multipart/form-data; boundary={boundary}",
+            "x-anonymous-user-id": self.x_anonymous_user_id
+        })
+
+        # Format messages for AllenAI
+        messages = [
+            {"role": "system", "content": self.system_prompt},
+            {"role": "user", "content": conversation_prompt}
+        ]
+
+        # Build the multipart form data by hand
+        form_data = [
+            f'--{boundary}\r\n'
+            f'Content-Disposition: form-data; name="model"\r\n\r\n{self.model}\r\n',
+
+            f'--{boundary}\r\n'
+            f'Content-Disposition: form-data; name="host"\r\n\r\n{host}\r\n',
+
+            f'--{boundary}\r\n'
+            f'Content-Disposition: form-data; name="content"\r\n\r\n{self.format_prompt(messages)}\r\n',
+
+            f'--{boundary}\r\n'
+            f'Content-Disposition: form-data; name="private"\r\n\r\n{str(private).lower()}\r\n'
+        ]
+
+        # Add parent if one exists (threads this turn under the previous reply)
+        if self.parent:
+            form_data.append(
+                f'--{boundary}\r\n'
+                f'Content-Disposition: form-data; name="parent"\r\n\r\n{self.parent}\r\n'
+            )
+
+        # Add optional sampling parameters
+        if temperature is not None:
+            form_data.append(
+                f'--{boundary}\r\n'
+                f'Content-Disposition: form-data; name="temperature"\r\n\r\n{temperature}\r\n'
+            )
+
+        if top_p is not None:
+            form_data.append(
+                f'--{boundary}\r\n'
+                f'Content-Disposition: form-data; name="top_p"\r\n\r\n{top_p}\r\n'
+            )
+
+        form_data.append(f'--{boundary}--\r\n')
+        data = "".join(form_data).encode()
+
+        def for_stream():
+            try:
+                response = self.session.post(
+                    self.api_endpoint,
+                    data=data,
+                    stream=True,
+                    timeout=self.timeout
+                )
+
+                if response.status_code != 200:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Request failed with status code {response.status_code}: {response.text}"
+                    )
+
+                streaming_text = ""
+                current_parent = None
+
+                for chunk in response.iter_content(chunk_size=1024, decode_unicode=False):
+                    if not chunk:
+                        continue
+
+                    decoded = chunk.decode(errors="ignore")
+                    for line in decoded.splitlines():
+                        line = line.strip()
+                        if not line:
+                            continue
+
+                        try:
+                            event = json.loads(line)
+                        except json.JSONDecodeError:
+                            continue
+
+                        if isinstance(event, dict):
+                            # Track the ID of the assistant message for threading
+                            if event.get("children"):
+                                for child in event["children"]:
+                                    if child.get("role") == "assistant":
+                                        current_parent = child.get("id")
+                                        break
+
+                            # Process content
+                            if "message" in event and event.get("content"):
+                                content = event["content"]
+                                if content.strip():
+                                    streaming_text += content
+                                    resp = dict(text=content)
+                                    yield resp  # same payload whether raw or not
+
+                            # Handle completion
+                            if event.get("final") or event.get("finish_reason") == "stop":
+                                if current_parent:
+                                    self.parent = current_parent
+
+                                # Update conversation history
+                                self.conversation.update_chat_history(prompt, streaming_text)
+                                self.last_response = {"text": streaming_text}
+                                return
+
+            except requests.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+        def for_non_stream():
+            streaming_text = ""
+            for resp in for_stream():
+                streaming_text += resp["text"]
+            self.last_response = {"text": streaming_text}
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        def for_stream():
+            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+                yield self.get_message(response)
+        def for_non_stream():
+            return self.get_message(
+                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+            )
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == "__main__":
+    from rich import print
+    ai = AllenAI(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
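A short usage sketch for the provider above (prompts are illustrative; the model name comes from AVAILABLE_MODELS). The second call continues the same thread because for_stream() stores the assistant message ID in self.parent, which ask() then sends as the "parent" form field:

    from webscout.Provider.AllenAI import AllenAI

    ai = AllenAI(model="OLMo-2-1124-13B-Instruct", timeout=60)
    print(ai.chat("Summarize the OLMo project in one sentence."))
    # ai.parent now holds the first reply's ID, so this turn continues the thread:
    print(ai.chat("Now expand that into three bullet points."))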