webscout 8.2.9__py3-none-any.whl → 8.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +6 -6
- webscout/AIbase.py +61 -1
- webscout/Extra/YTToolkit/ytapi/patterns.py +45 -45
- webscout/Extra/YTToolkit/ytapi/stream.py +1 -1
- webscout/Extra/YTToolkit/ytapi/video.py +10 -10
- webscout/Extra/autocoder/autocoder_utiles.py +1 -1
- webscout/Litlogger/formats.py +9 -0
- webscout/Litlogger/handlers.py +18 -0
- webscout/Litlogger/logger.py +43 -1
- webscout/Provider/AISEARCH/scira_search.py +3 -2
- webscout/Provider/Blackboxai.py +2 -0
- webscout/Provider/ChatSandbox.py +2 -1
- webscout/Provider/Deepinfra.py +1 -1
- webscout/Provider/HeckAI.py +1 -1
- webscout/Provider/LambdaChat.py +8 -1
- webscout/Provider/MCPCore.py +7 -3
- webscout/Provider/OPENAI/BLACKBOXAI.py +396 -113
- webscout/Provider/OPENAI/Cloudflare.py +31 -14
- webscout/Provider/OPENAI/FalconH1.py +457 -0
- webscout/Provider/OPENAI/FreeGemini.py +29 -13
- webscout/Provider/OPENAI/NEMOTRON.py +26 -14
- webscout/Provider/OPENAI/PI.py +427 -0
- webscout/Provider/OPENAI/Qwen3.py +161 -140
- webscout/Provider/OPENAI/README.md +3 -0
- webscout/Provider/OPENAI/TogetherAI.py +355 -0
- webscout/Provider/OPENAI/TwoAI.py +29 -12
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +33 -23
- webscout/Provider/OPENAI/api.py +375 -24
- webscout/Provider/OPENAI/autoproxy.py +39 -0
- webscout/Provider/OPENAI/base.py +91 -12
- webscout/Provider/OPENAI/c4ai.py +31 -10
- webscout/Provider/OPENAI/chatgpt.py +56 -24
- webscout/Provider/OPENAI/chatgptclone.py +46 -16
- webscout/Provider/OPENAI/chatsandbox.py +7 -3
- webscout/Provider/OPENAI/copilot.py +26 -10
- webscout/Provider/OPENAI/deepinfra.py +29 -12
- webscout/Provider/OPENAI/e2b.py +358 -158
- webscout/Provider/OPENAI/exaai.py +13 -10
- webscout/Provider/OPENAI/exachat.py +10 -6
- webscout/Provider/OPENAI/flowith.py +7 -3
- webscout/Provider/OPENAI/freeaichat.py +10 -6
- webscout/Provider/OPENAI/glider.py +10 -6
- webscout/Provider/OPENAI/heckai.py +11 -8
- webscout/Provider/OPENAI/llmchatco.py +9 -7
- webscout/Provider/OPENAI/mcpcore.py +10 -7
- webscout/Provider/OPENAI/multichat.py +3 -1
- webscout/Provider/OPENAI/netwrck.py +10 -6
- webscout/Provider/OPENAI/oivscode.py +12 -9
- webscout/Provider/OPENAI/opkfc.py +31 -8
- webscout/Provider/OPENAI/scirachat.py +17 -10
- webscout/Provider/OPENAI/sonus.py +10 -6
- webscout/Provider/OPENAI/standardinput.py +18 -9
- webscout/Provider/OPENAI/textpollinations.py +14 -7
- webscout/Provider/OPENAI/toolbaz.py +16 -11
- webscout/Provider/OPENAI/typefully.py +14 -7
- webscout/Provider/OPENAI/typegpt.py +10 -6
- webscout/Provider/OPENAI/uncovrAI.py +22 -8
- webscout/Provider/OPENAI/venice.py +10 -6
- webscout/Provider/OPENAI/writecream.py +13 -10
- webscout/Provider/OPENAI/x0gpt.py +11 -9
- webscout/Provider/OPENAI/yep.py +12 -10
- webscout/Provider/PI.py +2 -1
- webscout/Provider/STT/__init__.py +3 -0
- webscout/Provider/STT/base.py +281 -0
- webscout/Provider/STT/elevenlabs.py +265 -0
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/aiarta.py +399 -365
- webscout/Provider/TTI/base.py +74 -2
- webscout/Provider/TTI/fastflux.py +63 -30
- webscout/Provider/TTI/gpt1image.py +149 -0
- webscout/Provider/TTI/imagen.py +196 -0
- webscout/Provider/TTI/magicstudio.py +60 -29
- webscout/Provider/TTI/piclumen.py +43 -32
- webscout/Provider/TTI/pixelmuse.py +232 -225
- webscout/Provider/TTI/pollinations.py +43 -32
- webscout/Provider/TTI/together.py +287 -0
- webscout/Provider/TTI/utils.py +2 -1
- webscout/Provider/TTS/README.md +1 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/freetts.py +140 -0
- webscout/Provider/UNFINISHED/ChutesAI.py +314 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +95 -0
- webscout/Provider/__init__.py +3 -2
- webscout/Provider/granite.py +41 -6
- webscout/Provider/oivscode.py +37 -37
- webscout/Provider/scira_chat.py +3 -2
- webscout/Provider/scnet.py +1 -0
- webscout/Provider/toolbaz.py +0 -1
- webscout/litagent/Readme.md +12 -3
- webscout/litagent/agent.py +99 -62
- webscout/version.py +1 -1
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/METADATA +2 -1
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/RECORD +98 -87
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/WHEEL +1 -1
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/TTI/artbit.py +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/top_level.txt +0 -0
Diff of webscout/Provider/OPENAI/Qwen3.py:

@@ -25,6 +25,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         payload = {
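The hunk above adds two per-request options, `timeout` and `proxies`, to `Completions.create`. A minimal usage sketch, assuming the OpenAI-compatible call pattern these providers follow; the import path is inferred from the file list, the model alias comes from the model-map hunk further down, and the proxy URL and message are placeholders:

```python
# Hypothetical usage of the new per-request options (sketch, not package docs).
from webscout.Provider.OPENAI.Qwen3 import Qwen3  # import path assumed from the file list

client = Qwen3()
response = client.chat.completions.create(
    model="qwen-3-0.6b",                          # alias from the model map below
    messages=[{"role": "user", "content": "Hello"}],
    stream=False,
    timeout=15,                                   # new: per-request timeout in seconds
    proxies={"https": "http://127.0.0.1:8080"},   # new: per-request requests-style proxy map
)
print(response.choices[0].message.content)
```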
@@ -47,162 +49,180 @@ class Completions(BaseCompletions):
         created_time = int(time.time())
 
         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout=timeout, proxies=proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout=timeout, proxies=proxies)
 
     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
-        … (7 lines removed, not shown)
+        original_proxies = self._client.session.proxies.copy()
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
+        try:
+            session = self._client.session
+            headers = self._client.headers
+            # Step 1: Join the queue
+            join_resp = session.post(self._client.api_endpoint, headers=headers, json=payload, timeout=timeout if timeout is not None else self._client.timeout)
+            join_resp.raise_for_status()
+            event_id = join_resp.json().get('event_id')
+            session_hash = payload["session_hash"]
 
-        … (4 lines removed, not shown)
+            # Step 2: Stream data
+            params = {'session_hash': session_hash}
+            stream_resp = session.get(self._client.url + "/gradio_api/queue/data", headers=self._client.stream_headers, params=params, stream=True, timeout=timeout if timeout is not None else self._client.timeout)
+            stream_resp.raise_for_status()
 
-        … (2 lines removed, not shown)
+            # --- New logic to yield all content, tool reasoning, and status, similar to Reasoning class ---
+            is_thinking_tag_open = False  # True if <think> has been yielded and not yet </think>
 
-        … (13 lines removed, not shown)
+            for line in stream_resp.iter_lines():
+                if line:
+                    decoded_line = line.decode('utf-8')
+                    if decoded_line.startswith('data: '):
+                        try:
+                            json_data = json.loads(decoded_line[6:])
+                            if json_data.get('msg') == 'process_generating':
+                                if 'output' in json_data and 'data' in json_data['output'] and len(json_data['output']['data']) > 5:
+                                    updates_list = json_data['output']['data'][5]  # This is a list of operations
+                                    for op_details in updates_list:
+                                        action = op_details[0]
+                                        path = op_details[1]
+                                        value = op_details[2]
 
-        … (3 lines removed, not shown)
+                                        content_to_yield = None
+                                        is_current_op_tool = False
+                                        is_current_op_text = False
 
-        … (30 lines removed, not shown)
+                                        # Case 1: Adding a new content block (tool or text object)
+                                        if action == "add" and isinstance(value, dict) and "type" in value:
+                                            if len(path) == 4 and path[0] == "value" and path[2] == "content":
+                                                block_type = value.get("type")
+                                                content_to_yield = value.get("content")
+                                                if block_type == "tool":
+                                                    is_current_op_tool = True
+                                                elif block_type == "text":
+                                                    is_current_op_text = True
+
+                                        # Case 2: Appending content string to an existing block
+                                        elif action == "append" and isinstance(value, str):
+                                            if len(path) == 5 and path[0] == "value" and path[2] == "content" and path[4] == "content":
+                                                block_index = path[3]  # 0 for tool's content, 1 for text's content
+                                                content_to_yield = value
+                                                if block_index == 0:  # Appending to tool's content
+                                                    is_current_op_tool = True
+                                                elif block_index == 1:  # Appending to text's content
+                                                    is_current_op_text = True
+
+                                        # Case 3: Tool status update (e.g., "End of Thought")
+                                        elif action == "replace" and len(path) == 6 and \
+                                                path[0] == "value" and path[2] == "content" and \
+                                                path[3] == 0 and path[4] == "options" and path[5] == "status":  # path[3]==0 ensures it's the tool block
+                                            if value == "done":  # Tool block processing is complete
+                                                if is_thinking_tag_open:
+                                                    delta = ChoiceDelta(content="</think>\n\n", role="assistant")
+                                                    yield ChatCompletionChunk(id=request_id, choices=[Choice(index=0, delta=delta)], created=created_time, model=model)
+                                                    is_thinking_tag_open = False
+                                            continue  # This operation itself doesn't yield visible content
 
-        … (4 lines removed, not shown)
+                                        # Yielding logic
+                                        if is_current_op_tool and content_to_yield:
+                                            if not is_thinking_tag_open:
+                                                delta = ChoiceDelta(content="<think>", role="assistant")
+                                                yield ChatCompletionChunk(id=request_id, choices=[Choice(index=0, delta=delta)], created=created_time, model=model)
+                                                is_thinking_tag_open = True
+
+                                            delta = ChoiceDelta(content=content_to_yield, role="assistant")
                                             yield ChatCompletionChunk(id=request_id, choices=[Choice(index=0, delta=delta)], created=created_time, model=model)
-                                            is_thinking_tag_open = True
-
-                                            delta = ChoiceDelta(content=content_to_yield, role="assistant")
-                                            yield ChatCompletionChunk(id=request_id, choices=[Choice(index=0, delta=delta)], created=created_time, model=model)
 
-        … (3 lines removed, not shown)
+                                        elif is_current_op_text and content_to_yield:
+                                            if is_thinking_tag_open:  # If text starts, close any open thinking tag
+                                                delta = ChoiceDelta(content="</think>", role="assistant")
+                                                yield ChatCompletionChunk(id=request_id, choices=[Choice(index=0, delta=delta)], created=created_time, model=model)
+                                                is_thinking_tag_open = False
+
+                                            delta = ChoiceDelta(content=content_to_yield, role="assistant")
                                             yield ChatCompletionChunk(id=request_id, choices=[Choice(index=0, delta=delta)], created=created_time, model=model)
-                                            is_thinking_tag_open = False
-
-                                            delta = ChoiceDelta(content=content_to_yield, role="assistant")
-                                            yield ChatCompletionChunk(id=request_id, choices=[Choice(index=0, delta=delta)], created=created_time, model=model)
 
-        … (16 lines removed, not shown)
+                            if json_data.get('msg') == 'process_completed':
+                                if is_thinking_tag_open:  # Ensure </think> is yielded if process completes mid-thought
+                                    delta = ChoiceDelta(content="</think>", role="assistant")
+                                    yield ChatCompletionChunk(id=request_id, choices=[Choice(index=0, delta=delta)], created=created_time, model=model)
+                                    is_thinking_tag_open = False
+                                break
+                        except json.JSONDecodeError:
+                            continue
+                        except Exception as e:
+                            # Log or handle other potential exceptions
+                            continue
+
+            # After the loop, ensure the tag is closed if the stream broke for reasons other than 'process_completed'
+            if is_thinking_tag_open:
+                delta = ChoiceDelta(content="</think>", role="assistant")
+                yield ChatCompletionChunk(id=request_id, choices=[Choice(index=0, delta=delta)], created=created_time, model=model)
+        finally:
+            self._client.session.proxies = original_proxies
 
     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> ChatCompletion:
-        … (3 lines removed, not shown)
-        resp = session.post(self._client.api_endpoint, headers=headers, json=payload, timeout=self._client.timeout)
-        resp.raise_for_status()
-        data = resp.json()
-        # Return the full content as a single message, including all tool and text reasoning if present
-        output = ""
-        if 'output' in data and 'data' in data['output'] and len(data['output']['data']) > 5:
-            updates = data['output']['data'][5]
-            parts = []
-            for update in updates:
-                if isinstance(update, list) and len(update) > 2 and isinstance(update[2], str):
-                    parts.append(update[2])
-                elif isinstance(update, list) and isinstance(update[1], list) and len(update[1]) > 4:
-                    if update[1][4] == "content":
-                        parts.append(update[2])
-                    elif update[1][4] == "options" and update[2] != "done":
-                        parts.append(str(update[2]))
-                elif isinstance(update, dict):
-                    if update.get('type') == 'tool':
-                        parts.append(update.get('content', ''))
-                    elif update.get('type') == 'text':
-                        parts.append(update.get('content', ''))
-            output = "\n".join([str(p) for p in parts if p])
+        original_proxies = self._client.session.proxies.copy()
+        if proxies is not None:
+            self._client.session.proxies = proxies
         else:
-        … (19 lines removed, not shown)
+            self._client.session.proxies = {}
+        try:
+            # For non-streaming, just call the join endpoint and parse the result
+            session = self._client.session
+            headers = self._client.headers
+            resp = session.post(self._client.api_endpoint, headers=headers, json=payload, timeout=timeout if timeout is not None else self._client.timeout)
+            resp.raise_for_status()
+            data = resp.json()
+            # Return the full content as a single message, including all tool and text reasoning if present
+            output = ""
+            if 'output' in data and 'data' in data['output'] and len(data['output']['data']) > 5:
+                updates = data['output']['data'][5]
+                parts = []
+                for update in updates:
+                    if isinstance(update, list) and len(update) > 2 and isinstance(update[2], str):
+                        parts.append(update[2])
+                    elif isinstance(update, list) and isinstance(update[1], list) and len(update[1]) > 4:
+                        if update[1][4] == "content":
+                            parts.append(update[2])
+                        elif update[1][4] == "options" and update[2] != "done":
+                            parts.append(str(update[2]))
+                    elif isinstance(update, dict):
+                        if update.get('type') == 'tool':
+                            parts.append(update.get('content', ''))
+                        elif update.get('type') == 'text':
+                            parts.append(update.get('content', ''))
+                output = "\n".join([str(p) for p in parts if p])
+            else:
+                output = data.get('output', {}).get('data', ["", "", "", "", "", [["", "", ""]]])[5][0][2]
+            message = ChatCompletionMessage(role="assistant", content=output)
+            choice = Choice(index=0, message=message, finish_reason="stop")
+            # Use count_tokens to compute usage
+            prompt_tokens = count_tokens([m.get('content', '') for m in payload['data'] if isinstance(m, dict) and 'content' in m or isinstance(m, str)])
+            completion_tokens = count_tokens(output)
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=prompt_tokens + completion_tokens
+            )
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+            return completion
+        finally:
+            self._client.session.proxies = original_proxies
 
 class Chat(BaseChat):
     def __init__(self, client: 'Qwen3'):
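The rewritten `_create_stream` implements a Gradio queue handshake: POST the payload to a join endpoint, then read server-sent events from `/gradio_api/queue/data` for the same `session_hash` until a `process_completed` message arrives. A stripped-down sketch of that flow, with the base URL and join path assumed (the diff itself only references `self._client.api_endpoint` and `self._client.url`):

```python
# Minimal sketch of the Gradio queue flow behind _create_stream.
# BASE_URL and the join path are assumptions; the 'msg' values and the
# position of incremental updates (output.data[5]) come from the diff.
import json
import requests

BASE_URL = "https://example.gradio.app"           # placeholder endpoint
payload = {"data": [], "session_hash": "abc123"}  # shape inferred from the diff

session = requests.Session()
join_resp = session.post(BASE_URL + "/gradio_api/queue/join", json=payload, timeout=30)
join_resp.raise_for_status()

stream_resp = session.get(
    BASE_URL + "/gradio_api/queue/data",
    params={"session_hash": payload["session_hash"]},
    stream=True,
    timeout=30,
)
for raw in stream_resp.iter_lines():
    if not raw:
        continue
    line = raw.decode("utf-8")
    if not line.startswith("data: "):
        continue
    event = json.loads(line[6:])
    if event.get("msg") == "process_generating":
        # Incremental (action, path, value) operations arrive in
        # event["output"]["data"][5]; tool output becomes <think>... chunks.
        pass
    elif event.get("msg") == "process_completed":
        break
```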
@@ -231,9 +251,10 @@ class Qwen3(OpenAICompatibleProvider):
         "qwen-3-0.6b": "qwen3-0.6b"
     }
 
-    def __init__(self
-        self.timeout =
+    def __init__(self):
+        self.timeout = 30
         self.session = requests.Session()
+        self.session.proxies = {}
         self.headers = {
             'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:136.0) Gecko/20100101 Firefox/136.0',
             'Accept': '*/*',