webscout-8.3-py3-none-any.whl → webscout-8.3.1-py3-none-any.whl
This diff compares the contents of two publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +4 -4
- webscout/AIbase.py +61 -1
- webscout/Extra/YTToolkit/ytapi/patterns.py +45 -45
- webscout/Extra/YTToolkit/ytapi/stream.py +1 -1
- webscout/Extra/YTToolkit/ytapi/video.py +10 -10
- webscout/Extra/autocoder/autocoder_utiles.py +1 -1
- webscout/Litlogger/formats.py +9 -0
- webscout/Litlogger/handlers.py +18 -0
- webscout/Litlogger/logger.py +43 -1
- webscout/Provider/AISEARCH/scira_search.py +3 -2
- webscout/Provider/LambdaChat.py +7 -1
- webscout/Provider/OPENAI/BLACKBOXAI.py +1049 -1017
- webscout/Provider/OPENAI/Qwen3.py +303 -303
- webscout/Provider/OPENAI/README.md +3 -0
- webscout/Provider/OPENAI/TogetherAI.py +355 -0
- webscout/Provider/OPENAI/__init__.py +2 -1
- webscout/Provider/OPENAI/api.py +298 -13
- webscout/Provider/OPENAI/autoproxy.py +39 -0
- webscout/Provider/OPENAI/base.py +89 -12
- webscout/Provider/OPENAI/chatgpt.py +15 -2
- webscout/Provider/OPENAI/chatgptclone.py +14 -3
- webscout/Provider/OPENAI/deepinfra.py +339 -328
- webscout/Provider/OPENAI/e2b.py +295 -73
- webscout/Provider/OPENAI/opkfc.py +18 -6
- webscout/Provider/OPENAI/scirachat.py +3 -2
- webscout/Provider/OPENAI/toolbaz.py +0 -1
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +367 -367
- webscout/Provider/OPENAI/yep.py +383 -383
- webscout/Provider/STT/__init__.py +3 -0
- webscout/Provider/STT/base.py +281 -0
- webscout/Provider/STT/elevenlabs.py +265 -0
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/aiarta.py +399 -365
- webscout/Provider/TTI/base.py +74 -2
- webscout/Provider/TTI/fastflux.py +63 -30
- webscout/Provider/TTI/gpt1image.py +149 -0
- webscout/Provider/TTI/imagen.py +196 -0
- webscout/Provider/TTI/magicstudio.py +60 -29
- webscout/Provider/TTI/piclumen.py +43 -32
- webscout/Provider/TTI/pixelmuse.py +232 -225
- webscout/Provider/TTI/pollinations.py +43 -32
- webscout/Provider/TTI/together.py +287 -0
- webscout/Provider/TTI/utils.py +2 -1
- webscout/Provider/TTS/README.md +1 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/freetts.py +140 -0
- webscout/Provider/UNFINISHED/ChutesAI.py +314 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +95 -0
- webscout/Provider/__init__.py +3 -0
- webscout/Provider/scira_chat.py +3 -2
- webscout/Provider/toolbaz.py +0 -1
- webscout/litagent/Readme.md +12 -3
- webscout/litagent/agent.py +99 -62
- webscout/version.py +1 -1
- {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/METADATA +1 -1
- {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/RECORD +61 -51
- webscout/Provider/TTI/artbit.py +0 -0
- {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/WHEEL +0 -0
- {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/TogetherAI.py (new file)

@@ -0,0 +1,355 @@
+from typing import List, Dict, Optional, Union, Generator, Any
+
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage, count_tokens
+)
+
+import requests
+import uuid
+import time
+import json
+from webscout.litagent import LitAgent
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'TogetherAI'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        stop: Optional[Union[str, List[str]]] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        # Get API key if not already set
+        if not self._client.headers.get("Authorization"):
+            api_key = self._client.get_activation_key()
+            self._client.headers["Authorization"] = f"Bearer {api_key}"
+            self._client.session.headers.update(self._client.headers)
+
+        model_name = self._client.convert_model_name(model)
+        payload = {
+            "model": model_name,
+            "messages": messages,
+            "stream": stream,
+        }
+        if max_tokens is not None:
+            payload["max_tokens"] = max_tokens
+        if temperature is not None:
+            payload["temperature"] = temperature
+        if top_p is not None:
+            payload["top_p"] = top_p
+        if stop is not None:
+            payload["stop"] = stop
+        payload.update(kwargs)
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model_name, payload, timeout, proxies)
+        else:
+            return self._create_non_stream(request_id, created_time, model_name, payload, timeout, proxies)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies
+            )
+            response.raise_for_status()
+            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
+            completion_tokens = 0
+            total_tokens = prompt_tokens
+
+            for line in response.iter_lines():
+                if line:
+                    line = line.decode('utf-8')
+                    if line.startswith('data: '):
+                        line = line[6:]
+                    if line.strip() == '[DONE]':
+                        break
+                    try:
+                        chunk_data = json.loads(line)
+                        if 'choices' in chunk_data and chunk_data['choices']:
+                            delta = chunk_data['choices'][0].get('delta', {})
+                            content = delta.get('content')
+                            if content:
+                                completion_tokens += count_tokens(content)
+                                total_tokens = prompt_tokens + completion_tokens
+                                choice_delta = ChoiceDelta(
+                                    content=content,
+                                    role=delta.get('role', 'assistant'),
+                                    tool_calls=delta.get('tool_calls')
+                                )
+                                choice = Choice(
+                                    index=0,
+                                    delta=choice_delta,
+                                    finish_reason=None,
+                                    logprobs=None
+                                )
+                                chunk = ChatCompletionChunk(
+                                    id=request_id,
+                                    choices=[choice],
+                                    created=created_time,
+                                    model=model
+                                )
+                                chunk.usage = {
+                                    "prompt_tokens": prompt_tokens,
+                                    "completion_tokens": completion_tokens,
+                                    "total_tokens": total_tokens,
+                                    "estimated_cost": None
+                                }
+                                yield chunk
+                    except Exception:
+                        continue
+
+            # Final chunk with finish_reason="stop"
+            delta = ChoiceDelta(content=None, role=None, tool_calls=None)
+            choice = Choice(index=0, delta=delta, finish_reason="stop", logprobs=None)
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model
+            )
+            chunk.usage = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "estimated_cost": None
+            }
+            yield chunk
+        except Exception as e:
+            raise IOError(f"TogetherAI stream request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> ChatCompletion:
+        try:
+            payload_copy = payload.copy()
+            payload_copy["stream"] = False
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload_copy,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies
+            )
+            response.raise_for_status()
+            data = response.json()
+
+            full_text = ""
+            finish_reason = "stop"
+            if 'choices' in data and data['choices']:
+                full_text = data['choices'][0]['message']['content']
+                finish_reason = data['choices'][0].get('finish_reason', 'stop')
+
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=full_text
+            )
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason=finish_reason
+            )
+
+            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
+            completion_tokens = count_tokens(full_text)
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=prompt_tokens + completion_tokens
+            )
+
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+            return completion
+        except Exception as e:
+            raise IOError(f"TogetherAI non-stream request failed: {e}") from e
+
+
+class Chat(BaseChat):
+    def __init__(self, client: 'TogetherAI'):
+        self.completions = Completions(client)
+
+
+class TogetherAI(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for TogetherAI API.
+    """
+class TogetherAI(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for TogetherAI API.
+    """
+    AVAILABLE_MODELS = [
+        "Gryphe/MythoMax-L2-13b",
+        "Gryphe/MythoMax-L2-13b-Lite",
+        "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+        "Qwen/QwQ-32B",
+        "Qwen/Qwen2-72B-Instruct",
+        "Qwen/Qwen2-VL-72B-Instruct",
+        "Qwen/Qwen2.5-72B-Instruct-Turbo",
+        "Qwen/Qwen2.5-7B-Instruct-Turbo",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Qwen/Qwen2.5-VL-72B-Instruct",
+        "Qwen/Qwen3-235B-A22B-fp8",
+        "Qwen/Qwen3-235B-A22B-fp8-tput",
+        "Rrrr/meta-llama/Llama-3-70b-chat-hf-6f9ad551",
+        "Rrrr/meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo-03dc18e1",
+        "Rrrr/meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo-6c92f39d",
+        "arcee-ai/arcee-blitz",
+        "arcee-ai/caller",
+        "arcee-ai/coder-large",
+        "arcee-ai/maestro-reasoning",
+        "arcee-ai/virtuoso-large",
+        "arcee-ai/virtuoso-medium-v2",
+        "arcee_ai/arcee-spotlight",
+        "blackbox/meta-llama-3-1-8b",
+        "deepseek-ai/DeepSeek-R1",
+        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free",
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
+        "deepseek-ai/DeepSeek-V3",
+        "deepseek-ai/DeepSeek-V3-p-dp",
+        "google/gemma-2-27b-it",
+        "google/gemma-2b-it",
+        "lgai/exaone-3-5-32b-instruct",
+        "lgai/exaone-deep-32b",
+        "marin-community/marin-8b-instruct",
+        "meta-llama/Llama-3-70b-chat-hf",
+        "meta-llama/Llama-3-8b-chat-hf",
+        "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
+        "meta-llama/Llama-3.2-3B-Instruct-Turbo",
+        "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
+        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+        "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
+        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+        "meta-llama/Llama-Vision-Free",
+        "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
+        "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
+        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
+        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+        "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
+        "mistralai/Mistral-7B-Instruct-v0.1",
+        "mistralai/Mistral-7B-Instruct-v0.2",
+        "mistralai/Mistral-7B-Instruct-v0.3",
+        "mistralai/Mistral-Small-24B-Instruct-2501",
+        "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
+        "perplexity-ai/r1-1776",
+        "roberizk@gmail.com/meta-llama/Llama-3-70b-chat-hf-26ee936b",
+        "roberizk@gmail.com/meta-llama/Meta-Llama-3-70B-Instruct-6feb41f7",
+        "roberizk@gmail.com/meta-llama/Meta-Llama-3-8B-Instruct-8ced8839",
+        "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
+        "scb10x/scb10x-llama3-1-typhoon2-8b-instruct",
+        "togethercomputer/MoA-1",
+        "togethercomputer/MoA-1-Turbo",
+        "togethercomputer/Refuel-Llm-V2",
+        "togethercomputer/Refuel-Llm-V2-Small",
+    ]
+
+    def __init__(self, browser: str = "chrome"):
+        self.timeout = 60
+        self.api_endpoint = "https://api.together.xyz/v1/chat/completions"
+        self.activation_endpoint = "https://www.codegeneration.ai/activate-v2"
+        self.session = requests.Session()
+        self.headers = LitAgent().generate_fingerprint(browser=browser)
+        self.session.headers.update(self.headers)
+        self.chat = Chat(self)
+        self._api_key_cache = None
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return TogetherAI.AVAILABLE_MODELS
+        return _ModelList()
+
+    def get_activation_key(self) -> str:
+        """Get API key from activation endpoint"""
+        if self._api_key_cache:
+            return self._api_key_cache
+
+        try:
+            response = requests.get(
+                self.activation_endpoint,
+                headers={"Accept": "application/json"},
+                timeout=30
+            )
+            response.raise_for_status()
+            activation_data = response.json()
+            self._api_key_cache = activation_data["openAIParams"]["apiKey"]
+            return self._api_key_cache
+        except Exception as e:
+            raise Exception(f"Failed to get activation key: {e}")
+
+    def convert_model_name(self, model: str) -> str:
+        """Convert model name - returns model if valid, otherwise default"""
+        if model in self.AVAILABLE_MODELS:
+            return model
+
+        # Default to first available model if not found
+        return self.AVAILABLE_MODELS[0]
+
+
+if __name__ == "__main__":
+    from rich import print
+
+    client = TogetherAI()
+    messages = [
+        {"role": "user", "content": "Hello, how are you?"},
+        {"role": "assistant", "content": "I'm fine, thank you! How can I help you today?"},
+        {"role": "user", "content": "Tell me a short joke."}
+    ]
+
+    # Non-streaming example
+    response = client.chat.completions.create(
+        model="meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        messages=messages,
+        max_tokens=50,
+        stream=False
+    )
+    print("Non-streaming response:")
+    print(response)
+
+    # Streaming example
+    print("\nStreaming response:")
+    stream = client.chat.completions.create(
+        model="meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        messages=messages,
+        max_tokens=50,
+        stream=True
+    )
+
+    for chunk in stream:
+        if chunk.choices[0].delta.content:
+            print(chunk.choices[0].delta.content, end="")
+    print()
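The notable behavior in the new provider is how it obtains credentials: instead of taking a user-supplied Together API key, it fetches one at runtime from codegeneration.ai's activate-v2 endpoint and caches it. The sketch below isolates that flow as a minimal standalone script. It is an illustration only, not code from the package; the endpoints, the "openAIParams" → "apiKey" response path, and the model name are taken from the diff above, and the response shape is assumed to match what TogetherAI.py parses.

    # Minimal sketch of the key-activation flow visible in TogetherAI.py above.
    import requests

    # Step 1: fetch a Together API key from the third-party activation endpoint
    # (this is the call get_activation_key() makes).
    resp = requests.get(
        "https://www.codegeneration.ai/activate-v2",
        headers={"Accept": "application/json"},
        timeout=30,
    )
    resp.raise_for_status()
    api_key = resp.json()["openAIParams"]["apiKey"]

    # Step 2: use that key as a bearer token against the Together API,
    # mirroring the payload Completions.create() builds.
    chat = requests.post(
        "https://api.together.xyz/v1/chat/completions",
        headers={"Authorization": f"Bearer {api_key}"},
        json={
            "model": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
            "messages": [{"role": "user", "content": "Tell me a short joke."}],
            "max_tokens": 50,
        },
        timeout=60,
    )
    chat.raise_for_status()
    print(chat.json()["choices"][0]["message"]["content"])

That a release pulls working API credentials from an unrelated third-party endpoint is also consistent with the "potentially problematic" flag at the top of this page.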