webscout 8.3.2__py3-none-any.whl → 8.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (94)
  1. webscout/AIutel.py +146 -37
  2. webscout/Bing_search.py +1 -2
  3. webscout/Provider/AISEARCH/__init__.py +1 -0
  4. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  5. webscout/Provider/ExaChat.py +84 -58
  6. webscout/Provider/HeckAI.py +85 -80
  7. webscout/Provider/Jadve.py +56 -50
  8. webscout/Provider/MiniMax.py +207 -0
  9. webscout/Provider/Nemotron.py +41 -13
  10. webscout/Provider/Netwrck.py +34 -51
  11. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
  12. webscout/Provider/OPENAI/MiniMax.py +298 -0
  13. webscout/Provider/OPENAI/README.md +30 -29
  14. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  15. webscout/Provider/OPENAI/__init__.py +3 -1
  16. webscout/Provider/OPENAI/autoproxy.py +752 -17
  17. webscout/Provider/OPENAI/base.py +7 -76
  18. webscout/Provider/OPENAI/deepinfra.py +42 -108
  19. webscout/Provider/OPENAI/flowith.py +179 -166
  20. webscout/Provider/OPENAI/friendli.py +233 -0
  21. webscout/Provider/OPENAI/monochat.py +329 -0
  22. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  23. webscout/Provider/OPENAI/toolbaz.py +1 -0
  24. webscout/Provider/OPENAI/typegpt.py +1 -1
  25. webscout/Provider/OPENAI/utils.py +19 -42
  26. webscout/Provider/OPENAI/x0gpt.py +14 -2
  27. webscout/Provider/OpenGPT.py +54 -32
  28. webscout/Provider/PI.py +58 -84
  29. webscout/Provider/StandardInput.py +32 -13
  30. webscout/Provider/TTI/README.md +9 -9
  31. webscout/Provider/TTI/__init__.py +2 -1
  32. webscout/Provider/TTI/aiarta.py +92 -78
  33. webscout/Provider/TTI/infip.py +212 -0
  34. webscout/Provider/TTI/monochat.py +220 -0
  35. webscout/Provider/TeachAnything.py +11 -3
  36. webscout/Provider/TextPollinationsAI.py +78 -70
  37. webscout/Provider/TogetherAI.py +32 -48
  38. webscout/Provider/Venice.py +37 -46
  39. webscout/Provider/VercelAI.py +27 -24
  40. webscout/Provider/WiseCat.py +35 -35
  41. webscout/Provider/WrDoChat.py +22 -26
  42. webscout/Provider/WritingMate.py +26 -22
  43. webscout/Provider/__init__.py +2 -2
  44. webscout/Provider/granite.py +48 -57
  45. webscout/Provider/koala.py +51 -39
  46. webscout/Provider/learnfastai.py +49 -64
  47. webscout/Provider/llmchat.py +79 -93
  48. webscout/Provider/llmchatco.py +63 -78
  49. webscout/Provider/multichat.py +51 -40
  50. webscout/Provider/oivscode.py +1 -1
  51. webscout/Provider/scira_chat.py +159 -96
  52. webscout/Provider/scnet.py +13 -13
  53. webscout/Provider/searchchat.py +13 -13
  54. webscout/Provider/sonus.py +12 -11
  55. webscout/Provider/toolbaz.py +25 -8
  56. webscout/Provider/turboseek.py +41 -42
  57. webscout/Provider/typefully.py +27 -12
  58. webscout/Provider/typegpt.py +41 -46
  59. webscout/Provider/uncovr.py +55 -90
  60. webscout/Provider/x0gpt.py +33 -17
  61. webscout/Provider/yep.py +79 -96
  62. webscout/auth/__init__.py +12 -1
  63. webscout/auth/providers.py +27 -5
  64. webscout/auth/routes.py +128 -104
  65. webscout/auth/server.py +367 -312
  66. webscout/client.py +121 -116
  67. webscout/litagent/Readme.md +68 -55
  68. webscout/litagent/agent.py +99 -9
  69. webscout/version.py +1 -1
  70. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/METADATA +102 -90
  71. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/RECORD +75 -87
  72. webscout/Provider/TTI/fastflux.py +0 -233
  73. webscout/Provider/Writecream.py +0 -246
  74. webscout/auth/static/favicon.svg +0 -11
  75. webscout/auth/swagger_ui.py +0 -203
  76. webscout/auth/templates/components/authentication.html +0 -237
  77. webscout/auth/templates/components/base.html +0 -103
  78. webscout/auth/templates/components/endpoints.html +0 -750
  79. webscout/auth/templates/components/examples.html +0 -491
  80. webscout/auth/templates/components/footer.html +0 -75
  81. webscout/auth/templates/components/header.html +0 -27
  82. webscout/auth/templates/components/models.html +0 -286
  83. webscout/auth/templates/components/navigation.html +0 -70
  84. webscout/auth/templates/static/api.js +0 -455
  85. webscout/auth/templates/static/icons.js +0 -168
  86. webscout/auth/templates/static/main.js +0 -784
  87. webscout/auth/templates/static/particles.js +0 -201
  88. webscout/auth/templates/static/styles.css +0 -3353
  89. webscout/auth/templates/static/ui.js +0 -374
  90. webscout/auth/templates/swagger_ui.html +0 -170
  91. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
  92. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +0 -0
  93. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
  94. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/base.py

@@ -8,12 +8,11 @@ from webscout.Litlogger import Logger, LogLevel
 
 logger = Logger(name="OpenAIBase", level=LogLevel.INFO)
 
-# Import the ProxyAutoMeta metaclass
+# Import the LitMeta metaclass from Litproxy
 try:
-    from .autoproxy import ProxyAutoMeta
+    from litproxy import LitMeta
 except ImportError:
-    # Fallback if autoproxy is not available
-    ProxyAutoMeta = type
+    from .autoproxy import ProxyAutoMeta as LitMeta
 
 
 # Import the utils for response structures
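
The change above makes the external litproxy package the preferred source of the metaclass, with webscout's bundled autoproxy module as the fallback. A minimal sketch of checking which implementation was picked up at runtime (the print is illustrative, not part of the package):

    try:
        from litproxy import LitMeta
    except ImportError:
        from webscout.Provider.OPENAI.autoproxy import ProxyAutoMeta as LitMeta

    # Resolves to 'litproxy' when the external package is installed,
    # otherwise to webscout's bundled fallback module.
    print(LitMeta.__module__)
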
@@ -183,81 +182,11 @@ class BaseChat(ABC):
     completions: BaseCompletions
 
 
-# class ProxyAutoMeta(ABCMeta):
-#     """
-#     Metaclass to ensure all OpenAICompatibleProvider subclasses automatically get proxy support.
-#     This will inject proxies into any requests.Session, httpx.Client, or curl_cffi session attributes found on the instance.
-
-#     To disable automatic proxy injection, set disable_auto_proxy=True in the constructor or
-#     set the class attribute DISABLE_AUTO_PROXY = True.
-#     """
-#     def __call__(cls, *args, **kwargs):
-#         instance = super().__call__(*args, **kwargs)
-
-#         # Check if auto proxy is disabled
-#         disable_auto_proxy = kwargs.get('disable_auto_proxy', False) or getattr(cls, 'DISABLE_AUTO_PROXY', False)
-
-#         proxies = getattr(instance, 'proxies', None) or kwargs.get('proxies', None)
-#         if proxies is None and not disable_auto_proxy:
-#             try:
-#                 proxies = {"http": get_auto_proxy(), "https": get_auto_proxy()}
-#             except Exception as e:
-#                 logger.warning(f"Failed to get auto proxy, disabling proxy support: {e}")
-#                 proxies = {}
-#         elif proxies is None:
-#             proxies = {}
-#         instance.proxies = proxies
-#         # Patch sessions if we have valid proxies
-#         if proxies:
-#             for attr in dir(instance):
-#                 obj = getattr(instance, attr)
-#                 if isinstance(obj, requests.Session):
-#                     obj.proxies.update(proxies)
-#                 if httpx and isinstance(obj, httpx.Client):
-#                     try:
-#                         obj._proxies = proxies
-#                     except Exception:
-#                         pass
-#                 # Patch curl_cffi sessions if present
-#                 if CurlSession and isinstance(obj, CurlSession):
-#                     try:
-#                         obj.proxies.update(proxies)
-#                     except Exception:
-#                         pass
-#                 if CurlAsyncSession and isinstance(obj, CurlAsyncSession):
-#                     try:
-#                         obj.proxies.update(proxies)
-#                     except Exception:
-#                         pass
-#         # Provide helpers for proxied sessions
-#         def get_proxied_session():
-#             s = requests.Session()
-#             s.proxies.update(proxies)
-#             return s
-#         instance.get_proxied_session = get_proxied_session
-
-#         def get_proxied_curl_session(impersonate="chrome120", **kwargs):
-#             """Get a curl_cffi Session with proxies configured"""
-#             if CurlSession:
-#                 return CurlSession(proxies=proxies, impersonate=impersonate, **kwargs)
-#             else:
-#                 raise ImportError("curl_cffi is not installed")
-#         instance.get_proxied_curl_session = get_proxied_curl_session
-
-#         def get_proxied_curl_async_session(impersonate="chrome120", **kwargs):
-#             """Get a curl_cffi AsyncSession with proxies configured"""
-#             if CurlAsyncSession:
-#                 return CurlAsyncSession(proxies=proxies, impersonate=impersonate, **kwargs)
-#             else:
-#                 raise ImportError("curl_cffi is not installed")
-#         instance.get_proxied_curl_async_session = get_proxied_curl_async_session
-
-#         return instance
-class OpenAICompatibleProvider(ABC, metaclass=ProxyAutoMeta):
+class OpenAICompatibleProvider(ABC, metaclass=LitMeta):
     """
     Abstract Base Class for providers mimicking the OpenAI Python client structure.
     Requires a nested 'chat.completions' structure with tool support.
-    All subclasses automatically get proxy support via ProxyAutoMeta.
+    All subclasses automatically get proxy support via LitMeta.
 
     # Available proxy helpers:
     # - self.get_proxied_session() - returns a requests.Session with proxies
@@ -269,6 +198,8 @@ class OpenAICompatibleProvider(ABC, metaclass=ProxyAutoMeta):
     # - httpx.Client objects
     # - curl_cffi.requests.Session objects
    # - curl_cffi.requests.AsyncSession objects
+    #
+    # Inbuilt auto-retry is also enabled for all requests.Session and curl_cffi.Session objects.
     """
     chat: BaseChat
     available_tools: Dict[str, Tool] = {}  # Dictionary of available tools
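
According to the docstring above, LitMeta injects the proxy helpers into every provider instance at construction time, so they are not defined on the class itself. A hedged usage sketch, assuming the helpers behave as documented here and in the commented-out implementation deleted above (DeepInfra is the concrete subclass changed later in this diff):

    client = DeepInfra()

    # requests.Session with the instance's proxies already applied
    session = client.get_proxied_session()

    # curl_cffi Session; the old implementation defaulted to impersonate="chrome120"
    curl_session = client.get_proxied_curl_session()
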
webscout/Provider/OPENAI/deepinfra.py

@@ -2,23 +2,20 @@ import requests
 import json
 import time
 import uuid
+import collections
 from typing import List, Dict, Optional, Union, Generator, Any
 
-# Import base classes and utility structures
 from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
 from webscout.Provider.OPENAI.utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
     ChatCompletionMessage, CompletionUsage
 )
 
-# Attempt to import LitAgent, fallback if not available
 try:
     from webscout.litagent import LitAgent
 except ImportError:
     pass
 
-# --- DeepInfra Client ---
-
 class Completions(BaseCompletions):
     def __init__(self, client: 'DeepInfra'):
         self._client = client
@@ -36,10 +33,6 @@ class Completions(BaseCompletions):
         proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-        """
-        Creates a model response for the given chat conversation.
-        Mimics openai.chat.completions.create
-        """
         payload = {
             "model": model,
             "messages": messages,
@@ -50,12 +43,9 @@ class Completions(BaseCompletions):
             payload["temperature"] = temperature
         if top_p is not None:
             payload["top_p"] = top_p
-
         payload.update(kwargs)
-
         request_id = f"chatcmpl-{uuid.uuid4()}"
         created_time = int(time.time())
-
         if stream:
             return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
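
For reference, the payload assembled above follows the OpenAI chat-completions schema, with temperature and top_p included only when the caller sets them. An illustrative payload (values are placeholders; the exact field set beyond model, messages, and the sampling knobs is inferred from the visible parameters):

    payload = {
        "model": "deepseek-ai/DeepSeek-R1-0528",
        "messages": [{"role": "user", "content": "Hello"}],
        "stream": True,
        "temperature": 0.7,  # present only if the caller passed temperature
        "top_p": 0.9,        # present only if the caller passed top_p
    }
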
@@ -75,52 +65,39 @@ class Completions(BaseCompletions):
                 proxies=proxies
             )
             response.raise_for_status()
-
-            # Track token usage across chunks
             prompt_tokens = 0
             completion_tokens = 0
             total_tokens = 0
-
-            for line in response.iter_lines():
+            for line in response.iter_lines(decode_unicode=True):
                 if line:
-                    decoded_line = line.decode('utf-8').strip()
-
-                    if decoded_line.startswith("data: "):
-                        json_str = decoded_line[6:]
+                    if line.startswith("data: "):
+                        json_str = line[6:]
                         if json_str == "[DONE]":
-                            # Format the final [DONE] marker in OpenAI format
-                            # print("data: [DONE]")
                            break
-
                         try:
                             data = json.loads(json_str)
                             choice_data = data.get('choices', [{}])[0]
                             delta_data = choice_data.get('delta', {})
                             finish_reason = choice_data.get('finish_reason')
-
-                            # Update token counts if available
                             usage_data = data.get('usage', {})
                             if usage_data:
                                 prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
                                 completion_tokens = usage_data.get('completion_tokens', completion_tokens)
                                 total_tokens = usage_data.get('total_tokens', total_tokens)
-
-                            # Create the delta object
+                            if delta_data.get('content'):
+                                completion_tokens += 1
+                                total_tokens = prompt_tokens + completion_tokens
                             delta = ChoiceDelta(
                                 content=delta_data.get('content'),
                                 role=delta_data.get('role'),
                                 tool_calls=delta_data.get('tool_calls')
                             )
-
-                            # Create the choice object
                             choice = Choice(
                                 index=choice_data.get('index', 0),
                                 delta=delta,
                                 finish_reason=finish_reason,
                                 logprobs=choice_data.get('logprobs')
                             )
-
-                            # Create the chunk object
                             chunk = ChatCompletionChunk(
                                 id=request_id,
                                 choices=[choice],
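
The streaming loop above parses server-sent events: each non-empty line carries a "data: " prefix followed by a JSON chunk, and the literal "data: [DONE]" terminates the stream. A self-contained illustration of that parsing step with a made-up chunk:

    import json

    line = 'data: {"choices": [{"delta": {"content": "Hello"}, "index": 0}]}'
    if line.startswith("data: "):
        json_str = line[6:]  # strip the "data: " prefix, as in the code above
        if json_str != "[DONE]":
            data = json.loads(json_str)
            print(data["choices"][0]["delta"]["content"])  # -> Hello
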
@@ -128,48 +105,35 @@ class Completions(BaseCompletions):
                                 model=model,
                                 system_fingerprint=data.get('system_fingerprint')
                             )
-
-                            # Convert chunk to dict using Pydantic's API
-                            if hasattr(chunk, "model_dump"):
-                                chunk_dict = chunk.model_dump(exclude_none=True)
-                            else:
-                                chunk_dict = chunk.dict(exclude_none=True)
-
-                            # Add usage information to match OpenAI format
-                            # Even if we don't have real token counts, include estimated usage
-                            # This matches the format in the examples
-                            usage_dict = {
-                                "prompt_tokens": prompt_tokens or 10,
-                                "completion_tokens": completion_tokens or (len(delta_data.get('content', '')) if delta_data.get('content') else 0),
-                                "total_tokens": total_tokens or (10 + (len(delta_data.get('content', '')) if delta_data.get('content') else 0)),
+                            chunk.usage = {
+                                "prompt_tokens": prompt_tokens,
+                                "completion_tokens": completion_tokens,
+                                "total_tokens": total_tokens,
                                 "estimated_cost": None
                             }
-
-                            # Update completion_tokens and total_tokens as we receive more content
-                            if delta_data.get('content'):
-                                completion_tokens += 1
-                                total_tokens = prompt_tokens + completion_tokens
-                                usage_dict["completion_tokens"] = completion_tokens
-                                usage_dict["total_tokens"] = total_tokens
-
-                            chunk_dict["usage"] = usage_dict
-
-                            # Format the response in OpenAI format exactly as requested
-                            # We need to print the raw string and also yield the chunk object
-                            # This ensures both the console output and the returned object are correct
-                            # print(f"data: {json.dumps(chunk_dict)}")
-
-                            # Return the chunk object for internal processing
                             yield chunk
                         except json.JSONDecodeError:
-                            print(f"Warning: Could not decode JSON line: {json_str}")
                             continue
-        except requests.exceptions.RequestException as e:
+            # Final chunk with finish_reason="stop"
+            delta = ChoiceDelta(content=None, role=None, tool_calls=None)
+            choice = Choice(index=0, delta=delta, finish_reason="stop", logprobs=None)
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+            chunk.usage = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "estimated_cost": None
+            }
+            yield chunk
+        except Exception as e:
             print(f"Error during DeepInfra stream request: {e}")
             raise IOError(f"DeepInfra request failed: {e}") from e
-        except Exception as e:
-            print(f"Error processing DeepInfra stream: {e}")
-            raise
 
     def _create_non_stream(
         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
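
With the synthetic final chunk added above, every stream now ends with finish_reason="stop" and cumulative usage attached. A hedged consumption sketch (attribute names follow the chunk structures used in this file):

    client = DeepInfra()
    stream = client.chat.completions.create(
        model="deepseek-ai/DeepSeek-R1-0528",
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
    )
    for chunk in stream:
        delta = chunk.choices[0].delta
        if delta.content:
            print(delta.content, end="")
        if chunk.choices[0].finish_reason == "stop":
            print()
            print(chunk.usage)  # cumulative token counts from the final chunk
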
@@ -185,13 +149,19 @@ class Completions(BaseCompletions):
             )
             response.raise_for_status()
             data = response.json()
-
             choices_data = data.get('choices', [])
             usage_data = data.get('usage', {})
-
             choices = []
             for choice_d in choices_data:
-                message_d = choice_d.get('message', {})
+                message_d = choice_d.get('message')
+                if not message_d and 'delta' in choice_d:
+                    delta = choice_d['delta']
+                    message_d = {
+                        'role': delta.get('role', 'assistant'),
+                        'content': delta.get('content', '')
+                    }
+                if not message_d:
+                    message_d = {'role': 'assistant', 'content': ''}
                 message = ChatCompletionMessage(
                     role=message_d.get('role', 'assistant'),
                     content=message_d.get('content', '')
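
The new fallback accepts backends that return a delta, or nothing at all, in place of a message on non-streaming responses. Illustrative response shapes the loop above now handles (made-up minimal JSON):

    # Standard shape:   {"choices": [{"message": {"role": "assistant", "content": "Hi"}}]}
    # Delta-only shape: {"choices": [{"delta": {"role": "assistant", "content": "Hi"}}]}
    # Neither present:  {"choices": [{}]}  -> coerced to an empty assistant message
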
@@ -202,13 +172,11 @@ class Completions(BaseCompletions):
                     finish_reason=choice_d.get('finish_reason', 'stop')
                 )
                 choices.append(choice)
-
             usage = CompletionUsage(
                 prompt_tokens=usage_data.get('prompt_tokens', 0),
                 completion_tokens=usage_data.get('completion_tokens', 0),
                 total_tokens=usage_data.get('total_tokens', 0)
             )
-
             completion = ChatCompletion(
                 id=request_id,
                 choices=choices,
@@ -217,22 +185,16 @@ class Completions(BaseCompletions):
                 usage=usage,
             )
             return completion
-
-        except requests.exceptions.RequestException as e:
+        except Exception as e:
             print(f"Error during DeepInfra non-stream request: {e}")
             raise IOError(f"DeepInfra request failed: {e}") from e
-        except Exception as e:
-            print(f"Error processing DeepInfra response: {e}")
-            raise
 
 class Chat(BaseChat):
     def __init__(self, client: 'DeepInfra'):
         self.completions = Completions(client)
 
 class DeepInfra(OpenAICompatibleProvider):
-
     AVAILABLE_MODELS = [
-        # "anthropic/claude-3-7-sonnet-latest", # >>>> NOT WORKING
         "deepseek-ai/DeepSeek-R1-0528",
         "deepseek-ai/DeepSeek-R1",
         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
@@ -265,39 +227,13 @@ class DeepInfra(OpenAICompatibleProvider):
         "Qwen/Qwen3-30B-A3B",
         "Qwen/Qwen3-32B",
         "Qwen/Qwen3-235B-A22B",
-        # "google/gemini-1.5-flash", # >>>> NOT WORKING
-        # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
-        # "google/gemini-2.0-flash-001", # >>>> NOT WORKING
-
-        # "Gryphe/MythoMax-L2-13b", # >>>> NOT WORKING
-
-        # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Llama-3.2-90B-Vision-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Llama-3.2-11B-Vision-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
-        # "mistralai/Mixtral-8x7B-Instruct-v0.1", # >>>> NOT WORKING
-        # "mistralai/Mistral-7B-Instruct-v0.3", # >>>> NOT WORKING
-        # "mistralai/Mistral-Nemo-Instruct-2407", # >>>> NOT WORKING
-        # "NousResearch/Hermes-3-Llama-3.1-405B", # >>>> NOT WORKING
-        # "NovaSky-AI/Sky-T1-32B-Preview", # >>>> NOT WORKING
-        # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
-        # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
-        # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
     ]
-
     def __init__(self, browser: str = "chrome"):
-        self.timeout = None  # Default timeout
+        self.timeout = None
         self.base_url = "https://api.deepinfra.com/v1/openai/chat/completions"
         self.session = requests.Session()
-
         agent = LitAgent()
         fingerprint = agent.generate_fingerprint(browser)
-
         self.headers = {
             "Accept": fingerprint["accept"],
             "Accept-Encoding": "gzip, deflate, br, zstd",
@@ -319,21 +255,19 @@ class DeepInfra(OpenAICompatibleProvider):
         }
         self.session.headers.update(self.headers)
         self.chat = Chat(self)
-
     @property
     def models(self):
         class _ModelList:
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
         return _ModelList()
-
+
 if __name__ == "__main__":
-    # Example usage
     client = DeepInfra()
     response = client.chat.completions.create(
         model="deepseek-ai/DeepSeek-R1-0528",
         messages=[{"role": "user", "content": "Hello, how are you?"}],
-        max_tokens=100,
+        max_tokens=10000,
         stream=False
     )
     print(response)
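
For completeness, a sketch of reading the non-streaming response object produced by the example above (field names follow the ChatCompletion structure assembled in this file):

    print(response.choices[0].message.content)  # the assistant's reply text
    print(response.usage)                       # CompletionUsage with token counts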