webscout 1.3.5__py3-none-any.whl → 1.3.8__py3-none-any.whl

This diff shows the changes between package versions that have been publicly released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in that registry.

Potentially problematic release: this version of webscout might be problematic.

webscout/AI.py CHANGED
@@ -25,17 +25,16 @@ from webscout.AIbase import Provider
  from Helpingai_T2 import Perplexity
  from typing import Any
  import logging
- class GROQ(Provider):
+ #-----------------------------------------------Cohere--------------------------------------------
+ class Cohere(Provider):
  def __init__(
  self,
  api_key: str,
  is_conversation: bool = True,
  max_tokens: int = 600,
- temperature: float = 1,
- presence_penalty: int = 0,
- frequency_penalty: int = 0,
- top_p: float = 1,
- model: str = "mixtral-8x7b-32768",
+ model: str = "command-r-plus",
+ temperature: float = 0.7,
+ system_prompt: str = "You are helpful AI",
  timeout: int = 30,
  intro: str = None,
  filepath: str = None,
@@ -43,18 +42,20 @@ class GROQ(Provider):
  proxies: dict = {},
  history_offset: int = 10250,
  act: str = None,
+ top_k: int = -1,
+ top_p: float = 0.999,
  ):
- """Instantiates GROQ
+ """Initializes Cohere

  Args:
- api_key (key): GROQ's API key.
+ api_key (str): Cohere API key.
  is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
  max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
- temperature (float, optional): Charge of the generated text's randomness. Defaults to 1.
- presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
- frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
- top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
- model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo".
+ model (str, optional): Model to use for generating text. Defaults to "command-r-plus".
+ temperature (float, optional): Diversity of the generated text. Higher values produce more diverse outputs.
+ Defaults to 0.7.
+ system_prompt (str, optional): A system_prompt or context to set the style or tone of the generated text.
+ Defaults to "You are helpful AI".
  timeout (int, optional): Http request timeout. Defaults to 30.
  intro (str, optional): Conversation introductory prompt. Defaults to None.
  filepath (str, optional): Path to file containing conversation history. Defaults to None.
@@ -69,10 +70,8 @@ class GROQ(Provider):
  self.api_key = api_key
  self.model = model
  self.temperature = temperature
- self.presence_penalty = presence_penalty
- self.frequency_penalty = frequency_penalty
- self.top_p = top_p
- self.chat_endpoint = "https://api.groq.com/openai/v1/chat/completions"
+ self.system_prompt = system_prompt
+ self.chat_endpoint = "https://production.api.os.cohere.ai/coral/v1/chat"
  self.stream_chunk_size = 64
  self.timeout = timeout
  self.last_response = {}
@@ -110,42 +109,19 @@ class GROQ(Provider):
  ) -> dict:
  """Chat with AI

- Args:
- prompt (str): Prompt to be send.
- stream (bool, optional): Flag for streaming response. Defaults to False.
- raw (bool, optional): Stream back raw response as received. Defaults to False.
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
- Returns:
- dict : {}
- ```json
+ Args:
+ prompt (str): Prompt to be send.
+ stream (bool, optional): Flag for streaming response. Defaults to False.
+ raw (bool, optional): Stream back raw response as received. Defaults to False.
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+ Returns:
+ dict : {}
+ ```json
  {
- "id": "c0c8d139-d2b9-9909-8aa1-14948bc28404",
- "object": "chat.completion",
- "created": 1710852779,
- "model": "mixtral-8x7b-32768",
- "choices": [
- {
- "index": 0,
- "message": {
- "role": "assistant",
- "content": "Hello! How can I assist you today? I'm here to help answer your questions and engage in conversation on a wide variety of topics. Feel free to ask me anything!"
- },
- "logprobs": null,
- "finish_reason": "stop"
- }
- ],
- "usage": {
- "prompt_tokens": 47,
- "prompt_time": 0.03,
- "completion_tokens": 37,
- "completion_time": 0.069,
- "total_tokens": 84,
- "total_time": 0.099
- },
- "system_fingerprint": null
+ "text" : "How may I assist you today?"
  }
- ```
+ ```
  """
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
  if optimizer:
@@ -159,13 +135,10 @@ class GROQ(Provider):
  )
  self.session.headers.update(self.headers)
  payload = {
- "frequency_penalty": self.frequency_penalty,
- "messages": [{"content": conversation_prompt, "role": "user"}],
+ "message": conversation_prompt,
  "model": self.model,
- "presence_penalty": self.presence_penalty,
- "stream": stream,
  "temperature": self.temperature,
- "top_p": self.top_p,
+ "preamble": self.system_prompt,
  }

  def for_stream():
@@ -177,22 +150,14 @@ class GROQ(Provider):
  f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
  )

- message_load = ""
  for value in response.iter_lines(
  decode_unicode=True,
- delimiter="" if raw else "data:",
  chunk_size=self.stream_chunk_size,
  ):
  try:
- resp = json.loads(value)
- incomplete_message = self.get_message(resp)
- if incomplete_message:
- message_load += incomplete_message
- resp["choices"][0]["delta"]["content"] = message_load
- self.last_response.update(resp)
- yield value if raw else resp
- elif raw:
- yield value
+ resp = json.loads(value.strip().split("\n")[-1])
+ self.last_response.update(resp)
+ yield value if raw else resp
  except json.decoder.JSONDecodeError:
  pass
  self.conversation.update_chat_history(
@@ -200,19 +165,10 @@ class GROQ(Provider):
  )

  def for_non_stream():
- response = self.session.post(
- self.chat_endpoint, json=payload, stream=False, timeout=self.timeout
- )
- if not response.ok:
- raise Exception(
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
- )
- resp = response.json()
- self.last_response.update(resp)
- self.conversation.update_chat_history(
- prompt, self.get_message(self.last_response)
- )
- return resp
+ # let's make use of stream
+ for _ in for_stream():
+ pass
+ return self.last_response

  return for_stream() if stream else for_non_stream()
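The hunks above replace the old GROQ block at the top of the module with a new Cohere provider that posts to Cohere's coral chat endpoint and always consumes the response as a stream. A minimal usage sketch, not part of the diff, assuming the class is importable from webscout.AI as the file path suggests and that ask() and get_message() keep the interfaces shown here (the key string is a placeholder):

```python
# Usage sketch only - not part of the diff. Assumes webscout 1.3.8 with the
# new Cohere provider shown above; the key string is a placeholder.
from webscout.AI import Cohere

bot = Cohere(api_key="YOUR_COHERE_API_KEY", model="command-r-plus")

# Non-streaming: ask() drains the stream internally and returns last_response,
# from which get_message() reads result.chatStreamEndEvent.response.text.
reply = bot.ask("Hello there")
print(bot.get_message(reply))

# Streaming: each yielded item is one JSON chunk parsed from the endpoint.
for chunk in bot.ask("Hello there", stream=True):
    print(chunk)
```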
@@ -261,16 +217,12 @@ class GROQ(Provider):
  str: Message extracted
  """
  assert isinstance(response, dict), "Response should be of dict data-type only"
- try:
- if response["choices"][0].get("delta"):
- return response["choices"][0]["delta"]["content"]
- return response["choices"][0]["message"]["content"]
- except KeyError:
- return ""
- #----------------------------------------------------------Sean-----------------------------------------------------------
- class Sean:
+ return response["result"]["chatStreamEndEvent"]["response"]["text"]
+ #-----------------------------------------------REKA-----------------------------------------------
+ class REKA(Provider):
  def __init__(
  self,
+ api_key: str,
  is_conversation: bool = True,
  max_tokens: int = 600,
  timeout: int = 30,
@@ -280,8 +232,12 @@ class Sean:
  proxies: dict = {},
  history_offset: int = 10250,
  act: str = None,
+ model: str = "reka-core",
+ system_prompt: str = "Be Helpful and Friendly. Keep your response straightforward, short and concise",
+ use_search_engine: bool = False,
+ use_code_interpreter: bool = False,
  ):
- """Instantiates OPENGPT
+ """Instantiates REKA

  Args:
  is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
@@ -293,31 +249,25 @@ class Sean:
  proxies (dict, optional): Http request proxies. Defaults to {}.
  history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
  act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+ model (str, optional): REKA model name. Defaults to "reka-core".
+ system_prompt (str, optional): System prompt for REKA. Defaults to "Be Helpful and Friendly. Keep your response straightforward, short and concise".
+ use_search_engine (bool, optional): Whether to use the search engine. Defaults to False.
+ use_code_interpreter (bool, optional): Whether to use the code interpreter. Defaults to False.
  """
  self.session = requests.Session()
- self.max_tokens_to_sample = max_tokens
  self.is_conversation = is_conversation
- self.chat_endpoint = (
- "https://opengpts-example-vz4y4ooboq-uc.a.run.app/runs/stream"
- )
+ self.max_tokens_to_sample = max_tokens
+ self.api_endpoint = "https://chat.reka.ai/api/chat"
  self.stream_chunk_size = 64
  self.timeout = timeout
  self.last_response = {}
- self.assistant_id = "281bc620-b9f3-47c6-bf74-3f0e5b6e7dac"
- self.authority = "opengpts-example-vz4y4ooboq-uc.a.run.app"
-
+ self.model = model
+ self.system_prompt = system_prompt
+ self.use_search_engine = use_search_engine
+ self.use_code_interpreter = use_code_interpreter
+ self.access_token = api_key
  self.headers = {
- "authority": self.authority,
- "accept": "text/event-stream",
- "accept-language": "en-US,en;q=0.7",
- "cache-control": "no-cache",
- "content-type": "application/json",
- "origin": "https://opengpts-example-vz4y4ooboq-uc.a.run.app",
- "pragma": "no-cache",
- "referer": "https://opengpts-example-vz4y4ooboq-uc.a.run.app/",
- "sec-fetch-site": "same-origin",
- "sec-gpc": "1",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
+ "Authorization": f"Bearer {self.access_token}",
  }

  self.__available_optimizers = (
@@ -359,27 +309,7 @@ class Sean:
  dict : {}
  ```json
  {
- "messages": [
- {
- "content": "Hello there",
- "additional_kwargs": {},
- "type": "human",
- "example": false
- },
- {
- "content": "Hello! How can I assist you today?",
- "additional_kwargs": {
- "agent": {
- "return_values": {
- "output": "Hello! How can I assist you today?"
- },
- "log": "Hello! How can I assist you today?",
- "type": "AgentFinish"
- }
- },
- "type": "ai",
- "example": false
- }]
+ "text" : "How may I assist you today?"
  }
  ```
  """
@@ -395,48 +325,264 @@ class Sean:
  )

  self.session.headers.update(self.headers)
- self.session.headers.update(
- dict(
- cookie=f"opengpts_user_id={uuid4().__str__()}",
+ payload = {
+
+ "conversation_history": [
+ {"type": "human", "text": f"## SYSTEM PROMPT: {self.system_prompt}\n\n## QUERY: {conversation_prompt}"},
+ ],
+
+ "stream": stream,
+ "use_search_engine": self.use_search_engine,
+ "use_code_interpreter": self.use_code_interpreter,
+ "model_name": self.model,
+ # "model_name": "reka-flash",
+ # "model_name": "reka-edge",
+ }
+
+ def for_stream():
+ response = self.session.post(self.api_endpoint, json=payload, stream=True, timeout=self.timeout)
+ if not response.ok:
+ raise Exception(
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+ )
+
+ for value in response.iter_lines(
+ decode_unicode=True,
+ chunk_size=self.stream_chunk_size,
+ ):
+ try:
+ resp = json.loads(value)
+ self.last_response.update(resp)
+ yield value if raw else resp
+ except json.decoder.JSONDecodeError:
+ pass
+ self.conversation.update_chat_history(
+ prompt, self.get_message(self.last_response)
+ )
+
+ def for_non_stream():
+ # let's make use of stream
+ for _ in for_stream():
+ pass
+ return self.last_response
+
+ return for_stream() if stream else for_non_stream()
+
+ def chat(
+ self,
+ prompt: str,
+ stream: bool = False,
+ optimizer: str = None,
+ conversationally: bool = False,
+ ) -> str:
+ """Generate response `str`
+ Args:
+ prompt (str): Prompt to be send.
+ stream (bool, optional): Flag for streaming response. Defaults to False.
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+ Returns:
+ str: Response generated
+ """
+
+ def for_stream():
+ for response in self.ask(
+ prompt, True, optimizer=optimizer, conversationally=conversationally
+ ):
+ yield self.get_message(response)
+
+ def for_non_stream():
+ return self.get_message(
+ self.ask(
+ prompt,
+ False,
+ optimizer=optimizer,
+ conversationally=conversationally,
+ )
+ )
+
+ return for_stream() if stream else for_non_stream()
+
+ def get_message(self, response: dict) -> str:
+ """Retrieves message only from response
+
+ Args:
+ response (dict): Response generated by `self.ask`
+
+ Returns:
+ str: Message extracted
+ """
+ assert isinstance(response, dict), "Response should be of dict data-type only"
+ return response.get("text")
+ #-----------------------------------------------GROQ-----------------------------------------------
+ class GROQ(Provider):
+ def __init__(
+ self,
+ api_key: str,
+ is_conversation: bool = True,
+ max_tokens: int = 600,
+ temperature: float = 1,
+ presence_penalty: int = 0,
+ frequency_penalty: int = 0,
+ top_p: float = 1,
+ model: str = "mixtral-8x7b-32768",
+ timeout: int = 30,
+ intro: str = None,
+ filepath: str = None,
+ update_file: bool = True,
+ proxies: dict = {},
+ history_offset: int = 10250,
+ act: str = None,
+ ):
+ """Instantiates GROQ
+
+ Args:
+ api_key (key): GROQ's API key.
+ is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+ max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+ temperature (float, optional): Charge of the generated text's randomness. Defaults to 1.
+ presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
+ frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
+ top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
+ model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo".
+ timeout (int, optional): Http request timeout. Defaults to 30.
+ intro (str, optional): Conversation introductory prompt. Defaults to None.
+ filepath (str, optional): Path to file containing conversation history. Defaults to None.
+ update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+ proxies (dict, optional): Http request proxies. Defaults to {}.
+ history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+ act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+ """
+ self.session = requests.Session()
+ self.is_conversation = is_conversation
+ self.max_tokens_to_sample = max_tokens
+ self.api_key = api_key
+ self.model = model
+ self.temperature = temperature
+ self.presence_penalty = presence_penalty
+ self.frequency_penalty = frequency_penalty
+ self.top_p = top_p
+ self.chat_endpoint = "https://api.groq.com/openai/v1/chat/completions"
+ self.stream_chunk_size = 64
+ self.timeout = timeout
+ self.last_response = {}
+ self.headers = {
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {self.api_key}",
+ }
+
+ self.__available_optimizers = (
+ method
+ for method in dir(Optimizers)
+ if callable(getattr(Optimizers, method)) and not method.startswith("__")
+ )
+ self.session.headers.update(self.headers)
+ Conversation.intro = (
+ AwesomePrompts().get_act(
+ act, raise_not_found=True, default=None, case_insensitive=True
  )
+ if act
+ else intro or Conversation.intro
  )
- payload = {
- "input": [
+ self.conversation = Conversation(
+ is_conversation, self.max_tokens_to_sample, filepath, update_file
+ )
+ self.conversation.history_offset = history_offset
+ self.session.proxies = proxies
+
+ def ask(
+ self,
+ prompt: str,
+ stream: bool = False,
+ raw: bool = False,
+ optimizer: str = None,
+ conversationally: bool = False,
+ ) -> dict:
+ """Chat with AI
+
+ Args:
+ prompt (str): Prompt to be send.
+ stream (bool, optional): Flag for streaming response. Defaults to False.
+ raw (bool, optional): Stream back raw response as received. Defaults to False.
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+ Returns:
+ dict : {}
+ ```json
+ {
+ "id": "c0c8d139-d2b9-9909-8aa1-14948bc28404",
+ "object": "chat.completion",
+ "created": 1710852779,
+ "model": "mixtral-8x7b-32768",
+ "choices": [
  {
- "content": conversation_prompt,
- "additional_kwargs": {},
- "type": "human",
- "example": False,
- },
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "content": "Hello! How can I assist you today? I'm here to help answer your questions and engage in conversation on a wide variety of topics. Feel free to ask me anything!"
+ },
+ "logprobs": null,
+ "finish_reason": "stop"
+ }
  ],
- "assistant_id": self.assistant_id,
- "thread_id": "",
+ "usage": {
+ "prompt_tokens": 47,
+ "prompt_time": 0.03,
+ "completion_tokens": 37,
+ "completion_time": 0.069,
+ "total_tokens": 84,
+ "total_time": 0.099
+ },
+ "system_fingerprint": null
+ }
+ ```
+ """
+ conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+ if optimizer:
+ if optimizer in self.__available_optimizers:
+ conversation_prompt = getattr(Optimizers, optimizer)(
+ conversation_prompt if conversationally else prompt
+ )
+ else:
+ raise Exception(
+ f"Optimizer is not one of {self.__available_optimizers}"
+ )
+ self.session.headers.update(self.headers)
+ payload = {
+ "frequency_penalty": self.frequency_penalty,
+ "messages": [{"content": conversation_prompt, "role": "user"}],
+ "model": self.model,
+ "presence_penalty": self.presence_penalty,
+ "stream": stream,
+ "temperature": self.temperature,
+ "top_p": self.top_p,
  }

  def for_stream():
  response = self.session.post(
  self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
  )
- if (
- not response.ok
- or not response.headers.get("Content-Type")
- == "text/event-stream; charset=utf-8"
- ):
+ if not response.ok:
  raise Exception(
  f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
  )

+ message_load = ""
  for value in response.iter_lines(
  decode_unicode=True,
+ delimiter="" if raw else "data:",
  chunk_size=self.stream_chunk_size,
  ):
  try:
- modified_value = re.sub("data:", "", value)
- resp = json.loads(modified_value)
- if len(resp) == 1:
- continue
- self.last_response.update(resp[1])
- yield value if raw else resp[1]
+ resp = json.loads(value)
+ incomplete_message = self.get_message(resp)
+ if incomplete_message:
+ message_load += incomplete_message
+ resp["choices"][0]["delta"]["content"] = message_load
+ self.last_response.update(resp)
+ yield value if raw else resp
+ elif raw:
+ yield value
  except json.decoder.JSONDecodeError:
  pass
  self.conversation.update_chat_history(
@@ -444,9 +590,19 @@ class Sean:
  )

  def for_non_stream():
- for _ in for_stream():
- pass
- return self.last_response
+ response = self.session.post(
+ self.chat_endpoint, json=payload, stream=False, timeout=self.timeout
+ )
+ if not response.ok:
+ raise Exception(
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+ )
+ resp = response.json()
+ self.last_response.update(resp)
+ self.conversation.update_chat_history(
+ prompt, self.get_message(self.last_response)
+ )
+ return resp

  return for_stream() if stream else for_non_stream()
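GROQ itself is not dropped in 1.3.8; the hunks above re-add it further down the module, still pointed at the OpenAI-compatible api.groq.com endpoint. A minimal usage sketch, not part of the diff, under the same assumptions (import path inferred from the file name, placeholder API key):

```python
# Usage sketch only - not part of the diff; the key string is a placeholder.
from webscout.AI import GROQ

bot = GROQ(api_key="YOUR_GROQ_API_KEY", model="mixtral-8x7b-32768")

# Non-streaming: one POST to the chat/completions endpoint, returning the
# OpenAI-style dict shown in the docstring above.
response = bot.ask("Hello there")
print(bot.get_message(response))   # choices[0].message.content

# Streaming: deltas are accumulated into choices[0].delta.content, so each
# yielded chunk carries the message generated so far.
for chunk in bot.ask("Hello there", stream=True):
    print(bot.get_message(chunk))
```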
@@ -495,8 +651,14 @@ class Sean:
  str: Message extracted
  """
  assert isinstance(response, dict), "Response should be of dict data-type only"
- return response["content"]
- #----------------------------------------------------------OpenAI-----------------------------------------------------------
+ try:
+ if response["choices"][0].get("delta"):
+ return response["choices"][0]["delta"]["content"]
+ return response["choices"][0]["message"]["content"]
+ except KeyError:
+ return ""
+
+ #----------------------------------------------------------OpenAI-----------------------------------
  class OPENAI(Provider):
  model = "gpt-3.5-turbo"
  def __init__(