webscout 1.3.1__py3-none-any.whl → 1.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout has been flagged as possibly problematic in its registry.

webscout/AI.py CHANGED
@@ -25,6 +25,708 @@ from webscout.AIbase import Provider
 from Helpingai_T2 import Perplexity
 from typing import Any
 import logging
+#----------------------------------------------------------Sean-----------------------------------------------------------
+class Sean:
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Instantiates Sean (backed by the OpenGPTs example endpoint)
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.session = requests.Session()
+        self.max_tokens_to_sample = max_tokens
+        self.is_conversation = is_conversation
+        self.chat_endpoint = (
+            "https://opengpts-example-vz4y4ooboq-uc.a.run.app/runs/stream"
+        )
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.assistant_id = "281bc620-b9f3-47c6-bf74-3f0e5b6e7dac"
+        self.authority = "opengpts-example-vz4y4ooboq-uc.a.run.app"
+
+        self.headers = {
+            "authority": self.authority,
+            "accept": "text/event-stream",
+            "accept-language": "en-US,en;q=0.7",
+            "cache-control": "no-cache",
+            "content-type": "application/json",
+            "origin": "https://opengpts-example-vz4y4ooboq-uc.a.run.app",
+            "pragma": "no-cache",
+            "referer": "https://opengpts-example-vz4y4ooboq-uc.a.run.app/",
+            "sec-fetch-site": "same-origin",
+            "sec-gpc": "1",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
+        }
+
+        # a tuple (not a generator) so membership checks work on every call
+        self.__available_optimizers = tuple(
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "messages": [
+                {
+                    "content": "Hello there",
+                    "additional_kwargs": {},
+                    "type": "human",
+                    "example": false
+                },
+                {
+                    "content": "Hello! How can I assist you today?",
+                    "additional_kwargs": {
+                        "agent": {
+                            "return_values": {
+                                "output": "Hello! How can I assist you today?"
+                            },
+                            "log": "Hello! How can I assist you today?",
+                            "type": "AgentFinish"
+                        }
+                    },
+                    "type": "ai",
+                    "example": false
+                }
+            ]
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        self.session.headers.update(self.headers)
+        self.session.headers.update(
+            dict(
+                cookie=f"opengpts_user_id={uuid4()}",
+            )
+        )
+        payload = {
+            "input": [
+                {
+                    "content": conversation_prompt,
+                    "additional_kwargs": {},
+                    "type": "human",
+                    "example": False,
+                },
+            ],
+            "assistant_id": self.assistant_id,
+            "thread_id": "",
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
+            )
+            if (
+                not response.ok
+                or response.headers.get("Content-Type")
+                != "text/event-stream; charset=utf-8"
+            ):
+                raise Exception(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            for value in response.iter_lines(
+                decode_unicode=True,
+                chunk_size=self.stream_chunk_size,
+            ):
+                try:
+                    modified_value = re.sub("data:", "", value)
+                    resp = json.loads(modified_value)
+                    if len(resp) == 1:
+                        continue
+                    self.last_response.update(resp[1])
+                    yield value if raw else resp[1]
+                except json.decoder.JSONDecodeError:
+                    pass
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["content"]
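
For orientation, a minimal usage sketch for the new `Sean` provider added above (a hypothetical snippet, assuming `webscout.AI` exports the class exactly as shown in this diff; the endpoint and assistant id are hard-coded in the class):

```python
from webscout.AI import Sean

bot = Sean(timeout=30)
print(bot.chat("Hello there"))            # blocking call, returns the full reply
for chunk in bot.chat("Tell me a joke", stream=True):
    print(chunk)                          # each yielded message payload as it arrives
```
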
+#----------------------------------------------------------OpenAI-----------------------------------------------------------
+class OPENAI(Provider):
+    model = "gpt-3.5-turbo"
+
+    def __init__(
+        self,
+        api_key: str,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        temperature: float = 1,
+        presence_penalty: int = 0,
+        frequency_penalty: int = 0,
+        top_p: float = 1,
+        model: str = model,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Instantiates OPENAI
+
+        Args:
+            api_key (str): OpenAI's API key.
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            temperature (float, optional): Degree of randomness in the generated text. Defaults to 1.
+            presence_penalty (int, optional): Penalty for repeating topics. Defaults to 0.
+            frequency_penalty (int, optional): Penalty for repeating words. Defaults to 0.
+            top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
+            model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo".
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.session = requests.Session()  # was missing; the session is used below and in ask()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_key = api_key
+        self.model = model
+        self.temperature = temperature
+        self.presence_penalty = presence_penalty
+        self.frequency_penalty = frequency_penalty
+        self.top_p = top_p
+        self.chat_endpoint = "https://api.openai.com/v1/chat/completions"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {self.api_key}",
+        }
+
+        # a tuple (not a generator) so membership checks work on every call
+        self.__available_optimizers = tuple(
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
+            "object": "chat.completion",
+            "created": 1704623244,
+            "model": "gpt-3.5-turbo",
+            "usage": {
+                "prompt_tokens": 0,
+                "completion_tokens": 0,
+                "total_tokens": 0
+            },
+            "choices": [
+                {
+                    "message": {
+                        "role": "assistant",
+                        "content": "Hello! How can I assist you today?"
+                    },
+                    "finish_reason": "stop",
+                    "index": 0
+                }
+            ]
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+        self.session.headers.update(self.headers)
+        payload = {
+            "frequency_penalty": self.frequency_penalty,
+            "messages": [{"content": conversation_prompt, "role": "user"}],
+            "model": self.model,
+            "presence_penalty": self.presence_penalty,
+            "stream": stream,
+            "temperature": self.temperature,
+            "top_p": self.top_p,
+            "max_tokens": self.max_tokens_to_sample,  # was missing from the request body
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
+            )
+            if not response.ok:
+                raise Exception(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            message_load = ""
+            for value in response.iter_lines(
+                decode_unicode=True,
+                delimiter="" if raw else "data:",
+                chunk_size=self.stream_chunk_size,
+            ):
+                try:
+                    resp = json.loads(value)
+                    incomplete_message = self.get_message(resp)
+                    if incomplete_message:
+                        message_load += incomplete_message
+                        resp["choices"][0]["delta"]["content"] = message_load
+                        self.last_response.update(resp)
+                        yield value if raw else resp
+                    elif raw:
+                        yield value
+                except json.decoder.JSONDecodeError:
+                    pass
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            response = self.session.post(
+                self.chat_endpoint, json=payload, stream=False, timeout=self.timeout
+            )
+            if (
+                not response.ok
+                or response.headers.get("Content-Type", "") != "application/json"
+            ):
+                raise Exception(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+            resp = response.json()
+            self.last_response.update(resp)
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+            return resp
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        try:
+            if response["choices"][0].get("delta"):
+                return response["choices"][0]["delta"]["content"]
+            return response["choices"][0]["message"]["content"]
+        except KeyError:
+            return ""
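
A similarly minimal sketch for the `OPENAI` provider (hypothetical; the `OPENAI_API_KEY` environment variable is an assumption, not something this package requires):

```python
import os
from webscout.AI import OPENAI

ai = OPENAI(api_key=os.environ["OPENAI_API_KEY"], model="gpt-3.5-turbo", max_tokens=600)
print(ai.chat("Summarise HTTP/2 in one sentence."))
```
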
+#--------------------------------------LEO-----------------------------------------
+class LEO(Provider):
+
+    model = "llama-2-13b-chat"
+    key = "qztbjzBqJueQZLFkwTTJrieu8Vw3789u"
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        temperature: float = 0.2,
+        top_k: int = -1,
+        top_p: float = 0.999,
+        model: str = model,
+        brave_key: str = key,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Instantiates LEO
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            brave_key (str, optional): Brave API access key. Defaults to "qztbjzBqJueQZLFkwTTJrieu8Vw3789u".
+            model (str, optional): Text generation model name. Defaults to "llama-2-13b-chat".
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            temperature (float, optional): Degree of randomness in the generated text. Defaults to 0.2.
+            top_k (int, optional): Number of highest-probability tokens to sample from. Defaults to -1 (disabled).
+            top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to `Conversation.intro`.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies (socks). Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.session = requests.Session()  # was missing; the session is used below and in ask()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.model = model
+        self.stop_sequences = ["</response>", "</s>"]
+        self.temperature = temperature
+        self.top_k = top_k
+        self.top_p = top_p
+        self.chat_endpoint = "https://ai-chat.bsg.brave.com/v1/complete"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            "Content-Type": "application/json",
+            "accept": "text/event-stream",
+            "x-brave-key": brave_key,
+            "accept-language": "en-US,en;q=0.9",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:99.0) Gecko/20100101 Firefox/110.0",
+        }
+        # a tuple (not a generator) so membership checks work on every call
+        self.__available_optimizers = tuple(
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)  # fixed: was `self.ession`
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+        self.system_prompt = (
+            "\n\nYour name is Leo, a helpful, "
+            "respectful and honest AI assistant created by the company Brave. You will be replying to a user of the Brave browser. "
+            "Always respond in a neutral tone. Be polite and courteous. Answer concisely in no more than 50-80 words."
+            "\n\nPlease ensure that your responses are socially unbiased and positive in nature. "
+            "If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. "
+            "If you don't know the answer to a question, please don't share false information.\n"
+        )
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "completion": "\nNext: domestic cat breeds with short hair >>",
+            "stop_reason": null,
+            "truncated": false,
+            "stop": null,
+            "model": "llama-2-13b-chat",
+            "log_id": "cmpl-3kYiYxSNDvgMShSzFooz6t",
+            "exception": null
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        self.session.headers.update(self.headers)
+        payload = {
+            "max_tokens_to_sample": self.max_tokens_to_sample,
+            "model": self.model,
+            "prompt": f"<s>[INST] <<SYS>>{self.system_prompt}<</SYS>>{conversation_prompt} [/INST]",
+            "stop_sequences": self.stop_sequences,  # fixed: key was the literal string "self.stop_sequence"
+            "stream": stream,
+            "top_k": self.top_k,
+            "top_p": self.top_p,
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
+            )
+            if (
+                not response.ok
+                or response.headers.get("Content-Type")
+                != "text/event-stream; charset=utf-8"
+            ):
+                raise Exception(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            for value in response.iter_lines(
+                decode_unicode=True,
+                delimiter="" if raw else "data:",
+                chunk_size=self.stream_chunk_size,
+            ):
+                try:
+                    resp = json.loads(value)
+                    self.last_response.update(resp)
+                    yield value if raw else resp
+                except json.decoder.JSONDecodeError:
+                    pass
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            response = self.session.post(
+                self.chat_endpoint, json=payload, stream=False, timeout=self.timeout
+            )
+            if (
+                not response.ok
+                or response.headers.get("Content-Type", "") != "application/json"
+            ):
+                raise Exception(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+            resp = response.json()
+            self.last_response.update(resp)
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+            return resp
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response.get("completion")
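
And a minimal sketch for `LEO` (hypothetical; relies on the Brave key and model bundled as class attributes above):

```python
from webscout.AI import LEO

leo = LEO()  # uses the bundled Brave key and llama-2-13b-chat by default
for chunk in leo.chat("What is the Brave browser?", stream=True):
    print(chunk)  # each server-sent completion event's text
```
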
 #------------------------------------------------------KOBOLDAI-----------------------------------------------------------
 class KOBOLDAI(Provider):
     def __init__(
@@ -859,6 +1561,7 @@ class BLACKBOXAI:
         print(processed_response)
 #------------------------------------------------------phind-------------------------------------------------------------
 class PhindSearch:
+    # default_model = "Phind Model"
     def __init__(
         self,
         is_conversation: bool = True,
@@ -1223,33 +1926,195 @@ class youChat:
         completion = you_chat.create(prompt)
         print(completion)
 #-------------------------------------------------------Gemini--------------------------------------------------------
-class Gemini:
-    def __init__(self):
-        self.messages = []
+from Bard import Chatbot
+import logging
+from os import path
+from json import load
+from json import dumps
+import warnings
+
+logging.getLogger("httpx").setLevel(logging.ERROR)
 
-    def chat(self, *args):
-        assert args != ()
+warnings.simplefilter("ignore", category=UserWarning)
+
+class GEMINI(Provider):
+    def __init__(
+        self,
+        cookie_file: str,
+        proxy: dict = {},
+        timeout: int = 30,
+    ):
+        """Initializes GEMINI
 
-        message = " ".join(args)
-        self.messages.append({"role": "user", "content": message})
+        Args:
+            cookie_file (str): Path to `bard.google.com.cookies.json` file
+            proxy (dict, optional): Http request proxy. Defaults to {}.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+        """
+        self.conversation = Conversation(False)
+        self.session_auth1 = None
+        self.session_auth2 = None
+        assert isinstance(
+            cookie_file, str
+        ), f"cookie_file should be of {str} only not '{type(cookie_file)}'"
+        if path.isfile(cookie_file):
+            # let's assume auth is a path to an exported .json cookie-file
+            with open(cookie_file) as fh:
+                entries = load(fh)
+            for entry in entries:
+                if entry["name"] == "__Secure-1PSID":
+                    self.session_auth1 = entry["value"]
+                elif entry["name"] == "__Secure-1PSIDTS":
+                    self.session_auth2 = entry["value"]
+
+            assert all(
+                [self.session_auth1, self.session_auth2]
+            ), f"Failed to extract the required cookie value from file '{cookie_file}'"
+        else:
+            raise Exception(f"{cookie_file} is not a valid file path")
 
-        response = g4f.ChatCompletion.create(
-            model=g4f.models.default,
-            provider=g4f.Provider.Gemini,
-            messages=self.messages,
-            stream=True,
+        self.session = Chatbot(self.session_auth1, self.session_auth2, proxy, timeout)
+        self.last_response = {}
+        # a tuple (not a generator) so membership checks work on every call
+        self.__available_optimizers = tuple(
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
-        ms = ""
-        for message in response:
-            ms += message
-        self.messages.append({"role": "assistant", "content": ms.strip()})  # Strip whitespace from the message content
-        return ms.strip()  # Return the message without trailing whitespace
 
-    @staticmethod
-    def chat_cli(message):
-        """Generate completion based on the provided message"""
-        gemini = Gemini()
-        return gemini.chat(message)
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "content": "General Kenobi! \n\n(I couldn't help but respond with the iconic Star Wars greeting since you used it first. )\n\nIs there anything I can help you with today?\n[Image of Hello there General Kenobi]",
+            "conversation_id": "c_f13f6217f9a997aa",
+            "response_id": "r_d3665f95975c368f",
+            "factualityQueries": null,
+            "textQuery": [
+                "hello there",
+                1
+            ],
+            "choices": [
+                {
+                    "id": "rc_ea075c9671bfd8cb",
+                    "content": [
+                        "General Kenobi! \n\n(I couldn't help but respond with the iconic Star Wars greeting since you used it first. )\n\nIs there anything I can help you with today?\n[Image of Hello there General Kenobi]"
+                    ]
+                },
+                {
+                    "id": "rc_de6dd3fb793a5402",
+                    "content": [
+                        "General Kenobi! (or just a friendly hello, whichever you prefer!). \n\nI see you're a person of culture as well. *Star Wars* references are always appreciated. \n\nHow can I help you today?\n"
+                    ]
+                },
+                {
+                    "id": "rc_a672ac089caf32db",
+                    "content": [
+                        "General Kenobi! (or just a friendly hello if you're not a Star Wars fan!). \n\nHow can I help you today? Feel free to ask me anything, or tell me what you'd like to chat about. I'm here to assist in any way I can.\n[Image of Obi-Wan Kenobi saying hello there]"
+                    ]
+                }
+            ],
+            "images": [
+                "https://i.pinimg.com/originals/40/74/60/407460925c9e419d82b93313f0b42f71.jpg"
+            ]
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        def for_stream():
+            response = self.session.ask(prompt)
+            self.last_response.update(response)
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+            yield dumps(response) if raw else response
+
+        def for_non_stream():
+            # let's make use of stream
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["content"]
+
+    def reset(self):
+        """Reset the current conversation"""
+        self.session.async_chatbot.conversation_id = ""
+        self.session.async_chatbot.response_id = ""
+        self.session.async_chatbot.choice_id = ""
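
A minimal sketch for the new cookie-based `GEMINI` provider (hypothetical; the cookie file name comes from the class docstring, and exporting it from a logged-in bard.google.com session is an assumption about the required setup):

```python
from webscout.AI import GEMINI

# Cookies must be exported from a logged-in bard.google.com session
# (e.g. with a browser cookie-export extension) to the JSON file below.
gemini = GEMINI(cookie_file="bard.google.com.cookies.json")
print(gemini.chat("hello there"))
gemini.reset()  # clear conversation/response/choice ids for a fresh chat
```
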
 #-------------------------------------------------------Prodia-------------------------------------------------------------------------
 class Prodia:
     """
@@ -1379,10 +2244,6 @@ def yepchat(message):
 def youchat(prompt):
     youChat.chat_cli(prompt)
 
-@cli.command()
-@click.option('--message', prompt='Enter your message', help='The message to send.')
-def gemini(message):
-    Gemini.chat_cli(message)
 
 @cli.command()
 @click.option('--prompt', prompt='Enter your prompt', help='The prompt for generating the image.')