webscout 4.5.tar.gz → 4.6.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (85)
  1. {webscout-4.5/webscout.egg-info → webscout-4.6}/PKG-INFO +12 -14
  2. {webscout-4.5 → webscout-4.6}/README.md +10 -13
  3. {webscout-4.5 → webscout-4.6}/setup.py +2 -1
  4. {webscout-4.5 → webscout-4.6}/webscout/AIutel.py +10 -0
  5. {webscout-4.5 → webscout-4.6}/webscout/Extra/gguf.py +1 -1
  6. {webscout-4.5 → webscout-4.6}/webscout/Provider/BasedGPT.py +38 -36
  7. {webscout-4.5 → webscout-4.6}/webscout/Provider/Blackboxai.py +14 -10
  8. webscout-4.6/webscout/Provider/DARKAI.py +207 -0
  9. webscout-4.6/webscout/Provider/Deepseek.py +212 -0
  10. webscout-4.6/webscout/Provider/Llama3.py +173 -0
  11. webscout-4.6/webscout/Provider/PizzaGPT.py +178 -0
  12. webscout-4.6/webscout/Provider/RUBIKSAI.py +201 -0
  13. {webscout-4.5 → webscout-4.6}/webscout/Provider/__init__.py +11 -3
  14. webscout-4.6/webscout/Provider/koala.py +239 -0
  15. {webscout-4.5 → webscout-4.6}/webscout/__init__.py +1 -0
  16. {webscout-4.5 → webscout-4.6}/webscout/version.py +1 -1
  17. {webscout-4.5 → webscout-4.6}/webscout/webai.py +15 -1
  18. {webscout-4.5 → webscout-4.6/webscout.egg-info}/PKG-INFO +12 -14
  19. {webscout-4.5 → webscout-4.6}/webscout.egg-info/SOURCES.txt +6 -1
  20. {webscout-4.5 → webscout-4.6}/webscout.egg-info/requires.txt +1 -0
  21. webscout-4.5/webscout/Provider/Deepseek.py +0 -266
  22. {webscout-4.5 → webscout-4.6}/LICENSE.md +0 -0
  23. {webscout-4.5 → webscout-4.6}/setup.cfg +0 -0
  24. {webscout-4.5 → webscout-4.6}/webscout/AIauto.py +0 -0
  25. {webscout-4.5 → webscout-4.6}/webscout/AIbase.py +0 -0
  26. {webscout-4.5 → webscout-4.6}/webscout/Agents/Onlinesearcher.py +0 -0
  27. {webscout-4.5 → webscout-4.6}/webscout/Agents/__init__.py +0 -0
  28. {webscout-4.5 → webscout-4.6}/webscout/Agents/functioncall.py +0 -0
  29. {webscout-4.5 → webscout-4.6}/webscout/DWEBS.py +0 -0
  30. {webscout-4.5 → webscout-4.6}/webscout/Extra/__init__.py +0 -0
  31. {webscout-4.5 → webscout-4.6}/webscout/Extra/autollama.py +0 -0
  32. {webscout-4.5 → webscout-4.6}/webscout/Extra/weather.py +0 -0
  33. {webscout-4.5 → webscout-4.6}/webscout/Extra/weather_ascii.py +0 -0
  34. {webscout-4.5 → webscout-4.6}/webscout/GoogleS.py +0 -0
  35. {webscout-4.5 → webscout-4.6}/webscout/LLM.py +0 -0
  36. {webscout-4.5 → webscout-4.6}/webscout/Local/__init__.py +0 -0
  37. {webscout-4.5 → webscout-4.6}/webscout/Local/_version.py +0 -0
  38. {webscout-4.5 → webscout-4.6}/webscout/Local/formats.py +0 -0
  39. {webscout-4.5 → webscout-4.6}/webscout/Local/model.py +0 -0
  40. {webscout-4.5 → webscout-4.6}/webscout/Local/rawdog.py +0 -0
  41. {webscout-4.5 → webscout-4.6}/webscout/Local/samplers.py +0 -0
  42. {webscout-4.5 → webscout-4.6}/webscout/Local/thread.py +0 -0
  43. {webscout-4.5 → webscout-4.6}/webscout/Local/utils.py +0 -0
  44. {webscout-4.5 → webscout-4.6}/webscout/Provider/Andi.py +0 -0
  45. {webscout-4.5 → webscout-4.6}/webscout/Provider/Berlin4h.py +0 -0
  46. {webscout-4.5 → webscout-4.6}/webscout/Provider/ChatGPTUK.py +0 -0
  47. {webscout-4.5 → webscout-4.6}/webscout/Provider/Cohere.py +0 -0
  48. {webscout-4.5 → webscout-4.6}/webscout/Provider/Deepinfra.py +0 -0
  49. {webscout-4.5 → webscout-4.6}/webscout/Provider/FreeGemini.py +0 -0
  50. {webscout-4.5 → webscout-4.6}/webscout/Provider/Gemini.py +0 -0
  51. {webscout-4.5 → webscout-4.6}/webscout/Provider/Geminiflash.py +0 -0
  52. {webscout-4.5 → webscout-4.6}/webscout/Provider/Geminipro.py +0 -0
  53. {webscout-4.5 → webscout-4.6}/webscout/Provider/Groq.py +0 -0
  54. {webscout-4.5 → webscout-4.6}/webscout/Provider/Koboldai.py +0 -0
  55. {webscout-4.5 → webscout-4.6}/webscout/Provider/Leo.py +0 -0
  56. {webscout-4.5 → webscout-4.6}/webscout/Provider/Llama.py +0 -0
  57. {webscout-4.5 → webscout-4.6}/webscout/Provider/OLLAMA.py +0 -0
  58. {webscout-4.5 → webscout-4.6}/webscout/Provider/OpenGPT.py +0 -0
  59. {webscout-4.5 → webscout-4.6}/webscout/Provider/Openai.py +0 -0
  60. {webscout-4.5 → webscout-4.6}/webscout/Provider/Perplexity.py +0 -0
  61. {webscout-4.5 → webscout-4.6}/webscout/Provider/Phind.py +0 -0
  62. {webscout-4.5 → webscout-4.6}/webscout/Provider/Poe.py +0 -0
  63. {webscout-4.5 → webscout-4.6}/webscout/Provider/Reka.py +0 -0
  64. {webscout-4.5 → webscout-4.6}/webscout/Provider/ThinkAnyAI.py +0 -0
  65. {webscout-4.5 → webscout-4.6}/webscout/Provider/VTLchat.py +0 -0
  66. {webscout-4.5 → webscout-4.6}/webscout/Provider/Xjai.py +0 -0
  67. {webscout-4.5 → webscout-4.6}/webscout/Provider/Yepchat.py +0 -0
  68. {webscout-4.5 → webscout-4.6}/webscout/Provider/Youchat.py +0 -0
  69. {webscout-4.5 → webscout-4.6}/webscout/YTdownloader.py +0 -0
  70. {webscout-4.5 → webscout-4.6}/webscout/__main__.py +0 -0
  71. {webscout-4.5 → webscout-4.6}/webscout/async_providers.py +0 -0
  72. {webscout-4.5 → webscout-4.6}/webscout/cli.py +0 -0
  73. {webscout-4.5 → webscout-4.6}/webscout/exceptions.py +0 -0
  74. {webscout-4.5 → webscout-4.6}/webscout/g4f.py +0 -0
  75. {webscout-4.5 → webscout-4.6}/webscout/models.py +0 -0
  76. {webscout-4.5 → webscout-4.6}/webscout/tempid.py +0 -0
  77. {webscout-4.5 → webscout-4.6}/webscout/transcriber.py +0 -0
  78. {webscout-4.5 → webscout-4.6}/webscout/utils.py +0 -0
  79. {webscout-4.5 → webscout-4.6}/webscout/voice.py +0 -0
  80. {webscout-4.5 → webscout-4.6}/webscout/webscout_search.py +0 -0
  81. {webscout-4.5 → webscout-4.6}/webscout/webscout_search_async.py +0 -0
  82. {webscout-4.5 → webscout-4.6}/webscout/websx_search.py +0 -0
  83. {webscout-4.5 → webscout-4.6}/webscout.egg-info/dependency_links.txt +0 -0
  84. {webscout-4.5 → webscout-4.6}/webscout.egg-info/entry_points.txt +0 -0
  85. {webscout-4.5 → webscout-4.6}/webscout.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 4.5
+Version: 4.6
 Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -61,6 +61,7 @@ Requires-Dist: PyExecJS
 Requires-Dist: ollama
 Requires-Dist: pyfiglet
 Requires-Dist: yaspin
+Requires-Dist: pillow
 Provides-Extra: dev
 Requires-Dist: ruff>=0.1.6; extra == "dev"
 Requires-Dist: pytest>=7.4.2; extra == "dev"
@@ -1368,7 +1369,7 @@ from rich import print
 
 ai = DeepSeek(
     is_conversation=True,
-    api_key='', # Watch this video https://youtu.be/Euin6p5Ryks?si=-84JBtyqGwMzvdIq to know from where u can get this key for free
+    api_key='23bfff080d38429c9fbbf3c76f88454c',
     max_tokens=800,
     timeout=30,
     intro=None,
@@ -1380,18 +1381,12 @@ ai = DeepSeek(
     model="deepseek_chat"
 )
 
-# Start an infinite loop for continuous interaction
-while True:
-    # Define a prompt to send to the AI
-    prompt = input("Enter your prompt: ")
-
-    # Check if the user wants to exit the loop
-    if prompt.lower() == "exit":
-        break
-
-    # Use the 'chat' method to send the prompt and receive a response
-    r = ai.chat(prompt)
-    print(r)
+
+# Define a prompt to send to the AI
+prompt = "Tell me about india"
+# Use the 'chat' method to send the prompt and receive a response
+r = ai.chat(prompt)
+print(r)
 ```
 ### 18. `Deepinfra`
 ```python
@@ -1492,6 +1487,9 @@ from webscout import AndiSearch
 a = AndiSearch()
 print(a.chat("HelpingAI-9B"))
 ```
+
+### 25. LLAMA3, pizzagpt, RUBIKSAI, Koala, Darkai
+code similar to other providers
 ### `LLM`
 ```python
 from webscout.LLM import LLM
@@ -1296,7 +1296,7 @@ from rich import print
 
 ai = DeepSeek(
     is_conversation=True,
-    api_key='', # Watch this video https://youtu.be/Euin6p5Ryks?si=-84JBtyqGwMzvdIq to know from where u can get this key for free
+    api_key='23bfff080d38429c9fbbf3c76f88454c',
     max_tokens=800,
     timeout=30,
     intro=None,
@@ -1308,18 +1308,12 @@ ai = DeepSeek(
     model="deepseek_chat"
 )
 
-# Start an infinite loop for continuous interaction
-while True:
-    # Define a prompt to send to the AI
-    prompt = input("Enter your prompt: ")
-
-    # Check if the user wants to exit the loop
-    if prompt.lower() == "exit":
-        break
-
-    # Use the 'chat' method to send the prompt and receive a response
-    r = ai.chat(prompt)
-    print(r)
+
+# Define a prompt to send to the AI
+prompt = "Tell me about india"
+# Use the 'chat' method to send the prompt and receive a response
+r = ai.chat(prompt)
+print(r)
 ```
 ### 18. `Deepinfra`
 ```python
@@ -1420,6 +1414,9 @@ from webscout import AndiSearch
 a = AndiSearch()
 print(a.chat("HelpingAI-9B"))
 ```
+
+### 25. LLAMA3, pizzagpt, RUBIKSAI, Koala, Darkai
+code similar to other providers
 ### `LLM`
 ```python
 from webscout.LLM import LLM
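
The "### 25." section added above says only that the new providers are used like the existing ones; the README never shows their imports. A minimal sketch, assuming the new classes follow the same `Provider.chat()` pattern as the other README examples and that `LLAMA3` is the exported class name (an assumption based on the `Provider/__init__.py` and `webscout/__init__.py` changes in this diff, not confirmed by the README text):

```python
# Hypothetical usage sketch: the class name LLAMA3 and the top-level export
# are assumptions inferred from this diff's __init__.py changes.
from webscout import LLAMA3

ai = LLAMA3()
print(ai.chat("What is the capital of France?"))
```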
@@ -5,7 +5,7 @@ with open("README.md", encoding="utf-8") as f:
 
 setup(
     name="webscout",
-    version="4.5",
+    version="4.6",
     description="Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more",
     long_description=README,
     long_description_content_type="text/markdown",
@@ -65,6 +65,7 @@ setup(
         "ollama",
         "pyfiglet",
         "yaspin",
+        "pillow",
     ],
     entry_points={
         "console_scripts": [
@@ -54,6 +54,7 @@ webai = [
    "geminipro",
    "ollama",
    "andi",
+   "llama3"
]

gpt4free_providers = [
@@ -533,6 +534,15 @@ LLM:
 ```python
 print("The essay is about...")
 ```
+
+3. User: Weather in qazigund
+
+LLM:
+```python
+from webscout import weather as w
+weather = w.get("Qazigund")
+w.print_weather(weather)
+```
 """
 
 
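The few-shot example added above teaches webai's rawdog mode to answer weather queries with webscout's own weather helper. Run standalone, the same calls would look like this (assuming `weather` is importable from the top-level `webscout` package, as the prompt itself claims):

```python
# Same calls as in the few-shot prompt above; w.get / w.print_weather are
# taken verbatim from the diff, not independently verified.
from webscout import weather as w

report = w.get("Qazigund")   # fetch conditions for the named location
w.print_weather(report)      # render the report to the terminal
```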
@@ -153,7 +153,7 @@ huggingface-cli download "$MODEL_ID" --local-dir "./${MODEL_NAME}" --local-dir-u
 # Convert to fp16
 FP16="${MODEL_NAME}/${MODEL_NAME,,}.fp16.bin"
 echo "Converting the model to fp16..."
-python3 llama.cpp/convert-hf-to-gguf.py "$MODEL_NAME" --outtype f16 --outfile "$FP16"
+python3 llama.cpp/convert_hf_to_gguf.py "$MODEL_NAME" --outtype f16 --outfile "$FP16"
 
 # Quantize the model
 echo "Quantizing the model..."
@@ -40,7 +40,7 @@ class BasedGPT(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        system_prompt: str = "Be Helpful and Friendly",
+        model: str = "gpt-3.5-turbo"
     ):
         """Instantiates BasedGPT
 
@@ -54,25 +54,40 @@ class BasedGPT(Provider):
             proxies (dict, optional): Http request proxies. Defaults to {}.
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            system_prompt (str, optional): System prompt for BasedGPT. Defaults to "Be Helpful and Friendly".
+            model (str, optional): Model to use for generating text. Defaults to "gpt-3.5-turbo".
         """
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.chat_endpoint = "https://www.basedgpt.chat/api/chat"
-        self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
-        self.system_prompt = system_prompt
+        self.model = model
+        self.headers = {
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-length": "109",
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://www.basedgpt.chat",
+            "priority": "u=1, i",
+            "referer": "https://www.basedgpt.chat/",
+            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
+        }
 
         self.__available_optimizers = (
             method
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
-        self.session.headers.update(
-            {"Content-Type": "application/json"}
-        )
+        self.session.headers.update(self.headers)
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -106,25 +121,7 @@ class BasedGPT(Provider):
            dict : {}
         ```json
         {
-            "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
-            "object": "chat.completion",
-            "created": 1704623244,
-            "model": "gpt-3.5-turbo",
-            "usage": {
-                "prompt_tokens": 0,
-                "completion_tokens": 0,
-                "total_tokens": 0
-            },
-            "choices": [
-                {
-                    "message": {
-                        "role": "assistant",
-                        "content": "Hello! How can I assist you today?"
-                    },
-                    "finish_reason": "stop",
-                    "index": 0
-                }
-            ]
+            "text" : "How may I assist you today?"
         }
         ```
         """
@@ -139,11 +136,14 @@ class BasedGPT(Provider):
                 f"Optimizer is not one of {self.__available_optimizers}"
             )
 
+        self.session.headers.update(self.headers)
         payload = {
             "messages": [
-                {"role": "system", "content": self.system_prompt},
-                {"role": "user", "content": conversation_prompt},
-            ],
+                {
+                    "role": "user",
+                    "content": conversation_prompt
+                }
+            ]
         }
 
         def for_stream():
@@ -151,22 +151,24 @@ class BasedGPT(Provider):
                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
             )
             if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
+                raise Exception(
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
 
-            message_load = ""
+            streaming_text = ""
             for value in response.iter_lines(
                 decode_unicode=True,
-                delimiter="",
-                chunk_size=self.stream_chunk_size,
+                chunk_size=64,
+                delimiter="\n",
             ):
                 try:
-                    message_load += value
-                    yield value if raw else dict(text=message_load)
+                    if bool(value):
+                        streaming_text += value + ("\n" if stream else "")
+                        resp = dict(text=streaming_text)
+                        self.last_response.update(resp)
+                        yield value if raw else resp
                 except json.decoder.JSONDecodeError:
                     pass
-            self.last_response.update(dict(text=message_load))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )
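
One behavioral note on the rewritten loop: each yielded `resp` dict carries the accumulated `streaming_text`, not just the newest line, so a streaming consumer has to print only the suffix it has not yet seen. A minimal consumption sketch (the import path is inferred from the file list above):

```python
# Sketch of consuming BasedGPT.ask(stream=True) as rewritten in the hunk above.
from webscout.Provider.BasedGPT import BasedGPT

bot = BasedGPT()
seen = 0
for resp in bot.ask("Say hello", stream=True):
    text = resp["text"]                      # accumulated text so far
    print(text[seen:], end="", flush=True)   # emit only the new suffix
    seen = len(text)
print()
```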
@@ -22,14 +22,14 @@ import yaml
 from ..AIutel import Optimizers
 from ..AIutel import Conversation
 from ..AIutel import AwesomePrompts, sanitize_stream
-from ..AIbase import Provider, AsyncProvider
+from ..AIbase import Provider, AsyncProvider
 from Helpingai_T2 import Perplexity
 from webscout import exceptions
 from typing import Any, AsyncGenerator, Dict
 import logging
 import httpx
 
-#------------------------------------------------------BLACKBOXAI--------------------------------------------------------
+#------------------------------------------------------BLACKBOXAI--------------------------------------------------------
 class BLACKBOXAI:
     def __init__(
         self,
@@ -234,13 +234,9 @@ class BLACKBOXAI:
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
-    @staticmethod
-    def chat_cli(prompt):
-        """Sends a request to the BLACKBOXAI API and processes the response."""
-        blackbox_ai = BLACKBOXAI() # Initialize a BLACKBOXAI instance
-        response = blackbox_ai.ask(prompt) # Perform a chat with the given prompt
-        processed_response = blackbox_ai.get_message(response) # Process the response
-        print(processed_response)
+
+
+
 class AsyncBLACKBOXAI(AsyncProvider):
     def __init__(
         self,
@@ -437,4 +433,12 @@ class AsyncBLACKBOXAI(AsyncProvider):
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        return response["text"]
+
+# Function to clean the response text
+def clean_response(response_text: str) -> str:
+    # Remove web search results
+    response_text = re.sub(r'\$@\$v=undefined-rv1\$@\$Sources:.*?\$~~~', '', response_text, flags=re.DOTALL)
+    # Remove any remaining special characters or markers
+    response_text = re.sub(r'\$~~~', '', response_text)
+    return response_text
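
The module-level `clean_response` helper appended here strips Blackbox's inline web-search markers; it relies on `re`, presumably imported earlier in the file (the hunk does not show the full import list). A quick demonstration of what the two substitutions remove, with the regexes copied verbatim from the diff and an invented sample input:

```python
# Standalone reproduction of the helper above; the sample string is made up.
import re

def clean_response(response_text: str) -> str:
    # Drop the embedded web-search block: marker ... "$~~~" (non-greedy, DOTALL).
    response_text = re.sub(r'\$@\$v=undefined-rv1\$@\$Sources:.*?\$~~~', '',
                           response_text, flags=re.DOTALL)
    # Drop any stray remaining "$~~~" markers.
    response_text = re.sub(r'\$~~~', '', response_text)
    return response_text

sample = "$@$v=undefined-rv1$@$Sources: example.com$~~~Hello!$~~~"
print(clean_response(sample))  # -> Hello!
```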
@@ -0,0 +1,207 @@
+import time
+import json
+from typing import Any, Dict, Optional
+from ..AIutel import Optimizers
+from ..AIutel import Conversation
+from ..AIutel import AwesomePrompts, sanitize_stream
+from ..AIbase import Provider
+from webscout import exceptions
+import requests
+class DARKAI(Provider):
+    """
+    A class to interact with the DarkAI API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "gpt-4o", #llama-3-70b, llama-3-405b, gpt-3.5-turbo, gpt-4o
+    ) -> None:
+        """
+        Initializes the DARKAI API with given parameters.
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion.
+                Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts.
+                Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): AI model to use. Defaults to "gpt-4o". #llama-3-70b, llama-3-405b, gpt-3.5-turbo, gpt-4o
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://darkai.foundation/chat"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.headers = {
+            "accept": "text/event-stream",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://www.aiuncensored.info",
+            "referer": "https://www.aiuncensored.info/",
+            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "cross-site",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Sends a prompt to the DarkAI API and returns the response.
+
+        Args:
+            prompt: The text prompt to generate text from.
+            stream (bool, optional): Whether to stream the response. Defaults to False.
+            raw (bool, optional): Whether to return the raw response. Defaults to False.
+            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
+        Returns:
+            The response from the API.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "query": conversation_prompt,
+            "model": self.model
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
+            )
+
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason})"
+                )
+
+            streaming_response = ""
+            for line in response.iter_lines():
+                if line:
+                    decoded_line = line.decode('utf-8')
+                    if decoded_line.startswith("data:"):
+                        data = decoded_line[len("data:"):].strip()
+                        if data:
+                            try:
+                                event = json.loads(data)
+                                if event.get("event") == "final-response":
+                                    message = event['data'].get('message', '')
+                                    streaming_response += message
+                                    yield message if raw else dict(text=streaming_response)
+                            except json.decoder.JSONDecodeError:
+                                continue
+            self.last_response.update(dict(text=streaming_response))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
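
The new DARKAI provider mirrors the other Provider classes: `ask()` parses the SSE stream, keeps only `final-response` events, and yields dicts whose `text` field is the accumulated reply, while `chat()` wraps that into plain strings. A usage sketch (the import path follows the file list above; the model names are the ones in the constructor comment):

```python
# Usage sketch for the new provider; DARKAI may also be re-exported from the
# top-level webscout package per the __init__.py changes, but that is assumed.
from webscout.Provider.DARKAI import DARKAI

ai = DARKAI(model="gpt-4o", timeout=30)
print(ai.chat("Summarize what an SSE stream is."))  # blocking, full text

# Streaming: each chunk is the accumulated text so far, so keep the last one.
final = ""
for chunk in ai.chat("And now in one sentence.", stream=True):
    final = chunk
print(final)
```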