webscout 4.5__tar.gz → 4.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (86)
  1. {webscout-4.5/webscout.egg-info → webscout-4.7}/PKG-INFO +40 -60
  2. {webscout-4.5 → webscout-4.7}/README.md +37 -59
  3. {webscout-4.5 → webscout-4.7}/setup.py +3 -1
  4. {webscout-4.5 → webscout-4.7}/webscout/AIutel.py +10 -0
  5. {webscout-4.5 → webscout-4.7}/webscout/Extra/gguf.py +1 -1
  6. {webscout-4.5 → webscout-4.7}/webscout/Provider/BasedGPT.py +38 -36
  7. {webscout-4.5 → webscout-4.7}/webscout/Provider/Blackboxai.py +14 -10
  8. webscout-4.7/webscout/Provider/DARKAI.py +207 -0
  9. webscout-4.7/webscout/Provider/Deepseek.py +212 -0
  10. webscout-4.7/webscout/Provider/Llama3.py +173 -0
  11. webscout-4.7/webscout/Provider/PizzaGPT.py +178 -0
  12. webscout-4.7/webscout/Provider/RUBIKSAI.py +201 -0
  13. {webscout-4.5 → webscout-4.7}/webscout/Provider/__init__.py +14 -3
  14. webscout-4.7/webscout/Provider/koala.py +239 -0
  15. webscout-4.7/webscout/Provider/meta.py +778 -0
  16. {webscout-4.5 → webscout-4.7}/webscout/__init__.py +1 -0
  17. {webscout-4.5 → webscout-4.7}/webscout/exceptions.py +6 -0
  18. {webscout-4.5 → webscout-4.7}/webscout/version.py +1 -1
  19. {webscout-4.5 → webscout-4.7}/webscout/webai.py +15 -1
  20. {webscout-4.5 → webscout-4.7/webscout.egg-info}/PKG-INFO +40 -60
  21. {webscout-4.5 → webscout-4.7}/webscout.egg-info/SOURCES.txt +7 -1
  22. {webscout-4.5 → webscout-4.7}/webscout.egg-info/requires.txt +2 -0
  23. webscout-4.5/webscout/Provider/Deepseek.py +0 -266
  24. {webscout-4.5 → webscout-4.7}/LICENSE.md +0 -0
  25. {webscout-4.5 → webscout-4.7}/setup.cfg +0 -0
  26. {webscout-4.5 → webscout-4.7}/webscout/AIauto.py +0 -0
  27. {webscout-4.5 → webscout-4.7}/webscout/AIbase.py +0 -0
  28. {webscout-4.5 → webscout-4.7}/webscout/Agents/Onlinesearcher.py +0 -0
  29. {webscout-4.5 → webscout-4.7}/webscout/Agents/__init__.py +0 -0
  30. {webscout-4.5 → webscout-4.7}/webscout/Agents/functioncall.py +0 -0
  31. {webscout-4.5 → webscout-4.7}/webscout/DWEBS.py +0 -0
  32. {webscout-4.5 → webscout-4.7}/webscout/Extra/__init__.py +0 -0
  33. {webscout-4.5 → webscout-4.7}/webscout/Extra/autollama.py +0 -0
  34. {webscout-4.5 → webscout-4.7}/webscout/Extra/weather.py +0 -0
  35. {webscout-4.5 → webscout-4.7}/webscout/Extra/weather_ascii.py +0 -0
  36. {webscout-4.5 → webscout-4.7}/webscout/GoogleS.py +0 -0
  37. {webscout-4.5 → webscout-4.7}/webscout/LLM.py +0 -0
  38. {webscout-4.5 → webscout-4.7}/webscout/Local/__init__.py +0 -0
  39. {webscout-4.5 → webscout-4.7}/webscout/Local/_version.py +0 -0
  40. {webscout-4.5 → webscout-4.7}/webscout/Local/formats.py +0 -0
  41. {webscout-4.5 → webscout-4.7}/webscout/Local/model.py +0 -0
  42. {webscout-4.5 → webscout-4.7}/webscout/Local/rawdog.py +0 -0
  43. {webscout-4.5 → webscout-4.7}/webscout/Local/samplers.py +0 -0
  44. {webscout-4.5 → webscout-4.7}/webscout/Local/thread.py +0 -0
  45. {webscout-4.5 → webscout-4.7}/webscout/Local/utils.py +0 -0
  46. {webscout-4.5 → webscout-4.7}/webscout/Provider/Andi.py +0 -0
  47. {webscout-4.5 → webscout-4.7}/webscout/Provider/Berlin4h.py +0 -0
  48. {webscout-4.5 → webscout-4.7}/webscout/Provider/ChatGPTUK.py +0 -0
  49. {webscout-4.5 → webscout-4.7}/webscout/Provider/Cohere.py +0 -0
  50. {webscout-4.5 → webscout-4.7}/webscout/Provider/Deepinfra.py +0 -0
  51. {webscout-4.5 → webscout-4.7}/webscout/Provider/FreeGemini.py +0 -0
  52. {webscout-4.5 → webscout-4.7}/webscout/Provider/Gemini.py +0 -0
  53. {webscout-4.5 → webscout-4.7}/webscout/Provider/Geminiflash.py +0 -0
  54. {webscout-4.5 → webscout-4.7}/webscout/Provider/Geminipro.py +0 -0
  55. {webscout-4.5 → webscout-4.7}/webscout/Provider/Groq.py +0 -0
  56. {webscout-4.5 → webscout-4.7}/webscout/Provider/Koboldai.py +0 -0
  57. {webscout-4.5 → webscout-4.7}/webscout/Provider/Leo.py +0 -0
  58. {webscout-4.5 → webscout-4.7}/webscout/Provider/Llama.py +0 -0
  59. {webscout-4.5 → webscout-4.7}/webscout/Provider/OLLAMA.py +0 -0
  60. {webscout-4.5 → webscout-4.7}/webscout/Provider/OpenGPT.py +0 -0
  61. {webscout-4.5 → webscout-4.7}/webscout/Provider/Openai.py +0 -0
  62. {webscout-4.5 → webscout-4.7}/webscout/Provider/Perplexity.py +0 -0
  63. {webscout-4.5 → webscout-4.7}/webscout/Provider/Phind.py +0 -0
  64. {webscout-4.5 → webscout-4.7}/webscout/Provider/Poe.py +0 -0
  65. {webscout-4.5 → webscout-4.7}/webscout/Provider/Reka.py +0 -0
  66. {webscout-4.5 → webscout-4.7}/webscout/Provider/ThinkAnyAI.py +0 -0
  67. {webscout-4.5 → webscout-4.7}/webscout/Provider/VTLchat.py +0 -0
  68. {webscout-4.5 → webscout-4.7}/webscout/Provider/Xjai.py +0 -0
  69. {webscout-4.5 → webscout-4.7}/webscout/Provider/Yepchat.py +0 -0
  70. {webscout-4.5 → webscout-4.7}/webscout/Provider/Youchat.py +0 -0
  71. {webscout-4.5 → webscout-4.7}/webscout/YTdownloader.py +0 -0
  72. {webscout-4.5 → webscout-4.7}/webscout/__main__.py +0 -0
  73. {webscout-4.5 → webscout-4.7}/webscout/async_providers.py +0 -0
  74. {webscout-4.5 → webscout-4.7}/webscout/cli.py +0 -0
  75. {webscout-4.5 → webscout-4.7}/webscout/g4f.py +0 -0
  76. {webscout-4.5 → webscout-4.7}/webscout/models.py +0 -0
  77. {webscout-4.5 → webscout-4.7}/webscout/tempid.py +0 -0
  78. {webscout-4.5 → webscout-4.7}/webscout/transcriber.py +0 -0
  79. {webscout-4.5 → webscout-4.7}/webscout/utils.py +0 -0
  80. {webscout-4.5 → webscout-4.7}/webscout/voice.py +0 -0
  81. {webscout-4.5 → webscout-4.7}/webscout/webscout_search.py +0 -0
  82. {webscout-4.5 → webscout-4.7}/webscout/webscout_search_async.py +0 -0
  83. {webscout-4.5 → webscout-4.7}/webscout/websx_search.py +0 -0
  84. {webscout-4.5 → webscout-4.7}/webscout.egg-info/dependency_links.txt +0 -0
  85. {webscout-4.5 → webscout-4.7}/webscout.egg-info/entry_points.txt +0 -0
  86. {webscout-4.5 → webscout-4.7}/webscout.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: webscout
3
- Version: 4.5
3
+ Version: 4.7
4
4
  Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
5
5
  Author: OEvortex
6
6
  Author-email: helpingai5@gmail.com
@@ -61,6 +61,8 @@ Requires-Dist: PyExecJS
61
61
  Requires-Dist: ollama
62
62
  Requires-Dist: pyfiglet
63
63
  Requires-Dist: yaspin
64
+ Requires-Dist: pillow
65
+ Requires-Dist: requests_html
64
66
  Provides-Extra: dev
65
67
  Requires-Dist: ruff>=0.1.6; extra == "dev"
66
68
  Requires-Dist: pytest>=7.4.2; extra == "dev"
@@ -1175,55 +1177,36 @@ prompt = "Explain the concept of recursion in simple terms."
1175
1177
  response = perplexity.chat(prompt)
1176
1178
  print(response)
1177
1179
  ```
1178
- ### 8. `OpenGPT` - chat With OPENGPT
1180
+ ### 8. `meta ai` - chat With meta ai
1179
1181
  ```python
1180
- from webscout import OPENGPT
1182
+ from webscout import Meta
1183
+ from rich import print
1184
+ # **For unauthenticated usage**
1185
+ meta_ai = Meta()
1181
1186
 
1182
- opengpt = OPENGPT(is_conversation=True, max_tokens=8000, timeout=30, assistant_id="bca37014-6f97-4f2b-8928-81ea8d478d88")
1183
- while True:
1184
- # Prompt the user for input
1185
- prompt = input("Enter your prompt: ")
1186
- # Send the prompt to the OPENGPT model and print the response
1187
- response_str = opengpt.chat(prompt)
1188
- print(response_str)
1189
- ```
1190
- ```python
1191
- from webscout import OPENGPTv2
1192
-
1193
- # Initialize the bot with all specified settings
1194
- bot = OPENGPTv2(
1195
- generate_new_agents=True, # Set to True to generate new IDs, False to load from file
1196
- assistant_name="My Custom Assistant",
1197
- retrieval_description="Helpful information from my files.",
1198
- agent_system_message="",
1199
- enable_action_server=False, # Assuming you want to disable Action Server by Robocorp
1200
- enable_ddg_search=False, # Enable DuckDuckGo search tool
1201
- enable_arxiv=False, # Assuming you want to disable Arxiv
1202
- enable_press_releases=False, # Assuming you want to disable Press Releases (Kay.ai)
1203
- enable_pubmed=False, # Assuming you want to disable PubMed
1204
- enable_sec_filings=False, # Assuming you want to disable SEC Filings (Kay.ai)
1205
- enable_retrieval=False, # Assuming you want to disable Retrieval
1206
- enable_search_tavily=False, # Assuming you want to disable Search (Tavily)
1207
- enable_search_short_answer_tavily=False, # Assuming you want to disable Search (short answer, Tavily)
1208
- enable_you_com_search=True, # Assuming you want to disable You.com Search
1209
- enable_wikipedia=False, # Enable Wikipedia tool
1210
- is_public=True,
1211
- is_conversation=True,
1212
- max_tokens=800,
1213
- timeout=40,
1214
- filepath="opengpt_conversation_history.txt",
1215
- update_file=True,
1216
- history_offset=10250,
1217
- act=None,
1218
- )
1187
+ # Simple text prompt
1188
+ response = meta_ai.chat("What is the capital of France?")
1189
+ print(response)
1219
1190
 
1220
- # Example interaction loop
1221
- while True:
1222
- prompt = input("You: ")
1223
- if prompt.strip().lower() == 'exit':
1224
- break
1225
- response = bot.chat(prompt)
1226
- print(response)
1191
+ # Streaming response
1192
+ for chunk in meta_ai.chat("Tell me a story about a cat."):
1193
+ print(chunk, end="", flush=True)
1194
+
1195
+ # **For authenticated usage (including image generation)**
1196
+ fb_email = "abcd@abc.com"
1197
+ fb_password = "qwertfdsa"
1198
+ meta_ai = Meta(fb_email=fb_email, fb_password=fb_password)
1199
+
1200
+ # Text prompt with web search
1201
+ response = meta_ai.ask("what is currently happning in bangladesh in aug 2024")
1202
+ print(response["message"]) # Access the text message
1203
+ print("Sources:", response["sources"]) # Access sources (if any)
1204
+
1205
+ # Image generation
1206
+ response = meta_ai.ask("Create an image of a cat wearing a hat.")
1207
+ print(response["message"]) # Print the text message from the response
1208
+ for media in response["media"]:
1209
+ print(media["url"]) # Access image URLs
1227
1210
 
1228
1211
  ```
1229
1212
  ### 9. `KOBOLDAI` -
@@ -1368,7 +1351,7 @@ from rich import print
1368
1351
 
1369
1352
  ai = DeepSeek(
1370
1353
  is_conversation=True,
1371
- api_key='', # Watch this video https://youtu.be/Euin6p5Ryks?si=-84JBtyqGwMzvdIq to know from where u can get this key for free
1354
+ api_key='23bfff080d38429c9fbbf3c76f88454c',
1372
1355
  max_tokens=800,
1373
1356
  timeout=30,
1374
1357
  intro=None,
@@ -1380,18 +1363,12 @@ ai = DeepSeek(
1380
1363
  model="deepseek_chat"
1381
1364
  )
1382
1365
 
1383
- # Start an infinite loop for continuous interaction
1384
- while True:
1385
- # Define a prompt to send to the AI
1386
- prompt = input("Enter your prompt: ")
1387
-
1388
- # Check if the user wants to exit the loop
1389
- if prompt.lower() == "exit":
1390
- break
1391
-
1392
- # Use the 'chat' method to send the prompt and receive a response
1393
- r = ai.chat(prompt)
1394
- print(r)
1366
+
1367
+ # Define a prompt to send to the AI
1368
+ prompt = "Tell me about india"
1369
+ # Use the 'chat' method to send the prompt and receive a response
1370
+ r = ai.chat(prompt)
1371
+ print(r)
1395
1372
  ```
1396
1373
  ### 18. `Deepinfra`
1397
1374
  ```python
@@ -1492,6 +1469,9 @@ from webscout import AndiSearch
1492
1469
  a = AndiSearch()
1493
1470
  print(a.chat("HelpingAI-9B"))
1494
1471
  ```
1472
+
1473
+ ### 25. LLAMA3, pizzagpt, RUBIKSAI, Koala, Darkai
1474
+ code similar to other providers
1495
1475
  ### `LLM`
1496
1476
  ```python
1497
1477
  from webscout.LLM import LLM
@@ -1103,55 +1103,36 @@ prompt = "Explain the concept of recursion in simple terms."
1103
1103
  response = perplexity.chat(prompt)
1104
1104
  print(response)
1105
1105
  ```
1106
- ### 8. `OpenGPT` - chat With OPENGPT
1106
+ ### 8. `meta ai` - chat With meta ai
1107
1107
  ```python
1108
- from webscout import OPENGPT
1108
+ from webscout import Meta
1109
+ from rich import print
1110
+ # **For unauthenticated usage**
1111
+ meta_ai = Meta()
1109
1112
 
1110
- opengpt = OPENGPT(is_conversation=True, max_tokens=8000, timeout=30, assistant_id="bca37014-6f97-4f2b-8928-81ea8d478d88")
1111
- while True:
1112
- # Prompt the user for input
1113
- prompt = input("Enter your prompt: ")
1114
- # Send the prompt to the OPENGPT model and print the response
1115
- response_str = opengpt.chat(prompt)
1116
- print(response_str)
1117
- ```
1118
- ```python
1119
- from webscout import OPENGPTv2
1120
-
1121
- # Initialize the bot with all specified settings
1122
- bot = OPENGPTv2(
1123
- generate_new_agents=True, # Set to True to generate new IDs, False to load from file
1124
- assistant_name="My Custom Assistant",
1125
- retrieval_description="Helpful information from my files.",
1126
- agent_system_message="",
1127
- enable_action_server=False, # Assuming you want to disable Action Server by Robocorp
1128
- enable_ddg_search=False, # Enable DuckDuckGo search tool
1129
- enable_arxiv=False, # Assuming you want to disable Arxiv
1130
- enable_press_releases=False, # Assuming you want to disable Press Releases (Kay.ai)
1131
- enable_pubmed=False, # Assuming you want to disable PubMed
1132
- enable_sec_filings=False, # Assuming you want to disable SEC Filings (Kay.ai)
1133
- enable_retrieval=False, # Assuming you want to disable Retrieval
1134
- enable_search_tavily=False, # Assuming you want to disable Search (Tavily)
1135
- enable_search_short_answer_tavily=False, # Assuming you want to disable Search (short answer, Tavily)
1136
- enable_you_com_search=True, # Assuming you want to disable You.com Search
1137
- enable_wikipedia=False, # Enable Wikipedia tool
1138
- is_public=True,
1139
- is_conversation=True,
1140
- max_tokens=800,
1141
- timeout=40,
1142
- filepath="opengpt_conversation_history.txt",
1143
- update_file=True,
1144
- history_offset=10250,
1145
- act=None,
1146
- )
1113
+ # Simple text prompt
1114
+ response = meta_ai.chat("What is the capital of France?")
1115
+ print(response)
1147
1116
 
1148
- # Example interaction loop
1149
- while True:
1150
- prompt = input("You: ")
1151
- if prompt.strip().lower() == 'exit':
1152
- break
1153
- response = bot.chat(prompt)
1154
- print(response)
1117
+ # Streaming response
1118
+ for chunk in meta_ai.chat("Tell me a story about a cat."):
1119
+ print(chunk, end="", flush=True)
1120
+
1121
+ # **For authenticated usage (including image generation)**
1122
+ fb_email = "abcd@abc.com"
1123
+ fb_password = "qwertfdsa"
1124
+ meta_ai = Meta(fb_email=fb_email, fb_password=fb_password)
1125
+
1126
+ # Text prompt with web search
1127
+ response = meta_ai.ask("what is currently happning in bangladesh in aug 2024")
1128
+ print(response["message"]) # Access the text message
1129
+ print("Sources:", response["sources"]) # Access sources (if any)
1130
+
1131
+ # Image generation
1132
+ response = meta_ai.ask("Create an image of a cat wearing a hat.")
1133
+ print(response["message"]) # Print the text message from the response
1134
+ for media in response["media"]:
1135
+ print(media["url"]) # Access image URLs
1155
1136
 
1156
1137
  ```
1157
1138
  ### 9. `KOBOLDAI` -
@@ -1296,7 +1277,7 @@ from rich import print
1296
1277
 
1297
1278
  ai = DeepSeek(
1298
1279
  is_conversation=True,
1299
- api_key='', # Watch this video https://youtu.be/Euin6p5Ryks?si=-84JBtyqGwMzvdIq to know from where u can get this key for free
1280
+ api_key='23bfff080d38429c9fbbf3c76f88454c',
1300
1281
  max_tokens=800,
1301
1282
  timeout=30,
1302
1283
  intro=None,
@@ -1308,18 +1289,12 @@ ai = DeepSeek(
1308
1289
  model="deepseek_chat"
1309
1290
  )
1310
1291
 
1311
- # Start an infinite loop for continuous interaction
1312
- while True:
1313
- # Define a prompt to send to the AI
1314
- prompt = input("Enter your prompt: ")
1315
-
1316
- # Check if the user wants to exit the loop
1317
- if prompt.lower() == "exit":
1318
- break
1319
-
1320
- # Use the 'chat' method to send the prompt and receive a response
1321
- r = ai.chat(prompt)
1322
- print(r)
1292
+
1293
+ # Define a prompt to send to the AI
1294
+ prompt = "Tell me about india"
1295
+ # Use the 'chat' method to send the prompt and receive a response
1296
+ r = ai.chat(prompt)
1297
+ print(r)
1323
1298
  ```
1324
1299
  ### 18. `Deepinfra`
1325
1300
  ```python
@@ -1420,6 +1395,9 @@ from webscout import AndiSearch
1420
1395
  a = AndiSearch()
1421
1396
  print(a.chat("HelpingAI-9B"))
1422
1397
  ```
1398
+
1399
+ ### 25. LLAMA3, pizzagpt, RUBIKSAI, Koala, Darkai
1400
+ code similar to other providers
1423
1401
  ### `LLM`
1424
1402
  ```python
1425
1403
  from webscout.LLM import LLM
@@ -5,7 +5,7 @@ with open("README.md", encoding="utf-8") as f:
5
5
 
6
6
  setup(
7
7
  name="webscout",
8
- version="4.5",
8
+ version="4.7",
9
9
  description="Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more",
10
10
  long_description=README,
11
11
  long_description_content_type="text/markdown",
@@ -65,6 +65,8 @@ setup(
65
65
  "ollama",
66
66
  "pyfiglet",
67
67
  "yaspin",
68
+ "pillow",
69
+ "requests_html"
68
70
  ],
69
71
  entry_points={
70
72
  "console_scripts": [
@@ -54,6 +54,7 @@ webai = [
54
54
  "geminipro",
55
55
  "ollama",
56
56
  "andi",
57
+ "llama3"
57
58
  ]
58
59
 
59
60
  gpt4free_providers = [
@@ -533,6 +534,15 @@ LLM:
533
534
  ```python
534
535
  print("The essay is about...")
535
536
  ```
537
+
538
+ 3. User: Weather in qazigund
539
+
540
+ LLM:
541
+ ```python
542
+ from webscout import weather as w
543
+ weather = w.get("Qazigund")
544
+ w.print_weather(weather)
545
+ ```
536
546
  """
537
547
 
538
548
 
@@ -153,7 +153,7 @@ huggingface-cli download "$MODEL_ID" --local-dir "./${MODEL_NAME}" --local-dir-u
153
153
  # Convert to fp16
154
154
  FP16="${MODEL_NAME}/${MODEL_NAME,,}.fp16.bin"
155
155
  echo "Converting the model to fp16..."
156
- python3 llama.cpp/convert-hf-to-gguf.py "$MODEL_NAME" --outtype f16 --outfile "$FP16"
156
+ python3 llama.cpp/convert_hf_to_gguf.py "$MODEL_NAME" --outtype f16 --outfile "$FP16"
157
157
 
158
158
  # Quantize the model
159
159
  echo "Quantizing the model..."
@@ -40,7 +40,7 @@ class BasedGPT(Provider):
40
40
  proxies: dict = {},
41
41
  history_offset: int = 10250,
42
42
  act: str = None,
43
- system_prompt: str = "Be Helpful and Friendly",
43
+ model: str = "gpt-3.5-turbo"
44
44
  ):
45
45
  """Instantiates BasedGPT
46
46
 
@@ -54,25 +54,40 @@ class BasedGPT(Provider):
54
54
  proxies (dict, optional): Http request proxies. Defaults to {}.
55
55
  history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
56
56
  act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
57
- system_prompt (str, optional): System prompt for BasedGPT. Defaults to "Be Helpful and Friendly".
57
+ model (str, optional): Model to use for generating text. Defaults to "gpt-3.5-turbo".
58
58
  """
59
59
  self.session = requests.Session()
60
60
  self.is_conversation = is_conversation
61
61
  self.max_tokens_to_sample = max_tokens
62
62
  self.chat_endpoint = "https://www.basedgpt.chat/api/chat"
63
- self.stream_chunk_size = 64
64
63
  self.timeout = timeout
65
64
  self.last_response = {}
66
- self.system_prompt = system_prompt
65
+ self.model = model
66
+ self.headers = {
67
+ "accept": "*/*",
68
+ "accept-encoding": "gzip, deflate, br, zstd",
69
+ "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
70
+ "content-length": "109",
71
+ "content-type": "application/json",
72
+ "dnt": "1",
73
+ "origin": "https://www.basedgpt.chat",
74
+ "priority": "u=1, i",
75
+ "referer": "https://www.basedgpt.chat/",
76
+ "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
77
+ "sec-ch-ua-mobile": "?0",
78
+ "sec-ch-ua-platform": '"Windows"',
79
+ "sec-fetch-dest": "empty",
80
+ "sec-fetch-mode": "cors",
81
+ "sec-fetch-site": "same-origin",
82
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
83
+ }
67
84
 
68
85
  self.__available_optimizers = (
69
86
  method
70
87
  for method in dir(Optimizers)
71
88
  if callable(getattr(Optimizers, method)) and not method.startswith("__")
72
89
  )
73
- self.session.headers.update(
74
- {"Content-Type": "application/json"}
75
- )
90
+ self.session.headers.update(self.headers)
76
91
  Conversation.intro = (
77
92
  AwesomePrompts().get_act(
78
93
  act, raise_not_found=True, default=None, case_insensitive=True
@@ -106,25 +121,7 @@ class BasedGPT(Provider):
106
121
  dict : {}
107
122
  ```json
108
123
  {
109
- "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
110
- "object": "chat.completion",
111
- "created": 1704623244,
112
- "model": "gpt-3.5-turbo",
113
- "usage": {
114
- "prompt_tokens": 0,
115
- "completion_tokens": 0,
116
- "total_tokens": 0
117
- },
118
- "choices": [
119
- {
120
- "message": {
121
- "role": "assistant",
122
- "content": "Hello! How can I assist you today?"
123
- },
124
- "finish_reason": "stop",
125
- "index": 0
126
- }
127
- ]
124
+ "text" : "How may I assist you today?"
128
125
  }
129
126
  ```
130
127
  """
@@ -139,11 +136,14 @@ class BasedGPT(Provider):
139
136
  f"Optimizer is not one of {self.__available_optimizers}"
140
137
  )
141
138
 
139
+ self.session.headers.update(self.headers)
142
140
  payload = {
143
141
  "messages": [
144
- {"role": "system", "content": self.system_prompt},
145
- {"role": "user", "content": conversation_prompt},
146
- ],
142
+ {
143
+ "role": "user",
144
+ "content": conversation_prompt
145
+ }
146
+ ]
147
147
  }
148
148
 
149
149
  def for_stream():
@@ -151,22 +151,24 @@ class BasedGPT(Provider):
151
151
  self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
152
152
  )
153
153
  if not response.ok:
154
- raise exceptions.FailedToGenerateResponseError(
154
+ raise Exception(
155
155
  f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
156
156
  )
157
157
 
158
- message_load = ""
158
+ streaming_text = ""
159
159
  for value in response.iter_lines(
160
160
  decode_unicode=True,
161
- delimiter="",
162
- chunk_size=self.stream_chunk_size,
161
+ chunk_size=64,
162
+ delimiter="\n",
163
163
  ):
164
164
  try:
165
- message_load += value
166
- yield value if raw else dict(text=message_load)
165
+ if bool(value):
166
+ streaming_text += value + ("\n" if stream else "")
167
+ resp = dict(text=streaming_text)
168
+ self.last_response.update(resp)
169
+ yield value if raw else resp
167
170
  except json.decoder.JSONDecodeError:
168
171
  pass
169
- self.last_response.update(dict(text=message_load))
170
172
  self.conversation.update_chat_history(
171
173
  prompt, self.get_message(self.last_response)
172
174
  )
@@ -22,14 +22,14 @@ import yaml
22
22
  from ..AIutel import Optimizers
23
23
  from ..AIutel import Conversation
24
24
  from ..AIutel import AwesomePrompts, sanitize_stream
25
- from ..AIbase import Provider, AsyncProvider
25
+ from ..AIbase import Provider, AsyncProvider
26
26
  from Helpingai_T2 import Perplexity
27
27
  from webscout import exceptions
28
28
  from typing import Any, AsyncGenerator, Dict
29
29
  import logging
30
30
  import httpx
31
31
 
32
- #------------------------------------------------------BLACKBOXAI--------------------------------------------------------
32
+ #------------------------------------------------------BLACKBOXAI--------------------------------------------------------
33
33
  class BLACKBOXAI:
34
34
  def __init__(
35
35
  self,
@@ -234,13 +234,9 @@ class BLACKBOXAI:
234
234
  """
235
235
  assert isinstance(response, dict), "Response should be of dict data-type only"
236
236
  return response["text"]
237
- @staticmethod
238
- def chat_cli(prompt):
239
- """Sends a request to the BLACKBOXAI API and processes the response."""
240
- blackbox_ai = BLACKBOXAI() # Initialize a BLACKBOXAI instance
241
- response = blackbox_ai.ask(prompt) # Perform a chat with the given prompt
242
- processed_response = blackbox_ai.get_message(response) # Process the response
243
- print(processed_response)
237
+
238
+
239
+
244
240
  class AsyncBLACKBOXAI(AsyncProvider):
245
241
  def __init__(
246
242
  self,
@@ -437,4 +433,12 @@ class AsyncBLACKBOXAI(AsyncProvider):
437
433
  str: Message extracted
438
434
  """
439
435
  assert isinstance(response, dict), "Response should be of dict data-type only"
440
- return response["text"]
436
+ return response["text"]
437
+
438
+ # Function to clean the response text
439
+ def clean_response(response_text: str) -> str:
440
+ # Remove web search results
441
+ response_text = re.sub(r'\$@\$v=undefined-rv1\$@\$Sources:.*?\$~~~', '', response_text, flags=re.DOTALL)
442
+ # Remove any remaining special characters or markers
443
+ response_text = re.sub(r'\$~~~', '', response_text)
444
+ return response_text