webscout 3.9__py3-none-any.whl → 4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

webscout/AIauto.py CHANGED
@@ -1,29 +1,33 @@
 from webscout.AIbase import Provider, AsyncProvider
-from webscout import OPENGPT, AsyncOPENGPT
-from webscout import KOBOLDAI, AsyncKOBOLDAI
-from webscout import PhindSearch, AsyncPhindSearch
-from webscout import LLAMA2, AsyncLLAMA2
-from webscout import BLACKBOXAI, AsyncBLACKBOXAI
-from webscout import PERPLEXITY
-from webscout import ThinkAnyAI
-from webscout import YouChat
-from webscout import YEPCHAT
-from webscout.AIbase import Provider, AsyncProvider
-from webscout import KOBOLDAI, AsyncKOBOLDAI
-from webscout import PhindSearch, AsyncPhindSearch
-from webscout import LLAMA2, AsyncLLAMA2
-from webscout import BLACKBOXAI, AsyncBLACKBOXAI
-from webscout import PERPLEXITY
-from webscout import ThinkAnyAI
-from webscout import YouChat
-from webscout import YEPCHAT, AsyncYEPCHAT
-from webscout import LEO, AsyncLEO
-from webscout import GROQ, AsyncGROQ
-from webscout import OPENAI, AsyncOPENAI
-from webscout import REKA
-from webscout import Xjai
-from webscout import Berlin4h
-from webscout import ChatGPTUK
+from webscout.Provider.ThinkAnyAI import ThinkAnyAI
+from webscout.Provider.Xjai import Xjai
+from webscout.Provider.Llama2 import LLAMA2
+from webscout.Provider.Llama2 import AsyncLLAMA2
+from webscout.Provider.Leo import LEO
+from webscout.Provider.Leo import AsyncLEO
+from webscout.Provider.Koboldai import KOBOLDAI
+from webscout.Provider.Koboldai import AsyncKOBOLDAI
+from webscout.Provider.OpenGPT import OPENGPT
+from webscout.Provider.OpenGPT import OPENGPTv2
+from webscout.Provider.OpenGPT import AsyncOPENGPT
+from webscout.Provider.Perplexity import PERPLEXITY
+from webscout.Provider.Blackboxai import BLACKBOXAI
+from webscout.Provider.Blackboxai import AsyncBLACKBOXAI
+from webscout.Provider.Phind import PhindSearch
+from webscout.Provider.Phind import AsyncPhindSearch
+from webscout.Provider.Phind import Phindv2
+from webscout.Provider.Phind import AsyncPhindv2
+from webscout.Provider.Yepchat import YEPCHAT
+from webscout.Provider.Yepchat import AsyncYEPCHAT
+from webscout.Provider.Berlin4h import Berlin4h
+from webscout.Provider.ChatGPTUK import ChatGPTUK
+from webscout.Provider.Poe import POE
+from webscout.Provider.BasedGPT import BasedGPT
+from webscout.Provider.Deepseek import DeepSeek
+from webscout.Provider.Deepinfra import DeepInfra, VLM, AsyncDeepInfra
+from webscout.Provider.VTLchat import VTLchat
+from webscout.Provider.Geminipro import GEMINIPRO
+from webscout.Provider.Geminiflash import GEMINIFLASH
 from webscout.g4f import GPT4FREE, AsyncGPT4FREE
 from webscout.g4f import TestProviders
 from webscout.exceptions import AllProvidersFailure
@@ -36,43 +40,56 @@ import logging
 
 
 provider_map: dict[
-    str, Union[ ThinkAnyAI,
-        Xjai,
-        LLAMA2,
-        AsyncLLAMA2,
-        LEO,
-        AsyncLEO,
-        KOBOLDAI,
-        AsyncKOBOLDAI,
-        OPENGPT,
-        AsyncOPENGPT,
-        PERPLEXITY,
-        BLACKBOXAI,
-        AsyncBLACKBOXAI,
-        PhindSearch,
-        AsyncPhindSearch,
-        YEPCHAT,
-        AsyncYEPCHAT,
-        YouChat,
-        Berlin4h,
-        ChatGPTUK,]
+    str,
+    Union[
+        ThinkAnyAI,
+        Xjai,
+        LLAMA2,
+        LEO,
+        KOBOLDAI,
+        OPENGPT,
+        OPENGPTv2,
+        PERPLEXITY,
+        BLACKBOXAI,
+        PhindSearch,
+        Phindv2,
+        YEPCHAT,
+        Berlin4h,
+        ChatGPTUK,
+        POE,
+        BasedGPT,
+        DeepSeek,
+        DeepInfra,
+        VLM,
+        VTLchat,
+        GEMINIPRO,
+        GEMINIFLASH,
+        GPT4FREE,
+    ],
 ] = {
+    "ThinkAnyAI": ThinkAnyAI,
+    "Xjai": Xjai,
+    "LLAMA2": LLAMA2,
+    "LEO": LEO,
+    "KOBOLDAI": KOBOLDAI,
+    "OPENGPT": OPENGPT,
+    "OPENGPTv2": OPENGPTv2,
+    "PERPLEXITY": PERPLEXITY,
+    "BLACKBOXAI": BLACKBOXAI,
     "PhindSearch": PhindSearch,
-    "perplexity": PERPLEXITY,
-    "opengpt": OPENGPT,
-    "koboldai": KOBOLDAI,
-    "llama2": LLAMA2,
-    "blackboxai": BLACKBOXAI,
-    "gpt4free": GPT4FREE,
-    "thinkany": ThinkAnyAI,
-    "yepchat": YEPCHAT,
-    "you": YouChat,
-    "leo": LEO,
-    "xjai": Xjai,
-    "berlin4h": Berlin4h,
-    "chatgptuk": ChatGPTUK,
+    "Phindv2": Phindv2,
+    "YEPCHAT": YEPCHAT,
+    "Berlin4h": Berlin4h,
+    "ChatGPTUK": ChatGPTUK,
+    "POE": POE,
+    "BasedGPT": BasedGPT,
+    "DeepSeek": DeepSeek,
+    "DeepInfra": DeepInfra,
+    "VLM": VLM,
+    "VTLchat": VTLchat,
+    "GEMINIPRO": GEMINIPRO,
+    "GEMINIFLASH": GEMINIFLASH,
     "gpt4free": GPT4FREE,
-
 }
 
 
@@ -104,7 +121,31 @@ class AUTO(Provider):
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
             exclude(list[str], optional): List of providers to be excluded. Defaults to [].
         """
-        self.provider: Union[OPENGPT, KOBOLDAI, PhindSearch, LLAMA2, BLACKBOXAI, PERPLEXITY, GPT4FREE, ThinkAnyAI, YEPCHAT, YouChat] = None
+        self.provider: Union[
+            ThinkAnyAI,
+            Xjai,
+            LLAMA2,
+            LEO,
+            KOBOLDAI,
+            OPENGPT,
+            OPENGPTv2,
+            PERPLEXITY,
+            BLACKBOXAI,
+            PhindSearch,
+            Phindv2,
+            YEPCHAT,
+            Berlin4h,
+            ChatGPTUK,
+            POE,
+            BasedGPT,
+            DeepSeek,
+            DeepInfra,
+            VLM,
+            VTLchat,
+            GEMINIPRO,
+            GEMINIFLASH,
+            GPT4FREE,
+        ] = None
         self.provider_name: str = None
         self.is_conversation = is_conversation
         self.max_tokens = max_tokens
@@ -315,6 +356,30 @@ class AsyncAUTO(AsyncProvider):
             AsyncLLAMA2,
             AsyncBLACKBOXAI,
             AsyncGPT4FREE,
+            AsyncLEO,
+            ThinkAnyAI,
+            Xjai,
+            LLAMA2,
+            LEO,
+            KOBOLDAI,
+            OPENGPT,
+            OPENGPTv2,
+            PERPLEXITY,
+            BLACKBOXAI,
+            PhindSearch,
+            Phindv2,
+            YEPCHAT,
+            Berlin4h,
+            ChatGPTUK,
+            POE,
+            BasedGPT,
+            DeepSeek,
+            DeepInfra,
+            VLM,
+            VTLchat,
+            GEMINIPRO,
+            GEMINIFLASH,
+            GPT4FREE
         ] = None
         self.provider_name: str = None
         self.is_conversation = is_conversation
@@ -490,4 +555,4 @@ class AsyncAUTO(AsyncProvider):
             str: Message extracted
         """
         assert self.provider is not None, "Chat with AI first"
-        return await self.provider.get_message(response)
+        return await self.provider.get_message(response)
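Beyond the move to per-module provider imports, the keys of provider_map change from lowercase aliases ("perplexity", "opengpt", "you", ...) to class-style names ("PERPLEXITY", "OPENGPT", "POE", ...), and YouChat, GROQ, OPENAI and REKA drop out of the map entirely. Callers that referenced the old keys, for example in AUTO's exclude list, need updating. A minimal sketch of the new lookup, assuming AUTO exposes the usual Provider chat interface (which this diff does not show):

    from webscout.AIauto import AUTO, provider_map

    print(sorted(provider_map))  # new case-sensitive keys: "BLACKBOXAI", "POE", ...

    # Old lowercase names such as "perplexity" would no longer match here.
    bot = AUTO(exclude=["POE", "BasedGPT"])
    print(bot.chat("Hello"))  # chat() assumed from the Provider base class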
webscout/AIutel.py CHANGED
@@ -196,7 +196,7 @@ class Conversation:
         """
         self.status = status
         self.max_tokens_to_sample = max_tokens
-        self.chat_history = self.intro
+        self.chat_history = ""
         self.history_format = "\nUser : %(user)s\nLLM :%(llm)s"
         self.file = filepath
         self.update_file = update_file
@@ -219,19 +219,20 @@ class Conversation:
         ), f"File '{filepath}' does not exist"
         if not os.path.isfile(filepath):
             logging.debug(f"Creating new chat-history file - '{filepath}'")
-            with open(filepath, "w", encoding='utf-8') as fh:  # Try creating new file
+            with open(filepath, "w") as fh:  # Try creating new file
                 # lets add intro here
                 fh.write(self.intro)
         else:
             logging.debug(f"Loading conversation from '{filepath}'")
-            with open(filepath, encoding='utf-8') as fh:
-                file_contents = fh.read()
-                # Presume intro prompt is part of the file content
-                self.chat_history = file_contents
+            with open(filepath) as fh:
+                file_contents = fh.readlines()
+                if file_contents:
+                    self.intro = file_contents[0]  # Presume first line is the intro.
+                    self.chat_history = "\n".join(file_contents[1:])
 
-    def __trim_chat_history(self, chat_history: str) -> str:
+    def __trim_chat_history(self, chat_history: str, intro: str) -> str:
         """Ensures the len(prompt) and max_tokens_to_sample is not > 4096"""
-        len_of_intro = len(self.intro)
+        len_of_intro = len(intro)
         len_of_chat_history = len(chat_history)
         total = (
             self.max_tokens_to_sample + len_of_intro + len_of_chat_history
@@ -239,25 +240,28 @@ class Conversation:
         if total > self.history_offset:
             truncate_at = (total - self.history_offset) + self.prompt_allowance
             # Remove head of total (n) of chat_history
-            new_chat_history = chat_history[truncate_at:]
-            self.chat_history = self.intro + "\n... " + new_chat_history
+            trimmed_chat_history = chat_history[truncate_at:]
+            return "... " + trimmed_chat_history
             # print(len(self.chat_history))
-            return self.chat_history
-        # print(len(chat_history))
-        return chat_history
+        else:
+            return chat_history
 
-    def gen_complete_prompt(self, prompt: str) -> str:
+    def gen_complete_prompt(self, prompt: str, intro: str = None) -> str:
         """Generates a kinda like incomplete conversation
 
         Args:
-            prompt (str): _description_
+            prompt (str): Chat prompt
+            intro (str): Override class' intro. Defaults to None.
 
         Returns:
             str: Updated incomplete chat_history
         """
         if self.status:
-            resp = self.chat_history + self.history_format % dict(user=prompt, llm="")
-            return self.__trim_chat_history(resp)
+            intro = self.intro if intro is None else intro
+            incomplete_chat_history = self.chat_history + self.history_format % dict(
+                user=prompt, llm=""
+            )
+            return intro + self.__trim_chat_history(incomplete_chat_history, intro)
 
         return prompt
 
@@ -275,11 +279,16 @@ class Conversation:
             return
         new_history = self.history_format % dict(user=prompt, llm=response)
         if self.file and self.update_file:
-            with open(self.file, "a", encoding='utf-8') as fh:
-                fh.write(new_history)
+            if os.path.exists(self.file):
+                with open(self.file, "w") as fh:
+                    fh.write(self.intro + "\n" + new_history)
+            else:
+                with open(self.file, "a") as fh:
+                    fh.write(new_history)
         self.chat_history += new_history
 
 
+
 class AwesomePrompts:
     awesome_prompt_url = (
         "https://raw.githubusercontent.com/OE-LUCIFER/prompts/main/prompt.json"
@@ -1,2 +1,4 @@
 from .gguf import *
-from .autollama import *
+from .autollama import *
+from .weather import *
+from .weather_ascii import *
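The hunk above (the extracted diff does not name the file, but the relative imports point to a package __init__.py) star-imports the two new modules shown below. Both define a module-level get(), so the second star import shadows the first at package level; importing the modules themselves sidesteps the ambiguity. The package path in this sketch is an assumption:

    from webscout.Extra import weather, weather_ascii  # path assumed

    report = weather.get("Paris")      # dict on success, error string otherwise
    weather.print_weather(report)
    print(weather_ascii.get("Paris"))  # plain-text ASCII art report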
@@ -0,0 +1,49 @@
+import requests
+
+def get(location):
+    """Fetches weather data for the given location.
+
+    Args:
+        location (str): The location for which to fetch weather data.
+
+    Returns:
+        dict: A dictionary containing weather data if the request is successful,
+        otherwise a string indicating the error.
+    """
+    url = f"https://wttr.in/{location}?format=j1"
+
+    response = requests.get(url)
+
+    if response.status_code == 200:
+        return response.json()
+    else:
+        return f"Error: Unable to fetch weather data. Status code: {response.status_code}"
+
+def print_weather(weather_data):
+    """Prints the weather data in a user-friendly format.
+
+    Args:
+        weather_data (dict or str): The weather data returned from get_weather()
+        or an error message.
+    """
+    if isinstance(weather_data, str):
+        print(weather_data)
+        return
+
+    current = weather_data['current_condition'][0]
+    location_name = weather_data['nearest_area'][0]['areaName'][0]['value']
+
+    print(f"Weather in {location_name}:")
+    print(f"Temperature: {current['temp_C']}°C / {current['temp_F']}°F")
+    print(f"Condition: {current['weatherDesc'][0]['value']}")
+    print(f"Humidity: {current['humidity']}%")
+    print(f"Wind: {current['windspeedKmph']} km/h, {current['winddir16Point']}")
+
+
+    print("\nForecast:")
+    for day in weather_data['weather']:
+        date = day['date']
+        max_temp = day['maxtempC']
+        min_temp = day['mintempC']
+        desc = day['hourly'][4]['weatherDesc'][0]['value']
+        print(f"{date}: {min_temp}°C to {max_temp}°C, {desc}")
@@ -0,0 +1,18 @@
+import requests
+
+def get(location):
+    """Fetches ASCII art weather data for the given location.
+    Args:
+        location (str): The location for which to fetch weather data.
+
+    Returns:
+        str: ASCII art weather report if the request is successful,
+        otherwise an error message.
+    """
+    url = f"https://wttr.in/{location}"
+    response = requests.get(url, headers={'User-Agent': 'curl'})
+
+    if response.status_code == 200:
+        return "\n".join(response.text.splitlines()[:-1])
+    else:
+        return f"Error: Unable to fetch weather data. Status code: {response.status_code}"
@@ -1,3 +1,3 @@
 from llama_cpp import __version__ as __llama_cpp_version__
 
-__version__ = '3.9'
+__version__ = '4.1'
+ __version__ = '4.1'