webscout 4.0.tar.gz → 4.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (73)
  1. {webscout-4.0/webscout.egg-info → webscout-4.2}/PKG-INFO +10 -1
  2. {webscout-4.0 → webscout-4.2}/README.md +9 -0
  3. {webscout-4.0 → webscout-4.2}/setup.py +1 -1
  4. {webscout-4.0 → webscout-4.2}/webscout/AIauto.py +126 -61
  5. {webscout-4.0 → webscout-4.2}/webscout/AIutel.py +28 -18
  6. {webscout-4.0 → webscout-4.2}/webscout/Local/_version.py +1 -1
  7. webscout-4.2/webscout/Provider/OLLAMA.py +187 -0
  8. {webscout-4.0 → webscout-4.2}/webscout/Provider/__init__.py +2 -1
  9. {webscout-4.0 → webscout-4.2}/webscout/__init__.py +1 -0
  10. {webscout-4.0 → webscout-4.2}/webscout/version.py +1 -1
  11. {webscout-4.0 → webscout-4.2}/webscout/webai.py +14 -0
  12. {webscout-4.0 → webscout-4.2/webscout.egg-info}/PKG-INFO +10 -1
  13. {webscout-4.0 → webscout-4.2}/webscout.egg-info/SOURCES.txt +1 -0
  14. {webscout-4.0 → webscout-4.2}/LICENSE.md +0 -0
  15. {webscout-4.0 → webscout-4.2}/setup.cfg +0 -0
  16. {webscout-4.0 → webscout-4.2}/webscout/AIbase.py +0 -0
  17. {webscout-4.0 → webscout-4.2}/webscout/DWEBS.py +0 -0
  18. {webscout-4.0 → webscout-4.2}/webscout/Extra/__init__.py +0 -0
  19. {webscout-4.0 → webscout-4.2}/webscout/Extra/autollama.py +0 -0
  20. {webscout-4.0 → webscout-4.2}/webscout/Extra/gguf.py +0 -0
  21. {webscout-4.0 → webscout-4.2}/webscout/Extra/weather.py +0 -0
  22. {webscout-4.0 → webscout-4.2}/webscout/Extra/weather_ascii.py +0 -0
  23. {webscout-4.0 → webscout-4.2}/webscout/LLM.py +0 -0
  24. {webscout-4.0 → webscout-4.2}/webscout/Local/__init__.py +0 -0
  25. {webscout-4.0 → webscout-4.2}/webscout/Local/formats.py +0 -0
  26. {webscout-4.0 → webscout-4.2}/webscout/Local/model.py +0 -0
  27. {webscout-4.0 → webscout-4.2}/webscout/Local/rawdog.py +0 -0
  28. {webscout-4.0 → webscout-4.2}/webscout/Local/samplers.py +0 -0
  29. {webscout-4.0 → webscout-4.2}/webscout/Local/thread.py +0 -0
  30. {webscout-4.0 → webscout-4.2}/webscout/Local/utils.py +0 -0
  31. {webscout-4.0 → webscout-4.2}/webscout/Provider/BasedGPT.py +0 -0
  32. {webscout-4.0 → webscout-4.2}/webscout/Provider/Berlin4h.py +0 -0
  33. {webscout-4.0 → webscout-4.2}/webscout/Provider/Blackboxai.py +0 -0
  34. {webscout-4.0 → webscout-4.2}/webscout/Provider/ChatGPTUK.py +0 -0
  35. {webscout-4.0 → webscout-4.2}/webscout/Provider/Cohere.py +0 -0
  36. {webscout-4.0 → webscout-4.2}/webscout/Provider/Deepinfra.py +0 -0
  37. {webscout-4.0 → webscout-4.2}/webscout/Provider/Deepseek.py +0 -0
  38. {webscout-4.0 → webscout-4.2}/webscout/Provider/Gemini.py +0 -0
  39. {webscout-4.0 → webscout-4.2}/webscout/Provider/Geminiflash.py +0 -0
  40. {webscout-4.0 → webscout-4.2}/webscout/Provider/Geminipro.py +0 -0
  41. {webscout-4.0 → webscout-4.2}/webscout/Provider/Groq.py +0 -0
  42. {webscout-4.0 → webscout-4.2}/webscout/Provider/Koboldai.py +0 -0
  43. {webscout-4.0 → webscout-4.2}/webscout/Provider/Leo.py +0 -0
  44. {webscout-4.0 → webscout-4.2}/webscout/Provider/Llama2.py +0 -0
  45. {webscout-4.0 → webscout-4.2}/webscout/Provider/OpenGPT.py +0 -0
  46. {webscout-4.0 → webscout-4.2}/webscout/Provider/Openai.py +0 -0
  47. {webscout-4.0 → webscout-4.2}/webscout/Provider/Perplexity.py +0 -0
  48. {webscout-4.0 → webscout-4.2}/webscout/Provider/Phind.py +0 -0
  49. {webscout-4.0 → webscout-4.2}/webscout/Provider/Poe.py +0 -0
  50. {webscout-4.0 → webscout-4.2}/webscout/Provider/Reka.py +0 -0
  51. {webscout-4.0 → webscout-4.2}/webscout/Provider/ThinkAnyAI.py +0 -0
  52. {webscout-4.0 → webscout-4.2}/webscout/Provider/VTLchat.py +0 -0
  53. {webscout-4.0 → webscout-4.2}/webscout/Provider/Xjai.py +0 -0
  54. {webscout-4.0 → webscout-4.2}/webscout/Provider/Yepchat.py +0 -0
  55. {webscout-4.0 → webscout-4.2}/webscout/Provider/Youchat.py +0 -0
  56. {webscout-4.0 → webscout-4.2}/webscout/YTdownloader.py +0 -0
  57. {webscout-4.0 → webscout-4.2}/webscout/__main__.py +0 -0
  58. {webscout-4.0 → webscout-4.2}/webscout/async_providers.py +0 -0
  59. {webscout-4.0 → webscout-4.2}/webscout/cli.py +0 -0
  60. {webscout-4.0 → webscout-4.2}/webscout/exceptions.py +0 -0
  61. {webscout-4.0 → webscout-4.2}/webscout/g4f.py +0 -0
  62. {webscout-4.0 → webscout-4.2}/webscout/models.py +0 -0
  63. {webscout-4.0 → webscout-4.2}/webscout/tempid.py +0 -0
  64. {webscout-4.0 → webscout-4.2}/webscout/transcriber.py +0 -0
  65. {webscout-4.0 → webscout-4.2}/webscout/utils.py +0 -0
  66. {webscout-4.0 → webscout-4.2}/webscout/voice.py +0 -0
  67. {webscout-4.0 → webscout-4.2}/webscout/webscout_search.py +0 -0
  68. {webscout-4.0 → webscout-4.2}/webscout/webscout_search_async.py +0 -0
  69. {webscout-4.0 → webscout-4.2}/webscout/websx_search.py +0 -0
  70. {webscout-4.0 → webscout-4.2}/webscout.egg-info/dependency_links.txt +0 -0
  71. {webscout-4.0 → webscout-4.2}/webscout.egg-info/entry_points.txt +0 -0
  72. {webscout-4.0 → webscout-4.2}/webscout.egg-info/requires.txt +0 -0
  73. {webscout-4.0 → webscout-4.2}/webscout.egg-info/top_level.txt +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 4.0
+Version: 4.2
 Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -1463,6 +1463,15 @@ print(response)
 
 ### 21. GeminiFlash and geminipro
 **Usage similar to other providers**
+
+### 22. `Ollama` - chat with AI models locally
+```python
+from webscout import OLLAMA
+ollama_provider = OLLAMA(model="qwen2:0.5b")
+response = ollama_provider.chat("What is the meaning of life?")
+print(response)
+```
+
 ### `LLM`
 ```python
 from webscout.LLM import LLM
README.md
@@ -1397,6 +1397,15 @@ print(response)
 
 ### 21. GeminiFlash and geminipro
 **Usage similar to other providers**
+
+### 22. `Ollama` - chat with AI models locally
+```python
+from webscout import OLLAMA
+ollama_provider = OLLAMA(model="qwen2:0.5b")
+response = ollama_provider.chat("What is the meaning of life?")
+print(response)
+```
+
 ### `LLM`
 ```python
 from webscout.LLM import LLM
setup.py
@@ -5,7 +5,7 @@ with open("README.md", encoding="utf-8") as f:
 
 setup(
     name="webscout",
-    version="4.0",
+    version="4.2",
    description="Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more",
    long_description=README,
    long_description_content_type="text/markdown",
webscout/AIauto.py
@@ -1,29 +1,33 @@
 from webscout.AIbase import Provider, AsyncProvider
-from webscout import OPENGPT, AsyncOPENGPT
-from webscout import KOBOLDAI, AsyncKOBOLDAI
-from webscout import PhindSearch, AsyncPhindSearch
-from webscout import LLAMA2, AsyncLLAMA2
-from webscout import BLACKBOXAI, AsyncBLACKBOXAI
-from webscout import PERPLEXITY
-from webscout import ThinkAnyAI
-from webscout import YouChat
-from webscout import YEPCHAT
-from webscout.AIbase import Provider, AsyncProvider
-from webscout import KOBOLDAI, AsyncKOBOLDAI
-from webscout import PhindSearch, AsyncPhindSearch
-from webscout import LLAMA2, AsyncLLAMA2
-from webscout import BLACKBOXAI, AsyncBLACKBOXAI
-from webscout import PERPLEXITY
-from webscout import ThinkAnyAI
-from webscout import YouChat
-from webscout import YEPCHAT, AsyncYEPCHAT
-from webscout import LEO, AsyncLEO
-from webscout import GROQ, AsyncGROQ
-from webscout import OPENAI, AsyncOPENAI
-from webscout import REKA
-from webscout import Xjai
-from webscout import Berlin4h
-from webscout import ChatGPTUK
+from webscout.Provider.ThinkAnyAI import ThinkAnyAI
+from webscout.Provider.Xjai import Xjai
+from webscout.Provider.Llama2 import LLAMA2
+from webscout.Provider.Llama2 import AsyncLLAMA2
+from webscout.Provider.Leo import LEO
+from webscout.Provider.Leo import AsyncLEO
+from webscout.Provider.Koboldai import KOBOLDAI
+from webscout.Provider.Koboldai import AsyncKOBOLDAI
+from webscout.Provider.OpenGPT import OPENGPT
+from webscout.Provider.OpenGPT import OPENGPTv2
+from webscout.Provider.OpenGPT import AsyncOPENGPT
+from webscout.Provider.Perplexity import PERPLEXITY
+from webscout.Provider.Blackboxai import BLACKBOXAI
+from webscout.Provider.Blackboxai import AsyncBLACKBOXAI
+from webscout.Provider.Phind import PhindSearch
+from webscout.Provider.Phind import AsyncPhindSearch
+from webscout.Provider.Phind import Phindv2
+from webscout.Provider.Phind import AsyncPhindv2
+from webscout.Provider.Yepchat import YEPCHAT
+from webscout.Provider.Yepchat import AsyncYEPCHAT
+from webscout.Provider.Berlin4h import Berlin4h
+from webscout.Provider.ChatGPTUK import ChatGPTUK
+from webscout.Provider.Poe import POE
+from webscout.Provider.BasedGPT import BasedGPT
+from webscout.Provider.Deepseek import DeepSeek
+from webscout.Provider.Deepinfra import DeepInfra, VLM, AsyncDeepInfra
+from webscout.Provider.VTLchat import VTLchat
+from webscout.Provider.Geminipro import GEMINIPRO
+from webscout.Provider.Geminiflash import GEMINIFLASH
 from webscout.g4f import GPT4FREE, AsyncGPT4FREE
 from webscout.g4f import TestProviders
 from webscout.exceptions import AllProvidersFailure
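The module now imports each provider from its concrete `webscout.Provider.*` module rather than re-importing everything through the package root. Both paths resolve to the same class; a minimal sketch, assuming webscout 4.2 is installed:

```python
# both names point at the same provider class after this change
from webscout import PhindSearch
from webscout.Provider.Phind import PhindSearch as PhindDirect

assert PhindSearch is PhindDirect
```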
webscout/AIauto.py (continued)
@@ -36,43 +40,56 @@ import logging
 
 
 provider_map: dict[
-    str, Union[ ThinkAnyAI,
-    Xjai,
-    LLAMA2,
-    AsyncLLAMA2,
-    LEO,
-    AsyncLEO,
-    KOBOLDAI,
-    AsyncKOBOLDAI,
-    OPENGPT,
-    AsyncOPENGPT,
-    PERPLEXITY,
-    BLACKBOXAI,
-    AsyncBLACKBOXAI,
-    PhindSearch,
-    AsyncPhindSearch,
-    YEPCHAT,
-    AsyncYEPCHAT,
-    YouChat,
-    Berlin4h,
-    ChatGPTUK,]
+    str,
+    Union[
+        ThinkAnyAI,
+        Xjai,
+        LLAMA2,
+        LEO,
+        KOBOLDAI,
+        OPENGPT,
+        OPENGPTv2,
+        PERPLEXITY,
+        BLACKBOXAI,
+        PhindSearch,
+        Phindv2,
+        YEPCHAT,
+        Berlin4h,
+        ChatGPTUK,
+        POE,
+        BasedGPT,
+        DeepSeek,
+        DeepInfra,
+        VLM,
+        VTLchat,
+        GEMINIPRO,
+        GEMINIFLASH,
+        GPT4FREE,
+    ],
 ] = {
+    "ThinkAnyAI": ThinkAnyAI,
+    "Xjai": Xjai,
+    "LLAMA2": LLAMA2,
+    "LEO": LEO,
+    "KOBOLDAI": KOBOLDAI,
+    "OPENGPT": OPENGPT,
+    "OPENGPTv2": OPENGPTv2,
+    "PERPLEXITY": PERPLEXITY,
+    "BLACKBOXAI": BLACKBOXAI,
     "PhindSearch": PhindSearch,
-    "perplexity": PERPLEXITY,
-    "opengpt": OPENGPT,
-    "koboldai": KOBOLDAI,
-    "llama2": LLAMA2,
-    "blackboxai": BLACKBOXAI,
-    "gpt4free": GPT4FREE,
-    "thinkany": ThinkAnyAI,
-    "yepchat": YEPCHAT,
-    "you": YouChat,
-    "leo": LEO,
-    "xjai": Xjai,
-    "berlin4h": Berlin4h,
-    "chatgptuk": ChatGPTUK,
+    "Phindv2": Phindv2,
+    "YEPCHAT": YEPCHAT,
+    "Berlin4h": Berlin4h,
+    "ChatGPTUK": ChatGPTUK,
+    "POE": POE,
+    "BasedGPT": BasedGPT,
+    "DeepSeek": DeepSeek,
+    "DeepInfra": DeepInfra,
+    "VLM": VLM,
+    "VTLchat": VTLchat,
+    "GEMINIPRO": GEMINIPRO,
+    "GEMINIFLASH": GEMINIFLASH,
     "gpt4free": GPT4FREE,
-
 }
 
 
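The rewritten `provider_map` keys each provider class by its class name, keeping only the legacy lowercase `"gpt4free"` entry. A hedged lookup sketch, assuming the shared constructor keywords that the providers in this diff expose:

```python
from webscout.AIauto import provider_map

# resolve a provider class by name and instantiate it; is_conversation and
# max_tokens are the keyword arguments common to providers in this package
ProviderCls = provider_map["PhindSearch"]
bot = ProviderCls(is_conversation=True, max_tokens=600)
print(bot.chat("Hello there"))
```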
webscout/AIauto.py (continued)
@@ -104,7 +121,31 @@ class AUTO(Provider):
            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
            exclude(list[str], optional): List of providers to be excluded. Defaults to [].
        """
-        self.provider: Union[OPENGPT, KOBOLDAI, PhindSearch, LLAMA2, BLACKBOXAI, PERPLEXITY, GPT4FREE, ThinkAnyAI, YEPCHAT, YouChat] = None
+        self.provider: Union[
+            ThinkAnyAI,
+            Xjai,
+            LLAMA2,
+            LEO,
+            KOBOLDAI,
+            OPENGPT,
+            OPENGPTv2,
+            PERPLEXITY,
+            BLACKBOXAI,
+            PhindSearch,
+            Phindv2,
+            YEPCHAT,
+            Berlin4h,
+            ChatGPTUK,
+            POE,
+            BasedGPT,
+            DeepSeek,
+            DeepInfra,
+            VLM,
+            VTLchat,
+            GEMINIPRO,
+            GEMINIFLASH,
+            GPT4FREE,
+        ] = None
        self.provider_name: str = None
        self.is_conversation = is_conversation
        self.max_tokens = max_tokens
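`AUTO` picks a working provider from this map at chat time and records the winner in `provider_name`. A minimal usage sketch; `exclude` is assumed to take `provider_map` keys, per the docstring above:

```python
from webscout.AIauto import AUTO

bot = AUTO(exclude=["POE"])        # skip any providers you don't want tried
print(bot.chat("What is 2 + 2?"))
print(bot.provider_name)           # the provider that actually responded
```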
webscout/AIauto.py (continued)
@@ -315,6 +356,30 @@ class AsyncAUTO(AsyncProvider):
            AsyncLLAMA2,
            AsyncBLACKBOXAI,
            AsyncGPT4FREE,
+            AsyncLEO,
+            ThinkAnyAI,
+            Xjai,
+            LLAMA2,
+            LEO,
+            KOBOLDAI,
+            OPENGPT,
+            OPENGPTv2,
+            PERPLEXITY,
+            BLACKBOXAI,
+            PhindSearch,
+            Phindv2,
+            YEPCHAT,
+            Berlin4h,
+            ChatGPTUK,
+            POE,
+            BasedGPT,
+            DeepSeek,
+            DeepInfra,
+            VLM,
+            VTLchat,
+            GEMINIPRO,
+            GEMINIFLASH,
+            GPT4FREE
        ] = None
        self.provider_name: str = None
        self.is_conversation = is_conversation
@@ -490,4 +555,4 @@
            str: Message extracted
        """
        assert self.provider is not None, "Chat with AI first"
-        return await self.provider.get_message(response)
+        return await self.provider.get_message(response)
webscout/AIutel.py
@@ -52,6 +52,7 @@ webai = [
     "vtlchat",
     "geminiflash",
     "geminipro",
+    "ollama"
 ]
 
 gpt4free_providers = [
@@ -219,19 +220,20 @@ class Conversation:
        ), f"File '{filepath}' does not exist"
        if not os.path.isfile(filepath):
            logging.debug(f"Creating new chat-history file - '{filepath}'")
-            with open(filepath, "w", encoding='utf-8') as fh:  # Try creating new file
+            with open(filepath, "w") as fh:  # Try creating new file
                # lets add intro here
                fh.write(self.intro)
        else:
            logging.debug(f"Loading conversation from '{filepath}'")
-            with open(filepath, encoding='utf-8') as fh:
-                file_contents = fh.read()
-                # Presume intro prompt is part of the file content
-                self.chat_history = file_contents
+            with open(filepath) as fh:
+                file_contents = fh.readlines()
+                if file_contents:
+                    self.intro = file_contents[0]  # Presume first line is the intro.
+                self.chat_history = "\n".join(file_contents[1:])
 
-    def __trim_chat_history(self, chat_history: str) -> str:
+    def __trim_chat_history(self, chat_history: str, intro: str) -> str:
        """Ensures the len(prompt) and max_tokens_to_sample is not > 4096"""
-        len_of_intro = len(self.intro)
+        len_of_intro = len(intro)
        len_of_chat_history = len(chat_history)
        total = (
            self.max_tokens_to_sample + len_of_intro + len_of_chat_history
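The loading scheme changes here: instead of treating the whole file as chat history, the first line is now presumed to be the intro and the remainder becomes the history. The same logic as a standalone sketch:

```python
# sketch of the new loading rule (note: readlines() keeps each line's
# trailing newline, so joining with "\n" double-spaces the history)
with open("conversation.txt") as fh:    # hypothetical history file
    lines = fh.readlines()

intro = lines[0] if lines else ""       # first line presumed to be the intro
chat_history = "\n".join(lines[1:])     # the rest is the running conversation
```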
webscout/AIutel.py (continued)
@@ -239,25 +241,28 @@
        )
        if total > self.history_offset:
            truncate_at = (total - self.history_offset) + self.prompt_allowance
            # Remove head of total (n) of chat_history
-            new_chat_history = chat_history[truncate_at:]
-            self.chat_history = self.intro + "\n... " + new_chat_history
+            trimmed_chat_history = chat_history[truncate_at:]
+            return "... " + trimmed_chat_history
            # print(len(self.chat_history))
-            return self.chat_history
-        # print(len(chat_history))
-        return chat_history
+        else:
+            return chat_history
 
-    def gen_complete_prompt(self, prompt: str) -> str:
+    def gen_complete_prompt(self, prompt: str, intro: str = None) -> str:
        """Generates a kinda like incomplete conversation
 
        Args:
-            prompt (str): _description_
+            prompt (str): Chat prompt
+            intro (str): Override class' intro. Defaults to None.
 
        Returns:
            str: Updated incomplete chat_history
        """
        if self.status:
-            resp = self.chat_history + self.history_format % dict(user=prompt, llm="")
-            return self.__trim_chat_history(resp)
+            intro = self.intro if intro is None else intro
+            incomplete_chat_history = self.chat_history + self.history_format % dict(
+                user=prompt, llm=""
+            )
+            return intro + self.__trim_chat_history(incomplete_chat_history, intro)
 
        return prompt
 
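The trimming arithmetic is easiest to see with numbers plugged in; a self-contained sketch using the defaults visible in this diff (`prompt_allowance` is assumed to be a small constant on `Conversation`):

```python
max_tokens_to_sample = 600
history_offset = 10250
prompt_allowance = 10                      # assumption: actual value lives on Conversation
intro = "You are a helpful assistant."     # 28 characters
chat_history = "x" * 12000                 # oversized history to force a trim

total = max_tokens_to_sample + len(intro) + len(chat_history)  # 12628
if total > history_offset:
    truncate_at = (total - history_offset) + prompt_allowance  # 2388
    trimmed = "... " + chat_history[truncate_at:]              # head dropped, tail kept
else:
    trimmed = chat_history

# gen_complete_prompt prepends the intro afterwards, so the assembled
# prompt stays bounded by history_offset (plus the "... " marker)
print(len(intro) + len(trimmed))           # 9644, under 10250
```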
webscout/AIutel.py (continued)
@@ -275,11 +280,16 @@
            return
        new_history = self.history_format % dict(user=prompt, llm=response)
        if self.file and self.update_file:
-            with open(self.file, "a", encoding='utf-8') as fh:
-                fh.write(new_history)
+            if os.path.exists(self.file):
+                with open(self.file, "w") as fh:
+                    fh.write(self.intro + "\n" + new_history)
+            else:
+                with open(self.file, "a") as fh:
+                    fh.write(new_history)
        self.chat_history += new_history
 
 
+
 class AwesomePrompts:
    awesome_prompt_url = (
        "https://raw.githubusercontent.com/OE-LUCIFER/prompts/main/prompt.json"
webscout/Local/_version.py
@@ -1,3 +1,3 @@
 from llama_cpp import __version__ as __llama_cpp_version__
 
-__version__ = '4.0'
+__version__ = '4.2'
webscout/Provider/OLLAMA.py (new file)
@@ -0,0 +1,187 @@
+import time
+import uuid
+import requests
+from requests import get
+from uuid import uuid4
+from re import findall
+from requests.exceptions import RequestException
+from curl_cffi.requests import get, RequestsError
+import g4f
+from random import randint
+from PIL import Image
+import io
+import re
+import json
+import yaml
+from ..AIutel import Optimizers
+from ..AIutel import Conversation
+from ..AIutel import AwesomePrompts, sanitize_stream
+from ..AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from typing import Any, AsyncGenerator, Dict
+import logging
+import httpx
+import ollama
+
+class OLLAMA(Provider):
+    def __init__(
+        self,
+        model: str = 'qwen2:0.5b',
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Instantiates Ollama
+
+        Args:
+            model (str, optional): Model name. Defaults to 'qwen2:0.5b'.
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.model = model
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict | AsyncGenerator:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict|AsyncGenerator : ai content
+        ```json
+        {
+            "text" : "print('How may I help you today?')"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        def for_stream():
+            stream = ollama.chat(model=self.model, messages=[
+                {'role': 'user', 'content': conversation_prompt}
+            ], stream=True)
+
+            message_load = ""
+            for chunk in stream:
+                message_load += chunk['message']['content']
+                yield chunk['message']['content'] if raw else dict(text=message_load)
+            self.last_response.update(dict(text=message_load))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            response = ollama.chat(model=self.model, messages=[
+                {'role': 'user', 'content': conversation_prompt}
+            ])
+            self.last_response.update(dict(text=response['message']['content']))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str | AsyncGenerator:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+if __name__ == "__main__":
+    ollama_provider = OLLAMA(model="qwen2:0.5b")
+    response = ollama_provider.chat("What is the meaning of life?")
+    print(response)
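Beyond the blocking `chat()` shown in the README snippet above, the new provider can stream. With `raw=True`, `ask()` yields each incremental chunk from the local Ollama server; without it, each yielded dict carries the text accumulated so far. A sketch, assuming an Ollama server is running locally and the model has been pulled (`ollama pull qwen2:0.5b`):

```python
from webscout import OLLAMA

bot = OLLAMA(model="qwen2:0.5b")

# raw=True streams the incremental text chunks as they arrive
for token in bot.ask("Explain recursion in one sentence.", stream=True, raw=True):
    print(token, end="", flush=True)
print()
```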
webscout/Provider/__init__.py
@@ -37,7 +37,7 @@ from .Deepinfra import DeepInfra, VLM, AsyncDeepInfra
 from .VTLchat import VTLchat
 from .Geminipro import GEMINIPRO
 from .Geminiflash import GEMINIFLASH
-
+from .OLLAMA import OLLAMA
 __all__ = [
     'ThinkAnyAI',
     'Xjai',
@@ -78,6 +78,7 @@ __all__ = [
     'OPENGPTv2',
     'GEMINIPRO',
     'GEMINIFLASH',
+    'OLLAMA'
 
 
 ]
webscout/__init__.py
@@ -44,6 +44,7 @@ webai = [
     "vtlchat",
     "geminiflash",
     "geminipro",
+    "ollama"
 ]
 
 gpt4free_providers = [
webscout/version.py
@@ -1,2 +1,2 @@
-__version__ = "4.0"
+__version__ = "4.2"
 __prog__ = "webscout"
webscout/webai.py
@@ -831,7 +831,21 @@ class Main(cmd.Cmd):
                act=awesome_prompt,
                quiet=quiet,
            )
+        elif provider == "ollama":
+            from webscout import OLLAMA
 
+            self.bot = OLLAMA(
+                is_conversation=disable_conversation,
+                max_tokens=max_tokens,
+                timeout=timeout,
+                intro=intro,
+                filepath=filepath,
+                update_file=update_file,
+                proxies=proxies,
+                history_offset=history_offset,
+                act=awesome_prompt,
+                model=getOr(model, "qwen2:0.5b")
+            )
        else:
            raise NotImplementedError(
                f"The provider `{provider}` is not yet implemented."
webscout.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 4.0
+Version: 4.2
 Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -1463,6 +1463,15 @@ print(response)
 
 ### 21. GeminiFlash and geminipro
 **Usage similar to other providers**
+
+### 22. `Ollama` - chat with AI models locally
+```python
+from webscout import OLLAMA
+ollama_provider = OLLAMA(model="qwen2:0.5b")
+response = ollama_provider.chat("What is the meaning of life?")
+print(response)
+```
+
 ### `LLM`
 ```python
 from webscout.LLM import LLM
webscout.egg-info/SOURCES.txt
@@ -56,6 +56,7 @@ webscout/Provider/Groq.py
 webscout/Provider/Koboldai.py
 webscout/Provider/Leo.py
 webscout/Provider/Llama2.py
+webscout/Provider/OLLAMA.py
 webscout/Provider/OpenGPT.py
 webscout/Provider/Openai.py
 webscout/Provider/Perplexity.py