webscout 4.1-py3-none-any.whl → 4.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

webscout/AIauto.py CHANGED
@@ -1,8 +1,8 @@
1
1
  from webscout.AIbase import Provider, AsyncProvider
2
2
  from webscout.Provider.ThinkAnyAI import ThinkAnyAI
3
3
  from webscout.Provider.Xjai import Xjai
4
- from webscout.Provider.Llama2 import LLAMA2
5
- from webscout.Provider.Llama2 import AsyncLLAMA2
4
+ from webscout.Provider.Llama import LLAMA2
5
+ from webscout.Provider.Llama import AsyncLLAMA2
6
6
  from webscout.Provider.Leo import LEO
7
7
  from webscout.Provider.Leo import AsyncLEO
8
8
  from webscout.Provider.Koboldai import KOBOLDAI
webscout/AIutel.py CHANGED
@@ -52,6 +52,7 @@ webai = [
52
52
  "vtlchat",
53
53
  "geminiflash",
54
54
  "geminipro",
55
+ "ollama"
55
56
  ]
56
57
 
57
58
  gpt4free_providers = [
@@ -196,7 +197,7 @@ class Conversation:
196
197
  """
197
198
  self.status = status
198
199
  self.max_tokens_to_sample = max_tokens
199
- self.chat_history = ""
200
+ self.chat_history = self.intro
200
201
  self.history_format = "\nUser : %(user)s\nLLM :%(llm)s"
201
202
  self.file = filepath
202
203
  self.update_file = update_file
@@ -219,17 +220,16 @@ class Conversation:
219
220
  ), f"File '{filepath}' does not exist"
220
221
  if not os.path.isfile(filepath):
221
222
  logging.debug(f"Creating new chat-history file - '{filepath}'")
222
- with open(filepath, "w") as fh: # Try creating new file
223
- # lets add intro here
223
+ with open(filepath, "w", encoding="utf-8") as fh: # Try creating new file with UTF-8 encoding
224
224
  fh.write(self.intro)
225
225
  else:
226
226
  logging.debug(f"Loading conversation from '{filepath}'")
227
- with open(filepath) as fh:
227
+ with open(filepath, encoding="utf-8") as fh: # Open with UTF-8 encoding
228
228
  file_contents = fh.readlines()
229
229
  if file_contents:
230
230
  self.intro = file_contents[0] # Presume first line is the intro.
231
231
  self.chat_history = "\n".join(file_contents[1:])
232
-
232
+
233
233
  def __trim_chat_history(self, chat_history: str, intro: str) -> str:
234
234
  """Ensures the len(prompt) and max_tokens_to_sample is not > 4096"""
235
235
  len_of_intro = len(intro)
@@ -242,7 +242,6 @@ class Conversation:
242
242
  # Remove head of total (n) of chat_history
243
243
  trimmed_chat_history = chat_history[truncate_at:]
244
244
  return "... " + trimmed_chat_history
245
- # print(len(self.chat_history))
246
245
  else:
247
246
  return chat_history
248
247
 
@@ -280,12 +279,12 @@ class Conversation:
280
279
  new_history = self.history_format % dict(user=prompt, llm=response)
281
280
  if self.file and self.update_file:
282
281
  if os.path.exists(self.file):
283
- with open(self.file, "w") as fh:
282
+ with open(self.file, "w", encoding="utf-8") as fh: # Specify UTF-8 encoding
284
283
  fh.write(self.intro + "\n" + new_history)
285
284
  else:
286
- with open(self.file, "a") as fh:
285
+ with open(self.file, "a", encoding="utf-8") as fh: # Specify UTF-8 encoding
287
286
  fh.write(new_history)
288
- self.chat_history += new_history
287
+ self.chat_history += new_history
289
288
 
290
289
 
291
290
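The `Conversation` changes above seed `chat_history` with the intro text and read/write history files as UTF-8. A minimal sketch of the new behaviour, assuming the constructor defaults (`filepath=None`, `update_file=True`) so nothing is written to disk:

```python
# Minimal sketch (assumed defaults: filepath=None, so no history file is touched).
from webscout.AIutel import Conversation

conv = Conversation(True, 600)                 # is_conversation, max_tokens
conv.update_chat_history("Hi", " Hello there!")
# chat_history now begins with Conversation.intro instead of an empty string,
# followed by "\nUser : Hi\nLLM : Hello there!" per history_format.
print(conv.chat_history)
```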
 
webscout/Local/_version.py CHANGED
@@ -1,3 +1,3 @@
1
1
  from llama_cpp import __version__ as __llama_cpp_version__
2
2
 
3
- __version__ = '4.1'
3
+ __version__ = '4.3'
webscout/Provider/FreeGemini.py ADDED
@@ -0,0 +1,169 @@
1
+ import time
2
+ import uuid
3
+ from selenium import webdriver
4
+ from selenium.webdriver.chrome.options import Options
5
+ from selenium.webdriver.common.by import By
6
+ from selenium.webdriver.support import expected_conditions as EC
7
+ from selenium.webdriver.support.ui import WebDriverWait
8
+ import click
9
+ import requests
10
+ from requests import get
11
+ from uuid import uuid4
12
+ from re import findall
13
+ from requests.exceptions import RequestException
14
+ from curl_cffi.requests import get, RequestsError
15
+ import g4f
16
+ from random import randint
17
+ from PIL import Image
18
+ import io
19
+ import re
20
+ import json
21
+ import yaml
22
+ from ..AIutel import Optimizers
23
+ from ..AIutel import Conversation
24
+ from ..AIutel import AwesomePrompts, sanitize_stream
25
+ from ..AIbase import Provider, AsyncProvider
26
+ from webscout import exceptions
27
+ from typing import Any, AsyncGenerator, Dict
28
+ import logging
29
+ import httpx
30
+
31
+ class FreeGemini(Provider):
32
+ def __init__(
33
+ self,
34
+ is_conversation: bool = True,
35
+ max_tokens: int = 600,
36
+ timeout: int = 60,
37
+ intro: str = None,
38
+ filepath: str = None,
39
+ update_file: bool = True,
40
+ proxies: dict = {},
41
+ history_offset: int = 10250,
42
+ act: str = None,
43
+ ):
44
+ """Instantiates FreeGemini
45
+
46
+ Args:
47
+ is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
48
+ max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
49
+ timeout (int, optional): Http request timeout. Defaults to 60.
50
+ intro (str, optional): Conversation introductory prompt. Defaults to None.
51
+ filepath (str, optional): Path to file containing conversation history. Defaults to None.
52
+ update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
53
+ proxies (dict, optional): Http request proxies. Defaults to {}.
54
+ history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
55
+ act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
56
+ """
57
+ self.session = requests.Session()
58
+ self.is_conversation = is_conversation
59
+ self.max_tokens_to_sample = max_tokens
60
+ self.chat_endpoint = "https://api.safone.dev/bard"
61
+ self.timeout = timeout
62
+ self.last_response = {}
63
+
64
+ self.headers = {
65
+ "accept": "application/json",
66
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
67
+ }
68
+
69
+ self.__available_optimizers = (
70
+ method
71
+ for method in dir(Optimizers)
72
+ if callable(getattr(Optimizers, method)) and not method.startswith("__")
73
+ )
74
+ self.session.headers.update(self.headers)
75
+ Conversation.intro = (
76
+ AwesomePrompts().get_act(
77
+ act, raise_not_found=True, default=None, case_insensitive=True
78
+ )
79
+ if act
80
+ else intro or Conversation.intro
81
+ )
82
+ self.conversation = Conversation(
83
+ is_conversation, self.max_tokens_to_sample, filepath, update_file
84
+ )
85
+ self.conversation.history_offset = history_offset
86
+ self.session.proxies = proxies
87
+
88
+ def ask(
89
+ self,
90
+ prompt: str,
91
+ stream: bool = False,
92
+ raw: bool = False,
93
+ optimizer: str = None,
94
+ conversationally: bool = False,
95
+ ) -> dict:
96
+ """Chat with AI
97
+
98
+ Args:
99
+ prompt (str): Prompt to be send.
100
+ stream (bool, optional): Flag for streaming response. Defaults to False.
101
+ raw (bool, optional): Stream back raw response as received. Defaults to False.
102
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
103
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
104
+ """
105
+ conversation_prompt = self.conversation.gen_complete_prompt(prompt)
106
+ if optimizer:
107
+ if optimizer in self.__available_optimizers:
108
+ conversation_prompt = getattr(Optimizers, optimizer)(
109
+ conversation_prompt if conversationally else prompt
110
+ )
111
+ else:
112
+ raise Exception(
113
+ f"Optimizer is not one of {self.__available_optimizers}"
114
+ )
115
+
116
+ self.session.headers.update(self.headers)
117
+ payload = {"message": conversation_prompt}
118
+
119
+ response = self.session.post(
120
+ self.chat_endpoint, json=payload, timeout=self.timeout
121
+ )
122
+
123
+ if not response.ok:
124
+ raise exceptions.FailedToGenerateResponseError(
125
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
126
+ )
127
+
128
+ resp = response.json()
129
+ message_load = self.get_message(resp)
130
+ self.conversation.update_chat_history(
131
+ prompt, message_load
132
+ )
133
+ return resp
134
+
135
+ def chat(
136
+ self,
137
+ prompt: str,
138
+ stream: bool = False,
139
+ optimizer: str = None,
140
+ conversationally: bool = False,
141
+ ) -> str:
142
+ """Generate response `str`
143
+ Args:
144
+ prompt (str): Prompt to be send.
145
+ stream (bool, optional): Flag for streaming response. Defaults to False.
146
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
147
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
148
+ Returns:
149
+ str: Response generated
150
+ """
151
+ return self.get_message(
152
+ self.ask(
153
+ prompt,
154
+ optimizer=optimizer,
155
+ conversationally=conversationally,
156
+ )
157
+ )
158
+
159
+ def get_message(self, response: dict) -> str:
160
+ """Retrieves message only from response
161
+
162
+ Args:
163
+ response (dict): Response generated by `self.ask`
164
+
165
+ Returns:
166
+ str: Message extracted
167
+ """
168
+ assert isinstance(response, dict), "Response should be of dict data-type only"
169
+ return response["message"]
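A minimal usage sketch for the new `FreeGemini` provider above; it assumes the `api.safone.dev/bard` endpoint is reachable and that `FreeGemini` is re-exported from the package root, as the updated provider exports below indicate:

```python
# Minimal sketch: ask() returns the endpoint's JSON as a dict; get_message() pulls out "message".
from webscout import FreeGemini

ai = FreeGemini(timeout=60)
raw = ai.ask("What is the capital of France?")
print(ai.get_message(raw))   # equivalent to ai.chat(...), which wraps ask() + get_message()
```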
webscout/Provider/Llama.py ADDED
@@ -0,0 +1,211 @@
1
+ import time
2
+ import uuid
3
+ from selenium import webdriver
4
+ from selenium.webdriver.chrome.options import Options
5
+ from selenium.webdriver.common.by import By
6
+ from selenium.webdriver.support import expected_conditions as EC
7
+ from selenium.webdriver.support.ui import WebDriverWait
8
+ import click
9
+ import requests
10
+ from requests import get
11
+ from uuid import uuid4
12
+ from re import findall
13
+ from requests.exceptions import RequestException
14
+ from curl_cffi.requests import get, RequestsError
15
+ import g4f
16
+ from random import randint
17
+ from PIL import Image
18
+ import io
19
+ import re
20
+ import json
21
+ import yaml
22
+ from ..AIutel import Optimizers
23
+ from ..AIutel import Conversation
24
+ from ..AIutel import AwesomePrompts, sanitize_stream
25
+ from ..AIbase import Provider, AsyncProvider
26
+ from webscout import exceptions
27
+ from typing import Any, AsyncGenerator, Dict
28
+ import logging
29
+ import httpx
30
+
31
+ class LLAMA(Provider):
32
+ def __init__(
33
+ self,
34
+ is_conversation: bool = True,
35
+ max_tokens: int = 600,
36
+ timeout: int = 30,
37
+ intro: str = None,
38
+ filepath: str = None,
39
+ update_file: bool = True,
40
+ proxies: dict = {},
41
+ history_offset: int = 10250,
42
+ act: str = None,
43
+ ):
44
+ """Instantiates LLAMA
45
+
46
+ Args:
47
+ is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
48
+ max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
49
+ timeout (int, optional): Http request timeout. Defaults to 30.
50
+ intro (str, optional): Conversation introductory prompt. Defaults to None.
51
+ filepath (str, optional): Path to file containing conversation history. Defaults to None.
52
+ update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
53
+ proxies (dict, optional): Http request proxies. Defaults to {}.
54
+ history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
55
+ act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
56
+ Note: the model is fixed to "llama3-70b-8192"; it is not a constructor argument.
57
+ """
58
+ self.is_conversation = is_conversation
59
+ self.max_tokens_to_sample = max_tokens
60
+ self.timeout = timeout
61
+ self.last_response = {}
62
+ self.model = "llama3-70b-8192"
63
+ self.api_endpoint = "https://api.safone.dev/llama"
64
+ self.headers = {
65
+ "accept": "application/json",
66
+ }
67
+
68
+ self.__available_optimizers = (
69
+ method
70
+ for method in dir(Optimizers)
71
+ if callable(getattr(Optimizers, method)) and not method.startswith("__")
72
+ )
73
+ Conversation.intro = (
74
+ AwesomePrompts().get_act(
75
+ act, raise_not_found=True, default=None, case_insensitive=True
76
+ )
77
+ if act
78
+ else intro or Conversation.intro
79
+ )
80
+ self.conversation = Conversation(
81
+ is_conversation, self.max_tokens_to_sample, filepath, update_file
82
+ )
83
+ self.conversation.history_offset = history_offset
84
+ self.session = requests.Session()
85
+ self.session.proxies = proxies
86
+
87
+ def ask(
88
+ self,
89
+ prompt: str,
90
+ stream: bool = False,
91
+ raw: bool = False,
92
+ optimizer: str = None,
93
+ conversationally: bool = False,
94
+ ) -> dict | AsyncGenerator:
95
+ """Chat with AI
96
+
97
+ Args:
98
+ prompt (str): Prompt to be send.
99
+ stream (bool, optional): Flag for streaming response. Defaults to False.
100
+ raw (bool, optional): Stream back raw response as received. Defaults to False.
101
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
102
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
103
+ Returns:
104
+ dict|AsyncGenerator : ai content
105
+ ```json
106
+ {
107
+ "text" : "print('How may I help you today?')"
108
+ }
109
+ ```
110
+ """
111
+ conversation_prompt = self.conversation.gen_complete_prompt(prompt)
112
+ if optimizer:
113
+ if optimizer in self.__available_optimizers:
114
+ conversation_prompt = getattr(Optimizers, optimizer)(
115
+ conversation_prompt if conversationally else prompt
116
+ )
117
+ else:
118
+ raise Exception(
119
+ f"Optimizer is not one of {self.__available_optimizers}"
120
+ )
121
+
122
+ self.session.headers.update(self.headers)
123
+ payload = {
124
+ "message": conversation_prompt
125
+ }
126
+
127
+ def for_stream():
128
+ response = self.session.get(
129
+ self.api_endpoint, params=payload, stream=True, timeout=self.timeout
130
+ )
131
+ if not response.ok:
132
+ raise exceptions.FailedToGenerateResponseError(
133
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
134
+ )
135
+
136
+ message_load = ""
137
+ for chunk in response.iter_lines():
138
+ try:
139
+ resp = json.loads(chunk)
140
+ message_load += resp['message']
141
+ yield chunk if raw else dict(text=message_load)
142
+ self.last_response.update(resp)
143
+ except (json.JSONDecodeError, KeyError):
144
+ pass
145
+ self.conversation.update_chat_history(
146
+ prompt, self.get_message(self.last_response)
147
+ )
148
+
149
+ def for_non_stream():
150
+ response = self.session.get(
151
+ self.api_endpoint, params=payload, stream=False, timeout=self.timeout
152
+ )
153
+ if not response.ok:
154
+ raise exceptions.FailedToGenerateResponseError(
155
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
156
+ )
157
+ resp = response.json()
158
+ self.last_response.update(resp)
159
+ self.conversation.update_chat_history(
160
+ prompt, self.get_message(self.last_response)
161
+ )
162
+ return resp
163
+
164
+ return for_stream() if stream else for_non_stream()
165
+
166
+ def chat(
167
+ self,
168
+ prompt: str,
169
+ stream: bool = False,
170
+ optimizer: str = None,
171
+ conversationally: bool = False,
172
+ ) -> str | AsyncGenerator:
173
+ """Generate response `str`
174
+ Args:
175
+ prompt (str): Prompt to be send.
176
+ stream (bool, optional): Flag for streaming response. Defaults to False.
177
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
178
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
179
+ Returns:
180
+ str: Response generated
181
+ """
182
+
183
+ def for_stream():
184
+ for response in self.ask(
185
+ prompt, True, optimizer=optimizer, conversationally=conversationally
186
+ ):
187
+ yield self.get_message(response)
188
+
189
+ def for_non_stream():
190
+ return self.get_message(
191
+ self.ask(
192
+ prompt,
193
+ False,
194
+ optimizer=optimizer,
195
+ conversationally=conversationally,
196
+ )
197
+ )
198
+
199
+ return for_stream() if stream else for_non_stream()
200
+
201
+ def get_message(self, response: dict) -> str:
202
+ """Retrieves message only from response
203
+
204
+ Args:
205
+ response (dict): Response generated by `self.ask`
206
+
207
+ Returns:
208
+ str: Message extracted
209
+ """
210
+ assert isinstance(response, dict), "Response should be of dict data-type only"
211
+ return response["message"]
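A usage sketch for the new `LLAMA` provider. Note that the streaming path above yields `dict(text=...)` chunks while `get_message()` reads `response["message"]`, so this sketch sticks to the non-streaming call:

```python
# Minimal non-streaming sketch for the LLAMA provider (api.safone.dev/llama assumed reachable).
from webscout import LLAMA

llama = LLAMA(timeout=30)
resp = llama.ask("Name one use of Python's walrus operator.")   # raw dict from the API
print(llama.get_message(resp))                                  # extracts resp["message"]
```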
webscout/Provider/OLLAMA.py ADDED
@@ -0,0 +1,187 @@
1
+ import time
2
+ import uuid
3
+ import requests
4
+ from requests import get
5
+ from uuid import uuid4
6
+ from re import findall
7
+ from requests.exceptions import RequestException
8
+ from curl_cffi.requests import get, RequestsError
9
+ import g4f
10
+ from random import randint
11
+ from PIL import Image
12
+ import io
13
+ import re
14
+ import json
15
+ import yaml
16
+ from ..AIutel import Optimizers
17
+ from ..AIutel import Conversation
18
+ from ..AIutel import AwesomePrompts, sanitize_stream
19
+ from ..AIbase import Provider, AsyncProvider
20
+ from webscout import exceptions
21
+ from typing import Any, AsyncGenerator, Dict
22
+ import logging
23
+ import httpx
24
+ import ollama
25
+
26
+ class OLLAMA(Provider):
27
+ def __init__(
28
+ self,
29
+ model: str = 'qwen2:0.5b',
30
+ is_conversation: bool = True,
31
+ max_tokens: int = 600,
32
+ timeout: int = 30,
33
+ intro: str = None,
34
+ filepath: str = None,
35
+ update_file: bool = True,
36
+ proxies: dict = {},
37
+ history_offset: int = 10250,
38
+ act: str = None,
39
+ ):
40
+ """Instantiates Ollama
41
+
42
+ Args:
43
+ model (str, optional): Ollama model name. Defaults to 'qwen2:0.5b'.
44
+ is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
45
+ max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
46
+ timeout (int, optional): Http request timeout. Defaults to 30.
47
+ intro (str, optional): Conversation introductory prompt. Defaults to None.
48
+ filepath (str, optional): Path to file containing conversation history. Defaults to None.
49
+ update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
50
+ proxies (dict, optional): Http request proxies. Defaults to {}.
51
+ history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
52
+ act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
53
+ """
54
+ self.model = model
55
+ self.is_conversation = is_conversation
56
+ self.max_tokens_to_sample = max_tokens
57
+ self.timeout = timeout
58
+ self.last_response = {}
59
+
60
+ self.__available_optimizers = (
61
+ method
62
+ for method in dir(Optimizers)
63
+ if callable(getattr(Optimizers, method)) and not method.startswith("__")
64
+ )
65
+ Conversation.intro = (
66
+ AwesomePrompts().get_act(
67
+ act, raise_not_found=True, default=None, case_insensitive=True
68
+ )
69
+ if act
70
+ else intro or Conversation.intro
71
+ )
72
+ self.conversation = Conversation(
73
+ is_conversation, self.max_tokens_to_sample, filepath, update_file
74
+ )
75
+ self.conversation.history_offset = history_offset
76
+
77
+ def ask(
78
+ self,
79
+ prompt: str,
80
+ stream: bool = False,
81
+ raw: bool = False,
82
+ optimizer: str = None,
83
+ conversationally: bool = False,
84
+ ) -> dict | AsyncGenerator:
85
+ """Chat with AI
86
+
87
+ Args:
88
+ prompt (str): Prompt to be send.
89
+ stream (bool, optional): Flag for streaming response. Defaults to False.
90
+ raw (bool, optional): Stream back raw response as received. Defaults to False.
91
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
92
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
93
+ Returns:
94
+ dict|AsyncGenerator : ai content
95
+ ```json
96
+ {
97
+ "text" : "print('How may I help you today?')"
98
+ }
99
+ ```
100
+ """
101
+ conversation_prompt = self.conversation.gen_complete_prompt(prompt)
102
+ if optimizer:
103
+ if optimizer in self.__available_optimizers:
104
+ conversation_prompt = getattr(Optimizers, optimizer)(
105
+ conversation_prompt if conversationally else prompt
106
+ )
107
+ else:
108
+ raise Exception(
109
+ f"Optimizer is not one of {self.__available_optimizers}"
110
+ )
111
+
112
+ def for_stream():
113
+ stream = ollama.chat(model=self.model, messages=[
114
+ {'role': 'user', 'content': conversation_prompt}
115
+ ], stream=True)
116
+
117
+ message_load = ""
118
+ for chunk in stream:
119
+ message_load += chunk['message']['content']
120
+ yield chunk['message']['content'] if raw else dict(text=message_load)
121
+ self.last_response.update(dict(text=message_load))
122
+ self.conversation.update_chat_history(
123
+ prompt, self.get_message(self.last_response)
124
+ )
125
+
126
+ def for_non_stream():
127
+ response = ollama.chat(model=self.model, messages=[
128
+ {'role': 'user', 'content': conversation_prompt}
129
+ ])
130
+ self.last_response.update(dict(text=response['message']['content']))
131
+ self.conversation.update_chat_history(
132
+ prompt, self.get_message(self.last_response)
133
+ )
134
+ return self.last_response
135
+
136
+ return for_stream() if stream else for_non_stream()
137
+
138
+ def chat(
139
+ self,
140
+ prompt: str,
141
+ stream: bool = False,
142
+ optimizer: str = None,
143
+ conversationally: bool = False,
144
+ ) -> str | AsyncGenerator:
145
+ """Generate response `str`
146
+ Args:
147
+ prompt (str): Prompt to be send.
148
+ stream (bool, optional): Flag for streaming response. Defaults to False.
149
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
150
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
151
+ Returns:
152
+ str: Response generated
153
+ """
154
+
155
+ def for_stream():
156
+ for response in self.ask(
157
+ prompt, True, optimizer=optimizer, conversationally=conversationally
158
+ ):
159
+ yield self.get_message(response)
160
+
161
+ def for_non_stream():
162
+ return self.get_message(
163
+ self.ask(
164
+ prompt,
165
+ False,
166
+ optimizer=optimizer,
167
+ conversationally=conversationally,
168
+ )
169
+ )
170
+
171
+ return for_stream() if stream else for_non_stream()
172
+
173
+ def get_message(self, response: dict) -> str:
174
+ """Retrieves message only from response
175
+
176
+ Args:
177
+ response (dict): Response generated by `self.ask`
178
+
179
+ Returns:
180
+ str: Message extracted
181
+ """
182
+ assert isinstance(response, dict), "Response should be of dict data-type only"
183
+ return response["text"]
184
+ if __name__ == "__main__":
185
+ ollama_provider = OLLAMA(model="qwen2:0.5b")
186
+ response = ollama_provider.chat("What is the meaning of life?")
187
+ print(response)
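A streaming counterpart to the `__main__` example above; it assumes the `ollama` Python package is installed and a local Ollama server already has `qwen2:0.5b` pulled. Each streamed item is the accumulated text so far, not a delta:

```python
# Minimal streaming sketch for the OLLAMA provider (local Ollama server assumed).
from webscout import OLLAMA

bot = OLLAMA(model="qwen2:0.5b")
text = ""
for text in bot.chat("Write a haiku about the sea.", stream=True):
    pass                      # chat(stream=True) yields the cumulative response each time
print(text)
```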
webscout/Provider/__init__.py CHANGED
@@ -2,8 +2,7 @@
2
2
 
3
3
  from .ThinkAnyAI import ThinkAnyAI
4
4
  from .Xjai import Xjai
5
- from .Llama2 import LLAMA2
6
- from .Llama2 import AsyncLLAMA2
5
+ from .Llama import LLAMA
7
6
  from .Cohere import Cohere
8
7
  from .Reka import REKA
9
8
  from .Groq import GROQ
@@ -37,12 +36,12 @@ from .Deepinfra import DeepInfra, VLM, AsyncDeepInfra
37
36
  from .VTLchat import VTLchat
38
37
  from .Geminipro import GEMINIPRO
39
38
  from .Geminiflash import GEMINIFLASH
40
-
39
+ from .OLLAMA import OLLAMA
40
+ from .FreeGemini import FreeGemini
41
41
  __all__ = [
42
42
  'ThinkAnyAI',
43
43
  'Xjai',
44
- 'LLAMA2',
45
- 'AsyncLLAMA2',
44
+ 'LLAMA',
46
45
  'Cohere',
47
46
  'REKA',
48
47
  'GROQ',
@@ -78,6 +77,8 @@ __all__ = [
78
77
  'OPENGPTv2',
79
78
  'GEMINIPRO',
80
79
  'GEMINIFLASH',
80
+ 'OLLAMA',
81
+ 'FreeGemini'
81
82
 
82
83
 
83
84
  ]
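With the export list updated as above, the new providers import directly from the subpackage; a small sketch (the old `LLAMA2`/`AsyncLLAMA2` names are gone in 4.3):

```python
# Sketch: the 4.3 provider exports; importing LLAMA2 or AsyncLLAMA2 now raises ImportError.
from webscout.Provider import LLAMA, OLLAMA, FreeGemini

providers = {"llama": LLAMA, "ollama": OLLAMA, "freegemini": FreeGemini}
```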
webscout/__init__.py CHANGED
@@ -6,7 +6,7 @@ from .transcriber import transcriber
6
6
  from .voice import play_audio
7
7
  from .websx_search import WEBSX
8
8
 
9
- from .LLM import LLM
9
+ from .LLM import VLM, LLM
10
10
  from .YTdownloader import *
11
11
  # from .Local import *
12
12
  import g4f
@@ -44,6 +44,7 @@ webai = [
44
44
  "vtlchat",
45
45
  "geminiflash",
46
46
  "geminipro",
47
+ "ollama"
47
48
  ]
48
49
 
49
50
  gpt4free_providers = [
webscout/async_providers.py CHANGED
@@ -2,7 +2,6 @@ from webscout import AsyncPhindSearch
2
2
  from webscout import AsyncYEPCHAT
3
3
  from webscout import AsyncOPENGPT
4
4
  from webscout import AsyncOPENAI
5
- from webscout import AsyncLLAMA2
6
5
  from webscout import AsyncLEO
7
6
  from webscout import AsyncKOBOLDAI
8
7
  from webscout import AsyncGROQ
@@ -15,7 +14,6 @@ mapper: dict[str, object] = {
15
14
  "koboldai": AsyncKOBOLDAI,
16
15
  "blackboxai": AsyncBLACKBOXAI,
17
16
  "gpt4free": AsyncGPT4FREE,
18
- "llama2": AsyncLLAMA2,
19
17
  "yepchat": AsyncYEPCHAT,
20
18
  "leo": AsyncLEO,
21
19
  "groq": AsyncGROQ,
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
1
- __version__ = "4.1"
1
+ __version__ = "4.3"
2
2
  __prog__ = "webscout"
webscout/webai.py CHANGED
@@ -831,7 +831,21 @@ class Main(cmd.Cmd):
831
831
  act=awesome_prompt,
832
832
  quiet=quiet,
833
833
  )
834
+ elif provider == "ollama":
835
+ from webscout import OLLAMA
834
836
 
837
+ self.bot = OLLAMA(
838
+ is_conversation=disable_conversation,
839
+ max_tokens=max_tokens,
840
+ timeout=timeout,
841
+ intro=intro,
842
+ filepath=filepath,
843
+ update_file=update_file,
844
+ proxies=proxies,
845
+ history_offset=history_offset,
846
+ act=awesome_prompt,
847
+ model=getOr(model, "qwen2:0.5b")
848
+ )
835
849
  else:
836
850
  raise NotImplementedError(
837
851
  f"The provider `{provider}` is not yet implemented."
webscout-4.3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: webscout
3
- Version: 4.1
3
+ Version: 4.3
4
4
  Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
5
5
  Author: OEvortex
6
6
  Author-email: helpingai5@gmail.com
@@ -1463,6 +1463,43 @@ print(response)
1463
1463
 
1464
1464
  ### 21. GeminiFlash and geminipro
1465
1465
  **Usage similar to other providers**
1466
+
1467
+ ### 22. `Ollama` - chat with AI models locally
1468
+ ```python
1469
+ from webscout import OLLAMA
1470
+ ollama_provider = OLLAMA(model="qwen2:0.5b")
1471
+ response = ollama_provider.chat("What is the meaning of life?")
1472
+ print(response)
1473
+ ```
1474
+
1475
+ ### 23. GROQ
1476
+ ```python
1477
+ from webscout import GROQ
1478
+ ai = GROQ(api_key="")
1479
+ response = ai.chat("What is the meaning of life?")
1480
+ print(response)
1481
+
1482
+ ```
1483
+
1484
+ ### 24. FreeGemini - chat with Gemini for free
1485
+ ```python
1486
+ from webscout import FreeGemini
1487
+ ai = FreeGemini()
1488
+ response = ai.chat("What is the meaning of life?")
1489
+ print(response)
1490
+ ```
1491
+
1492
+ ### 25. LLAMA 70B - chat with Meta's Llama 3 70B
1493
+ ```python
1494
+
1495
+ from webscout import LLAMA
1496
+
1497
+ llama = LLAMA()
1498
+
1499
+ r = llama.chat("What is the meaning of life?")
1500
+ print(r)
1501
+ ```
1502
+
1466
1503
  ### `LLM`
1467
1504
  ```python
1468
1505
  from webscout.LLM import LLM
webscout-4.3.dist-info/RECORD CHANGED
@@ -1,12 +1,12 @@
1
- webscout/AIauto.py,sha256=DycblRfFsQiLJVeP1sRQ0C-eNX7iO3a_y1wt8wChM8Y,20005
1
+ webscout/AIauto.py,sha256=5ZMoS39Tyy1AZS6s_bgVnng-x9CmvHhWWNB4QMB5v9U,20003
2
2
  webscout/AIbase.py,sha256=GoHbN8r0gq2saYRZv6LA-Fr9Jlcjv80STKFXUq2ZeGU,4710
3
- webscout/AIutel.py,sha256=RIS66j1QP_7zCL7o0uCBbz0A20yunH4onTtTHkkIN-k,33959
3
+ webscout/AIutel.py,sha256=1NQAchS2e6c1SrIq0efsVtX3ANZ5XI1hjKVHGpJG7OU,34076
4
4
  webscout/DWEBS.py,sha256=QLuT1IKu0lnwdl7W6c-ctBAO7Jj0Zk3PYm6-13BC7rU,25740
5
5
  webscout/LLM.py,sha256=LbGCZdJf8A5dwfoGS4tyy39tAh5BDdhMZP0ScKaaQfU,4184
6
6
  webscout/YTdownloader.py,sha256=uWpUWnw9pxeEGw9KJ_3XDyQ5gd38gH1dJpr-HJo4vzU,39144
7
- webscout/__init__.py,sha256=ibbt9571c1Kv5ZZV784j46i-lvR2WsTx8BXugHFQw2I,2198
7
+ webscout/__init__.py,sha256=DX52bX0RKkXgKAWohQRyBKNdiamZmp2aQuTpsD5ohbY,2216
8
8
  webscout/__main__.py,sha256=ZtTRgsRjUi2JOvYFLF1ZCh55Sdoz94I-BS-TlJC7WDU,126
9
- webscout/async_providers.py,sha256=holBv5SxanxVXc_92CBBaXHlB2IakB_fHnhyZaFjYF8,684
9
+ webscout/async_providers.py,sha256=MRj0klEhBYVQXnzZGG_15d0e-TPA0nOc2nn735H-wR4,622
10
10
  webscout/cli.py,sha256=EDxqTmcIshvhg9P0n2ZPaApj2-MEFY3uawS92zbBV_s,14705
11
11
  webscout/exceptions.py,sha256=YtIs-vXBwcjbt9TZ_wB7yI0dO7ANYIZAmEEeLmoQ2fI,487
12
12
  webscout/g4f.py,sha256=NNcnlOtIWV9R93UsBN4jBGBEJ9sJ-Np1WbgjkGVDcYc,24487
@@ -14,9 +14,9 @@ webscout/models.py,sha256=5iQIdtedT18YuTZ3npoG7kLMwcrKwhQ7928dl_7qZW0,692
14
14
  webscout/tempid.py,sha256=5oc3UbXhPGKxrMRTfRABT-V-dNzH_hOKWtLYM6iCWd4,5896
15
15
  webscout/transcriber.py,sha256=EddvTSq7dPJ42V3pQVnGuEiYQ7WjJ9uyeR9kMSxN7uY,20622
16
16
  webscout/utils.py,sha256=CxeXvp0rWIulUrEaPZMaNfg_tSuQLRSV8uuHA2chyKE,2603
17
- webscout/version.py,sha256=2jE62l770oovuLzelP3RWCvIn45U-DuZHeNGLvMIszs,44
17
+ webscout/version.py,sha256=Pp5thQN3CvwDpubKz9MHn-UvDhuocamnBfB2VckwBGI,44
18
18
  webscout/voice.py,sha256=0QjXTHAQmCK07IDZXRc7JXem47cnPJH7u3X0sVP1-UQ,967
19
- webscout/webai.py,sha256=4_v28Tg25N71utqg34hFd0vF4zK1lEtaMXj-5H6hw1o,88311
19
+ webscout/webai.py,sha256=LPn9XKvc5SLxJ68slMsPUXxzkzfa4b0kzsiJyWs-yq0,88897
20
20
  webscout/webscout_search.py,sha256=lFAot1-Qil_YfXieeLakDVDEX8Ckcima4ueXdOYwiMc,42804
21
21
  webscout/webscout_search_async.py,sha256=dooKGwLm0cwTml55Vy6NHPPY-nymEqX2h8laX94Zg5A,14537
22
22
  webscout/websx_search.py,sha256=n-qVwiHozJEF-GFRPcAfh4k1d_tscTmDe1dNL-1ngcU,12094
@@ -26,7 +26,7 @@ webscout/Extra/gguf.py,sha256=5zTNE5HxM_VQ5ONoocL8GG5fRXrgyLdEEjNzndG0oUw,7811
26
26
  webscout/Extra/weather.py,sha256=ocGwJYp5B9FwVWvIZ9wtoJTQsPFt64Vt8TitxJcdvAU,1687
27
27
  webscout/Extra/weather_ascii.py,sha256=sy6EEh2kN1CO1hKda8chD-mVCxH4p0NHyP7Uxr0-rgo,630
28
28
  webscout/Local/__init__.py,sha256=RN6klpbabPGNX2YzPm_hdeUcQvieUwvJt22uAO2RKSM,238
29
- webscout/Local/_version.py,sha256=cDatVldm_Tvfl9Vo4E6_GueNJoFGOBH6PA42hSdBFUA,83
29
+ webscout/Local/_version.py,sha256=yH-h9AKl_KbJwMWeq0PDDOVI2FQ9NutjLDqcCGuAQ6I,83
30
30
  webscout/Local/formats.py,sha256=BiZZSoN3e8S6-S-ykBL9ogSUs0vK11GaZ3ghc9U8GRk,18994
31
31
  webscout/Local/model.py,sha256=T_bzNNrxEyOyLyhp6fKwiuVBBkXC2a37LzJVCxFIxOU,30710
32
32
  webscout/Local/rawdog.py,sha256=ojY_O8Vb1KvR34OwWdfLgllgaAK_7HMf64ElMATvCXs,36689
@@ -40,13 +40,15 @@ webscout/Provider/ChatGPTUK.py,sha256=qmuCb_a71GNE5LelOb5AKJUBndvj7soebiNey4VdDv
40
40
  webscout/Provider/Cohere.py,sha256=IXnRosYOaMAA65nvsKmN6ZkJGSdZFYQYBidzuNaCqX8,8711
41
41
  webscout/Provider/Deepinfra.py,sha256=kVnWARJdEtIeIsZwGw3POq8B2dO87bDcJso3uOeCeOA,18750
42
42
  webscout/Provider/Deepseek.py,sha256=pnOB44ObuOfAsoi_bUGUvha3tfwd0rTJ9rnX-14QkL4,10550
43
+ webscout/Provider/FreeGemini.py,sha256=GbTJEG09vs5IKWKy9FqHBvDNKVq-HdMexOplctpb0RI,6426
43
44
  webscout/Provider/Gemini.py,sha256=_4DHWvlWuNAmVHPwHB1RjmryjTZZCthLa6lvPEHLvkQ,8451
44
45
  webscout/Provider/Geminiflash.py,sha256=1kMPA-ypi1gmJoms606Z7j_51znpdofM2aAyo4Hl7wU,5951
45
46
  webscout/Provider/Geminipro.py,sha256=nOifT5CRmnUg28iifSbOHkNLoKucLRr5zCj607mVrhw,5948
46
47
  webscout/Provider/Groq.py,sha256=QfgP3hKUcqq5vUA4Pzuu3HAgpJkKwLWNjjsnxtkCYd8,21094
47
48
  webscout/Provider/Koboldai.py,sha256=KwWx2yPlvT9BGx37iNvSbgzWkJ9I8kSOmeg7sL1hb0M,15806
48
49
  webscout/Provider/Leo.py,sha256=wbuDR-vFjLptfRC6yDlk74tINqNvCOzpISsK92lIgGg,19987
49
- webscout/Provider/Llama2.py,sha256=gVMotyiBaDSqliwuDtFefHoOBn9V5m5Ze_YVtV0trt8,17525
50
+ webscout/Provider/Llama.py,sha256=F_srqtdo6ws03tnEaetZOfDolXrQEnLZaIxmQaY_tJQ,8052
51
+ webscout/Provider/OLLAMA.py,sha256=G8sz_P7OZINFI1qGnpDhNPWU789Sv2cpDnShOA5Nbmw,7075
50
52
  webscout/Provider/OpenGPT.py,sha256=ZymwLgNJSPlGZHW3msMlnRR7NxmALqJw9yuToqrRrhw,35515
51
53
  webscout/Provider/Openai.py,sha256=SjfVOwY94unVnXhvN0Fkome-q2-wi4mPJk_vCGq5Fjc,20617
52
54
  webscout/Provider/Perplexity.py,sha256=CPdKqkdlVejXDcf1uycNO4LPCVNUADSCetvyJEGepSw,8826
@@ -58,10 +60,10 @@ webscout/Provider/VTLchat.py,sha256=_sErGr-wOi16ZAfiGOo0bPsAEMkjzzwreEsIqjIZMIU,
58
60
  webscout/Provider/Xjai.py,sha256=BIlk2ouz9Kh_0Gg9hPvTqhI7XtcmWdg5vHSX_4uGrIs,9039
59
61
  webscout/Provider/Yepchat.py,sha256=2Eit-A7w1ph1GQKNQuur_yaDzI64r0yBGxCIjDefJxQ,19875
60
62
  webscout/Provider/Youchat.py,sha256=fhMpt94pIPE_XDbC4z9xyfgA7NbkNE2wlRFJabsjv90,8069
61
- webscout/Provider/__init__.py,sha256=nFRM7vno2a7y2lG-7mDXvzytI8irb_loC0BPGZtrh7w,1923
62
- webscout-4.1.dist-info/LICENSE.md,sha256=9P0imsudI7MEvZe2pOcg8rKBn6E5FGHQ-riYozZI-Bk,2942
63
- webscout-4.1.dist-info/METADATA,sha256=a7qUaB7wlic1oP1ZEIX0p6BTJRpJvRbUqLv3QNgdgSo,56855
64
- webscout-4.1.dist-info/WHEEL,sha256=cpQTJ5IWu9CdaPViMhC9YzF8gZuS5-vlfoFihTBC86A,91
65
- webscout-4.1.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
66
- webscout-4.1.dist-info/top_level.txt,sha256=nYIw7OKBQDr_Z33IzZUKidRD3zQEo8jOJYkMVMeN334,9
67
- webscout-4.1.dist-info/RECORD,,
63
+ webscout/Provider/__init__.py,sha256=j6lZqjLYext2a-KTnvGEvVm-D3jezHIlnanlj2H37FI,1962
64
+ webscout-4.3.dist-info/LICENSE.md,sha256=9P0imsudI7MEvZe2pOcg8rKBn6E5FGHQ-riYozZI-Bk,2942
65
+ webscout-4.3.dist-info/METADATA,sha256=Wh2IMCZhNgKcxsOqGNPriPzrEYoQ4uWfLakOnteemsc,57597
66
+ webscout-4.3.dist-info/WHEEL,sha256=cpQTJ5IWu9CdaPViMhC9YzF8gZuS5-vlfoFihTBC86A,91
67
+ webscout-4.3.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
68
+ webscout-4.3.dist-info/top_level.txt,sha256=nYIw7OKBQDr_Z33IzZUKidRD3zQEo8jOJYkMVMeN334,9
69
+ webscout-4.3.dist-info/RECORD,,
webscout/Provider/Llama2.py DELETED
@@ -1,437 +0,0 @@
1
- import time
2
- import uuid
3
- from selenium import webdriver
4
- from selenium.webdriver.chrome.options import Options
5
- from selenium.webdriver.common.by import By
6
- from selenium.webdriver.support import expected_conditions as EC
7
- from selenium.webdriver.support.ui import WebDriverWait
8
- import click
9
- import requests
10
- from requests import get
11
- from uuid import uuid4
12
- from re import findall
13
- from requests.exceptions import RequestException
14
- from curl_cffi.requests import get, RequestsError
15
- import g4f
16
- from random import randint
17
- from PIL import Image
18
- import io
19
- import re
20
- import json
21
- import yaml
22
- from ..AIutel import Optimizers
23
- from ..AIutel import Conversation
24
- from ..AIutel import AwesomePrompts, sanitize_stream
25
- from ..AIbase import Provider, AsyncProvider
26
- from Helpingai_T2 import Perplexity
27
- from webscout import exceptions
28
- from typing import Any, AsyncGenerator, Dict
29
- import logging
30
- import httpx
31
-
32
- class AsyncLLAMA2(AsyncProvider):
33
- def __init__(
34
- self,
35
- is_conversation: bool = True,
36
- max_tokens: int = 800,
37
- temperature: float = 0.75,
38
- presence_penalty: int = 0,
39
- frequency_penalty: int = 0,
40
- top_p: float = 0.9,
41
- model: str = "meta/meta-llama-3-70b-instruct",
42
- timeout: int = 30,
43
- intro: str = None,
44
- filepath: str = None,
45
- update_file: bool = True,
46
- proxies: dict = {},
47
- history_offset: int = 10250,
48
- act: str = None,
49
- ):
50
- """Instantiates LLAMA2
51
-
52
- Args:
53
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
54
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 800.
55
- temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.75.
56
- presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
57
- frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
58
- top_p (float, optional): Sampling threshold during inference time. Defaults to 0.9.
59
- model (str, optional): LLM model name. Defaults to "meta/llama-2-70b-chat".
60
- timeout (int, optional): Http request timeout. Defaults to 30.
61
- intro (str, optional): Conversation introductory prompt. Defaults to None.
62
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
63
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
64
- proxies (dict, optional): Http request proxies. Defaults to {}.
65
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
66
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
67
- """
68
- self.is_conversation = is_conversation
69
- self.max_tokens_to_sample = max_tokens
70
- self.model = model
71
- self.temperature = temperature
72
- self.presence_penalty = presence_penalty
73
- self.frequency_penalty = frequency_penalty
74
- self.top_p = top_p
75
- self.chat_endpoint = "https://www.llama2.ai/api"
76
- self.stream_chunk_size = 64
77
- self.timeout = timeout
78
- self.last_response = {}
79
- self.headers = {
80
- "Content-Type": "application/json",
81
- "Referer": "https://www.llama2.ai/",
82
- "Content-Type": "text/plain;charset=UTF-8",
83
- "Origin": "https://www.llama2.ai",
84
- }
85
-
86
- self.__available_optimizers = (
87
- method
88
- for method in dir(Optimizers)
89
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
90
- )
91
- Conversation.intro = (
92
- AwesomePrompts().get_act(
93
- act, raise_not_found=True, default=None, case_insensitive=True
94
- )
95
- if act
96
- else intro or Conversation.intro
97
- )
98
- self.conversation = Conversation(
99
- is_conversation, self.max_tokens_to_sample, filepath, update_file
100
- )
101
- self.conversation.history_offset = history_offset
102
- self.session = httpx.AsyncClient(
103
- headers=self.headers,
104
- proxies=proxies,
105
- )
106
-
107
- async def ask(
108
- self,
109
- prompt: str,
110
- stream: bool = False,
111
- raw: bool = False,
112
- optimizer: str = None,
113
- conversationally: bool = False,
114
- ) -> dict | AsyncGenerator:
115
- """Chat with AI asynchronously.
116
-
117
- Args:
118
- prompt (str): Prompt to be send.
119
- stream (bool, optional): Flag for streaming response. Defaults to False.
120
- raw (bool, optional): Stream back raw response as received. Defaults to False.
121
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
122
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
123
- Returns:
124
- dict|AsyncGeneraror[dict] : ai content
125
- ```json
126
- {
127
- "text" : "How may I help you today?"
128
- }
129
- ```
130
- """
131
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
132
- if optimizer:
133
- if optimizer in self.__available_optimizers:
134
- conversation_prompt = getattr(Optimizers, optimizer)(
135
- conversation_prompt if conversationally else prompt
136
- )
137
- else:
138
- raise Exception(
139
- f"Optimizer is not one of {self.__available_optimizers}"
140
- )
141
-
142
- payload = {
143
- "prompt": f"{conversation_prompt}<s>[INST] {prompt} [/INST]",
144
- "model": self.model,
145
- "systemPrompt": "You are a helpful assistant.",
146
- "temperature": self.temperature,
147
- "topP": self.top_p,
148
- "maxTokens": self.max_tokens_to_sample,
149
- "image": None,
150
- "audio": None,
151
- }
152
-
153
- async def for_stream():
154
- async with self.session.stream(
155
- "POST", self.chat_endpoint, json=payload, timeout=self.timeout
156
- ) as response:
157
- if (
158
- not response.is_success
159
- or not response.headers.get("Content-Type")
160
- == "text/plain; charset=utf-8"
161
- ):
162
- raise exceptions.FailedToGenerateResponseError(
163
- f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
164
- )
165
- message_load: str = ""
166
- async for value in response.aiter_lines():
167
- try:
168
- if bool(value.strip()):
169
- message_load += value + "\n"
170
- resp: dict = dict(text=message_load)
171
- yield value if raw else resp
172
- self.last_response.update(resp)
173
- except json.decoder.JSONDecodeError:
174
- pass
175
- self.conversation.update_chat_history(
176
- prompt, await self.get_message(self.last_response)
177
- )
178
-
179
- async def for_non_stream():
180
- async for _ in for_stream():
181
- pass
182
- return self.last_response
183
-
184
- return for_stream() if stream else await for_non_stream()
185
-
186
- async def chat(
187
- self,
188
- prompt: str,
189
- stream: bool = False,
190
- optimizer: str = None,
191
- conversationally: bool = False,
192
- ) -> str | AsyncGenerator:
193
- """Generate response `str` asynchronously.
194
- Args:
195
- prompt (str): Prompt to be send.
196
- stream (bool, optional): Flag for streaming response. Defaults to False.
197
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
198
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
199
- Returns:
200
- str|AsyncGenerator: Response generated
201
- """
202
-
203
- async def for_stream():
204
- async_ask = await self.ask(
205
- prompt, True, optimizer=optimizer, conversationally=conversationally
206
- )
207
- async for response in async_ask:
208
- yield await self.get_message(response)
209
-
210
- async def for_non_stream():
211
- return await self.get_message(
212
- await self.ask(
213
- prompt,
214
- False,
215
- optimizer=optimizer,
216
- conversationally=conversationally,
217
- )
218
- )
219
-
220
- return for_stream() if stream else await for_non_stream()
221
-
222
- async def get_message(self, response: dict) -> str:
223
- """Retrieves message only from response
224
-
225
- Args:
226
- response (str): Response generated by `self.ask`
227
-
228
- Returns:
229
- str: Message extracted
230
- """
231
- assert isinstance(response, dict), "Response should be of dict data-type only"
232
- return response["text"]
233
- class LLAMA2(Provider):
234
- def __init__(
235
- self,
236
- is_conversation: bool = True,
237
- max_tokens: int = 800,
238
- temperature: float = 0.75,
239
- presence_penalty: int = 0,
240
- frequency_penalty: int = 0,
241
- top_p: float = 0.9,
242
- model: str = "meta/meta-llama-3-70b-instruct",
243
- timeout: int = 30,
244
- intro: str = None,
245
- filepath: str = None,
246
- update_file: bool = True,
247
- proxies: dict = {},
248
- history_offset: int = 10250,
249
- act: str = None,
250
- ):
251
- """Instantiates LLAMA2
252
-
253
- Args:
254
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
255
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 800.
256
- temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.75.
257
- presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
258
- frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
259
- top_p (float, optional): Sampling threshold during inference time. Defaults to 0.9.
260
- model (str, optional): LLM model name. Defaults to "meta/llama-2-70b-chat".
261
- timeout (int, optional): Http request timeout. Defaults to 30.
262
- intro (str, optional): Conversation introductory prompt. Defaults to None.
263
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
264
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
265
- proxies (dict, optional): Http request proxies. Defaults to {}.
266
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
267
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
268
- """
269
- self.session = requests.Session()
270
- self.is_conversation = is_conversation
271
- self.max_tokens_to_sample = max_tokens
272
- self.model = model
273
- self.temperature = temperature
274
- self.presence_penalty = presence_penalty
275
- self.frequency_penalty = frequency_penalty
276
- self.top_p = top_p
277
- self.chat_endpoint = "https://www.llama2.ai/api"
278
- self.stream_chunk_size = 64
279
- self.timeout = timeout
280
- self.last_response = {}
281
- self.headers = {
282
- "Content-Type": "application/json",
283
- "Referer": "https://www.llama2.ai/",
284
- "Content-Type": "text/plain;charset=UTF-8",
285
- "Origin": "https://www.llama2.ai",
286
- }
287
-
288
- self.__available_optimizers = (
289
- method
290
- for method in dir(Optimizers)
291
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
292
- )
293
- self.session.headers.update(self.headers)
294
- Conversation.intro = (
295
- AwesomePrompts().get_act(
296
- act, raise_not_found=True, default=None, case_insensitive=True
297
- )
298
- if act
299
- else intro or Conversation.intro
300
- )
301
- self.conversation = Conversation(
302
- is_conversation, self.max_tokens_to_sample, filepath, update_file
303
- )
304
- self.conversation.history_offset = history_offset
305
- self.session.proxies = proxies
306
-
307
- def ask(
308
- self,
309
- prompt: str,
310
- stream: bool = False,
311
- raw: bool = False,
312
- optimizer: str = None,
313
- conversationally: bool = False,
314
- ) -> dict:
315
- """Chat with AI
316
-
317
- Args:
318
- prompt (str): Prompt to be send.
319
- stream (bool, optional): Flag for streaming response. Defaults to False.
320
- raw (bool, optional): Stream back raw response as received. Defaults to False.
321
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
322
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
323
- Returns:
324
- dict : {}
325
- ```json
326
- {
327
- "text" : "How may I help you today?"
328
- }
329
- ```
330
- """
331
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
332
- if optimizer:
333
- if optimizer in self.__available_optimizers:
334
- conversation_prompt = getattr(Optimizers, optimizer)(
335
- conversation_prompt if conversationally else prompt
336
- )
337
- else:
338
- raise Exception(
339
- f"Optimizer is not one of {self.__available_optimizers}"
340
- )
341
- self.session.headers.update(self.headers)
342
-
343
- payload = {
344
- "prompt": f"{conversation_prompt}<s>[INST] {prompt} [/INST]",
345
- "model": self.model,
346
- "systemPrompt": "You are a helpful assistant.",
347
- "temperature": self.temperature,
348
- "topP": self.top_p,
349
- "maxTokens": self.max_tokens_to_sample,
350
- "image": None,
351
- "audio": None,
352
- }
353
-
354
- def for_stream():
355
- response = self.session.post(
356
- self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
357
- )
358
- if (
359
- not response.ok
360
- or not response.headers.get("Content-Type")
361
- == "text/plain; charset=utf-8"
362
- ):
363
- raise exceptions.FailedToGenerateResponseError(
364
- f"Failed to generate response - ({response.status_code}, {response.reason})"
365
- )
366
-
367
- message_load: str = ""
368
- for value in response.iter_lines(
369
- decode_unicode=True,
370
- delimiter="\n",
371
- chunk_size=self.stream_chunk_size,
372
- ):
373
- try:
374
- if bool(value.strip()):
375
- message_load += value + "\n"
376
- resp: dict = dict(text=message_load)
377
- yield value if raw else resp
378
- self.last_response.update(resp)
379
- except json.decoder.JSONDecodeError:
380
- pass
381
- self.conversation.update_chat_history(
382
- prompt, self.get_message(self.last_response)
383
- )
384
-
385
- def for_non_stream():
386
- for _ in for_stream():
387
- pass
388
- return self.last_response
389
-
390
- return for_stream() if stream else for_non_stream()
391
-
392
- def chat(
393
- self,
394
- prompt: str,
395
- stream: bool = False,
396
- optimizer: str = None,
397
- conversationally: bool = False,
398
- ) -> str:
399
- """Generate response `str`
400
- Args:
401
- prompt (str): Prompt to be send.
402
- stream (bool, optional): Flag for streaming response. Defaults to False.
403
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
404
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
405
- Returns:
406
- str: Response generated
407
- """
408
-
409
- def for_stream():
410
- for response in self.ask(
411
- prompt, True, optimizer=optimizer, conversationally=conversationally
412
- ):
413
- yield self.get_message(response)
414
-
415
- def for_non_stream():
416
- return self.get_message(
417
- self.ask(
418
- prompt,
419
- False,
420
- optimizer=optimizer,
421
- conversationally=conversationally,
422
- )
423
- )
424
-
425
- return for_stream() if stream else for_non_stream()
426
-
427
- def get_message(self, response: dict) -> str:
428
- """Retrieves message only from response
429
-
430
- Args:
431
- response (str): Response generated by `self.ask`
432
-
433
- Returns:
434
- str: Message extracted
435
- """
436
- assert isinstance(response, dict), "Response should be of dict data-type only"
437
- return response["text"]