webscout 2.8-py3-none-any.whl → 3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

@@ -1,214 +1,214 @@
- import requests
- from typing import Any, AsyncGenerator, Dict, Optional
- import json
- import re
-
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
- from webscout import exceptions
-
-
- class ChatGPTUK(Provider):
-     """
-     A class to interact with the ChatGPT UK API.
-     """
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         temperature: float = 0.9,
-         presence_penalty: float = 0,
-         frequency_penalty: float = 0,
-         top_p: float = 1,
-         model: str = "google-gemini-pro",
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-     ) -> None:
-         """
-         Initializes the ChatGPTUK API with given parameters.
-
-         Args:
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.9.
-             presence_penalty (float, optional): Chances of topic being repeated. Defaults to 0.
-             frequency_penalty (float, optional): Chances of word being repeated. Defaults to 0.
-             top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
-             model (str, optional): LLM model name. Defaults to "google-gemini-pro".
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.session = requests.Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.api_endpoint = "https://free.chatgpt.org.uk/api/openai/v1/chat/completions"
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-         self.temperature = temperature
-         self.presence_penalty = presence_penalty
-         self.frequency_penalty = frequency_penalty
-         self.top_p = top_p
-         self.headers = {"Content-Type": "application/json"}
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         self.session.headers.update(self.headers)
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-            dict : {}
-         ```json
-         {
-            "text" : "How may I assist you today?"
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         self.session.headers.update(self.headers)
-         payload = {
-             "messages": [
-                 {"role": "system", "content": "Keep your responses long and detailed"},
-                 {"role": "user", "content": conversation_prompt}
-             ],
-             "stream": True,
-             "model": self.model,
-             "temperature": self.temperature,
-             "presence_penalty": self.presence_penalty,
-             "frequency_penalty": self.frequency_penalty,
-             "top_p": self.top_p,
-             "max_tokens": self.max_tokens_to_sample
-         }
-
-         def for_stream():
-             response = self.session.post(
-                 self.api_endpoint, json=payload, stream=True, timeout=self.timeout
-             )
-             if not response.ok:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             streaming_response = ""
-             for line in response.iter_lines(decode_unicode=True, chunk_size=1):
-                 if line:
-                     modified_line = re.sub("data:", "", line)
-                     try:
-                         json_data = json.loads(modified_line)
-                         content = json_data['choices'][0]['delta']['content']
-                         streaming_response += content
-                         yield content if raw else dict(text=streaming_response)
-                     except:
-                         continue
-             self.last_response.update(dict(text=streaming_response))
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-         def for_non_stream():
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
+ import requests
+ from typing import Any, AsyncGenerator, Dict, Optional
+ import json
+ import re
+
+ from ..AIutel import Optimizers
+ from ..AIutel import Conversation
+ from ..AIutel import AwesomePrompts, sanitize_stream
+ from ..AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+
+
+ class ChatGPTUK(Provider):
+     """
+     A class to interact with the ChatGPT UK API.
+     """
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         temperature: float = 0.9,
+         presence_penalty: float = 0,
+         frequency_penalty: float = 0,
+         top_p: float = 1,
+         model: str = "google-gemini-pro",
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+     ) -> None:
+         """
+         Initializes the ChatGPTUK API with given parameters.
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.9.
+             presence_penalty (float, optional): Chances of topic being repeated. Defaults to 0.
+             frequency_penalty (float, optional): Chances of word being repeated. Defaults to 0.
+             top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
+             model (str, optional): LLM model name. Defaults to "google-gemini-pro".
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+         """
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = "https://free.chatgpt.org.uk/api/openai/v1/chat/completions"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.temperature = temperature
+         self.presence_penalty = presence_penalty
+         self.frequency_penalty = frequency_penalty
+         self.top_p = top_p
+         self.headers = {"Content-Type": "application/json"}
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+            dict : {}
+         ```json
+         {
+            "text" : "How may I assist you today?"
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         self.session.headers.update(self.headers)
+         payload = {
+             "messages": [
+                 {"role": "system", "content": "Keep your responses long and detailed"},
+                 {"role": "user", "content": conversation_prompt}
+             ],
+             "stream": True,
+             "model": self.model,
+             "temperature": self.temperature,
+             "presence_penalty": self.presence_penalty,
+             "frequency_penalty": self.frequency_penalty,
+             "top_p": self.top_p,
+             "max_tokens": self.max_tokens_to_sample
+         }
+
+         def for_stream():
+             response = self.session.post(
+                 self.api_endpoint, json=payload, stream=True, timeout=self.timeout
+             )
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+
+             streaming_response = ""
+             for line in response.iter_lines(decode_unicode=True, chunk_size=1):
+                 if line:
+                     modified_line = re.sub("data:", "", line)
+                     try:
+                         json_data = json.loads(modified_line)
+                         content = json_data['choices'][0]['delta']['content']
+                         streaming_response += content
+                         yield content if raw else dict(text=streaming_response)
+                     except:
+                         continue
+             self.last_response.update(dict(text=streaming_response))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
          return response["text"]
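
For reference, the class in this diff implements webscout's synchronous Provider interface (ask/chat/get_message), so it is driven like the package's other providers. The snippet below is a minimal usage sketch, not part of the diff: it assumes ChatGPTUK is re-exported from the top-level webscout package (the diff shows only the module body, not the package's exports) and that the free.chatgpt.org.uk endpoint is reachable.

```python
# Minimal usage sketch for the ChatGPTUK provider shown above.
# Assumption: the class is importable from the top-level package;
# this diff shows the module body, not the package's __init__ exports.
from webscout import ChatGPTUK

ai = ChatGPTUK(model="google-gemini-pro", timeout=30)

# Non-streaming: chat() returns the complete response text.
print(ai.chat("What is the capital of France?"))

# Streaming: each yielded item is the cumulative text so far, because
# ask() appends each delta to `streaming_response` before yielding.
for snapshot in ai.chat("Summarize HTTP/2 in two sentences", stream=True):
    print(snapshot)
```

Note that a streamed chat() yields growing snapshots rather than per-chunk deltas; only ask(..., raw=True) yields the raw delta content.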