webscout 5.2-py3-none-any.whl → 5.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

@@ -1,22 +1,21 @@
  webscout/AIauto.py,sha256=_1gkCv8J02kADGOPTlhsjr0CVfWXJYJnc7-ACrMXPrI,12116
  webscout/AIbase.py,sha256=vv0N8sDYaZKkKD9fkLpK1CA51ksZl0_PQ82tB431c-4,9215
- webscout/AIutel.py,sha256=sd3C2qg0Bb0e8feA-RfZeKBVHAe0ms3VVVdpwFm4xZo,35355
+ webscout/AIutel.py,sha256=1YQZAzhP20tobL9vBIWspa-EoruCiQE2r81JbyBZrmk,35375
  webscout/Bard.py,sha256=8XkV_j0gJ-krYYR7bd5UORWMk7VlyTd3z66RPYBtdxg,13134
  webscout/Bing_search.py,sha256=8pW3ZxFDfVsrtayEoAsAAoXoOCAGv2Jk_-HvOBNfCV4,6228
- webscout/DWEBS.py,sha256=9Jtq6weBAYfAy0bMenPn1fdJyzCPHyptc6hGywrB2Ro,6203
+ webscout/DWEBS.py,sha256=xB3L_u47vu9KZCFNUb_H6WX5OLeL068Er9t9Mbss9YU,7895
  webscout/LLM.py,sha256=LbGCZdJf8A5dwfoGS4tyy39tAh5BDdhMZP0ScKaaQfU,4184
  webscout/YTdownloader.py,sha256=uWpUWnw9pxeEGw9KJ_3XDyQ5gd38gH1dJpr-HJo4vzU,39144
- webscout/__init__.py,sha256=JsKjYyLtQMl46oriHg_aPoxYOpBDHjIBrEyiwiKzYWU,1312
+ webscout/__init__.py,sha256=A4VcaiWmm02GvbDKCmf6F0-jRW_uT_BJkZhN6m6O108,1294
  webscout/__main__.py,sha256=ZtTRgsRjUi2JOvYFLF1ZCh55Sdoz94I-BS-TlJC7WDU,126
  webscout/cli.py,sha256=RlBKeS9CSIsiBMqlzxevWtKjbY9htkZvA7J0bM_hHE8,14999
- webscout/exceptions.py,sha256=GMeOdYqWKmuFU6Uq8MHKCInXQmJc7a_7AanKdyVcYTM,607
+ webscout/exceptions.py,sha256=jqrZvxV2JnZFqvmCsKG1lQRd6fYQh7qk3ceA7m7-k9k,611
  webscout/g4f.py,sha256=NNcnlOtIWV9R93UsBN4jBGBEJ9sJ-Np1WbgjkGVDcYc,24487
  webscout/models.py,sha256=5iQIdtedT18YuTZ3npoG7kLMwcrKwhQ7928dl_7qZW0,692
  webscout/tempid.py,sha256=5oc3UbXhPGKxrMRTfRABT-V-dNzH_hOKWtLYM6iCWd4,5896
- webscout/transcriber.py,sha256=EddvTSq7dPJ42V3pQVnGuEiYQ7WjJ9uyeR9kMSxN7uY,20622
+ webscout/transcriber.py,sha256=kRymTd69zCjXdduCf7Gabt93Kz1l5ubsCwfwqs-pHl8,22334
  webscout/utils.py,sha256=2O8_lftBKsv5OEvVaXCN-h0sipup0m3jxzhFdWQrdY8,2873
  webscout/version.py,sha256=kYEBR9-iTYFuCIx_LnCClbTsANOXCfjXQq9O8i5H-GA,44
- webscout/voice.py,sha256=AHyeb3D8rYuAa-zBJsuMDgHq_Zvi98ROMKAUnEsKldo,1169
  webscout/webai.py,sha256=76AhP-vbPd5OW6To03I_6B18KOGEUvfi4Rbn4CqsA10,87745
  webscout/webscout_search.py,sha256=AOHkaMLmFvM1mS--wVyLiPrDAb5BPLaURBHjleWOi4w,43743
  webscout/webscout_search_async.py,sha256=dooKGwLm0cwTml55Vy6NHPPY-nymEqX2h8laX94Zg5A,14537
@@ -41,8 +40,8 @@ webscout/Local/utils.py,sha256=CSt9IqHhVGk_nJEnKvSFbLhC5nNf01e0MtwpgMmF9pA,6197
  webscout/Provider/AI21.py,sha256=JBh-xnspxTZNMcl-Gd0Cgseqht9gTM64TUv9I4Imc9k,6218
  webscout/Provider/Andi.py,sha256=uBME1v8lZbvpPHq5e_IOiOUC766uGTWMfNx9lWACssU,10681
  webscout/Provider/BasedGPT.py,sha256=pQd6_eDgdjv5_J0HZGugZ5ghqPLv2Hs18szq1G0bIh8,8905
- webscout/Provider/Berlin4h.py,sha256=-mO-ljQUV6pCnm-nKEeV7sePDn7wiGO_WG9XgVh2z10,8774
  webscout/Provider/Blackboxai.py,sha256=OXq8rF0EDHkTK65HVXPXLrJ9sp950h4c56sc-YxbsjU,17378
+ webscout/Provider/Chatify.py,sha256=5736HulTCnBso6MFEQHo9vNH-mDH7ayAVuDx2FaBj6Y,6278
  webscout/Provider/Cloudflare.py,sha256=4KAyGtpCkNyzFh5mjUcBOQ9wXIuhk92mxs13ahYnkKE,11368
  webscout/Provider/Cohere.py,sha256=OZ7-0iaJ8L5e4Sy-L2UGm8SnBmS7CbaFIj6a08bABVw,8941
  webscout/Provider/DARKAI.py,sha256=JpfFcPfd2kp15KSJ7GJ5Zy4zrwYQ_zHpqdFD2904Ii0,9065
@@ -56,7 +55,7 @@ webscout/Provider/Groq.py,sha256=h_dPKwqXRwmgvmEmkDYKdXwrlI4Zm2vZuCnSMItoa2w,286
  webscout/Provider/Koboldai.py,sha256=KwWx2yPlvT9BGx37iNvSbgzWkJ9I8kSOmeg7sL1hb0M,15806
  webscout/Provider/Llama.py,sha256=pqjpB09MFufImzTav1PwTWsukSCr3QiB-yFGJIIBAu8,8293
  webscout/Provider/Llama3.py,sha256=UnSWyBMSkp4WAxU4zNI9VNsZY0aAOHvT7AK0xJlJW90,7559
- webscout/Provider/NetFly.py,sha256=5lWjxe83lzXRJN5lAzntlWY7A0-NCiZiS2K7ZoyZc8w,10243
+ webscout/Provider/NetFly.py,sha256=7i-GNeQkJNxXhDawNkiFqOP8OOB4KHBVIV84q-L15S8,8392
  webscout/Provider/OLLAMA.py,sha256=g8ejBjEZN0zya-10-v_njADZ796Uxu4Nbj_gaNrlj5I,7374
  webscout/Provider/Openai.py,sha256=SjfVOwY94unVnXhvN0Fkome-q2-wi4mPJk_vCGq5Fjc,20617
  webscout/Provider/PI.py,sha256=IodVvGR_RIZpGJ0ypFF4U6NBMZAZ5O1BlRFMelot8so,8364
@@ -64,30 +63,31 @@ webscout/Provider/Perplexity.py,sha256=gUnXyVNbl6tWAqirwHEoPkjCzxpORcKxL77aoFKep
  webscout/Provider/Phind.py,sha256=_3y4CHn_uOsK6j2IP5n9RbnIAS6uTm5tI7IZccaDrMQ,39260
  webscout/Provider/PizzaGPT.py,sha256=EiHSt0sK9kgmcIbBmkVtuniDvOrlhdi6zim5t_EZo30,7216
  webscout/Provider/Poe.py,sha256=ObUxa-Fa2Dq7sJcV0hc65m09StS9uWsB2-bR2rSjXDY,7510
- webscout/Provider/RUBIKSAI.py,sha256=HPY8klGBNVVkfAXb-RziNrEtJGItjiqbSyXKXTOIHW4,7954
+ webscout/Provider/RUBIKSAI.py,sha256=LvssrWDZKg_OSVEdxVmEOagUQtf65NM0vnTbQJiooZM,8168
  webscout/Provider/Reka.py,sha256=F0ZXENkhARprj5biK3mRxwiuPH0BW3ga7EWsi8agbtE,8917
  webscout/Provider/TeachAnything.py,sha256=-gx3wiqywMVmVKvVwU9mL6HTSTmux3fvI6tIiCZ22fU,6785
  webscout/Provider/ThinkAnyAI.py,sha256=_qFjj0djxxrranyEY33w14oizyRjzlVwMv_hzvVtwNc,11616
  webscout/Provider/Youchat.py,sha256=p4rIodsNP3qxA78VpzZwUymSAs-uADQ_9CKuf_Nf9Ng,9582
- webscout/Provider/__init__.py,sha256=wUZ-XESz9WQDR7jg418pTT_7TDV00pDCTCaWLO5ZR1c,2200
- webscout/Provider/ai4chat.py,sha256=UB77kWH5vxSqSpz7PPgM4FH0aDpGOpwHJEv42Fa1W_U,7798
- webscout/Provider/felo_search.py,sha256=mYi1xW9egUMZ47bJb0MOD9364VLYgGJsOW2NQUbe190,9314
+ webscout/Provider/__init__.py,sha256=2QRPvo9bwp7cwyvID9N3mKbyea9fFLKvHzjHURqRTtI,2226
+ webscout/Provider/ai4chat.py,sha256=ewUcqjr3hrd27fgcvj6ijvoWVnSjdoA1iK0c8dn2VJo,8067
+ webscout/Provider/cerebras.py,sha256=N9Z7wY9pQRhh7chMSDirgHd1GV8Jwjeb3RmYB1pcww4,7302
+ webscout/Provider/felo_search.py,sha256=mOEJ8_I_4wIuItZqeHUdkxO_SwxINh3PUeL8zT_Jh6I,7469
  webscout/Provider/julius.py,sha256=ffm-9oeHYwuQMMkSXu_3ly0Xqvj-0Dh7DlatebCl1ls,10331
  webscout/Provider/koala.py,sha256=x5OoT7hM8V-camPNMevqddHvfmzjKvLER2tpCDB6X4o,10059
- webscout/Provider/liaobots.py,sha256=s2VxS4epBLVxoLCyQR0bdxiRm9Q6ZYUf019TC3xQCtM,10362
  webscout/Provider/meta.py,sha256=3iBylmAk9d673Axvw6hFi0-0x_Fq7ZgtH_1j2_rcDwY,30715
  webscout/Provider/turboseek.py,sha256=BNx_urbs6Ixr7SEOgL4Uo1iZdjYC7CxoefJcsN4LK6I,9138
+ webscout/Provider/x0gpt.py,sha256=o4xo0XaNgjwGqutkdOLXNzNLEMUB8bhxzzFauAYJUx4,6436
  webscout/Provider/xdash.py,sha256=KUDTEX8I0z72bIDi-w5Se7xmB_lbmaX7KlCmIl2ad4c,7925
  webscout/Provider/yep.py,sha256=RbEBzHeEFxgfdnHXHuBny6NKHcYYYNA6bvTggvAzoLk,10399
  webscout/Provider/TTI/PollinationsAI.py,sha256=ELMc92hYXzS1uFZtRB-tbFb39C3YqpxnfM8dVcucPE0,5485
  webscout/Provider/TTI/__init__.py,sha256=AjvFaww2xI7_8CHgxfBZwSd0KyXK9CnWqOOfxJqe31s,55
  webscout/Provider/TTI/deepinfra.py,sha256=o54__jLrP0OaiW7CHPCJISWQTuSwA8g-mSn60gRmTXA,5967
  webscout/Provider/TTS/__init__.py,sha256=g19AOO1X9Qb-MNFpwhx5lODDWQiG7HxZCO9TmOYOHGc,54
- webscout/Provider/TTS/streamElements.py,sha256=MpxAARlRpPbW7x3kA02YDHp7SCZueYYimoKlq-79N4k,8047
- webscout/Provider/TTS/voicepod.py,sha256=S0u3-cAKCEIO36y21TRIYdnLAPv6Ey9y8E79bkLdx1E,4327
- webscout-5.2.dist-info/LICENSE.md,sha256=9P0imsudI7MEvZe2pOcg8rKBn6E5FGHQ-riYozZI-Bk,2942
- webscout-5.2.dist-info/METADATA,sha256=pn01cYo70uaTRHyBmkPZjCszS8RN6ayFb8s8a5OxnvQ,51351
- webscout-5.2.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
- webscout-5.2.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
- webscout-5.2.dist-info/top_level.txt,sha256=nYIw7OKBQDr_Z33IzZUKidRD3zQEo8jOJYkMVMeN334,9
- webscout-5.2.dist-info/RECORD,,
+ webscout/Provider/TTS/streamElements.py,sha256=qPVlG2xyZMkPVYWnoqKg3Q1Zp2D0XE5fgNeINP0deSI,7901
+ webscout/Provider/TTS/voicepod.py,sha256=wljJskejf9LNMHSs_ilTDc2EU-M0fLtPnnwrtFF8S-A,4358
+ webscout-5.3.dist-info/LICENSE.md,sha256=9P0imsudI7MEvZe2pOcg8rKBn6E5FGHQ-riYozZI-Bk,2942
+ webscout-5.3.dist-info/METADATA,sha256=STHdzub3cUWkIB03czVH5siVA5jdJcR3VzRcUhmPs9M,48560
+ webscout-5.3.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
+ webscout-5.3.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
+ webscout-5.3.dist-info/top_level.txt,sha256=nYIw7OKBQDr_Z33IzZUKidRD3zQEo8jOJYkMVMeN334,9
+ webscout-5.3.dist-info/RECORD,,
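Each RECORD entry has the form `path,sha256=<digest>,<size>`, where the digest is the urlsafe-base64-encoded SHA-256 of the file's bytes with trailing `=` padding stripped (per PEP 427), so every content change above surfaces as a new digest. A minimal sketch of checking one changed entry against an unpacked wheel; `wheel_dir` is a hypothetical directory holding the extracted webscout-5.3 wheel:

```python
import base64
import hashlib
from pathlib import Path

def record_digest(path: Path) -> str:
    """RECORD-style digest: sha256, urlsafe base64, '=' padding stripped (PEP 427)."""
    raw = hashlib.sha256(path.read_bytes()).digest()
    return base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")

# Verify one changed entry from the 5.3 RECORD above.
expected = "jqrZvxV2JnZFqvmCsKG1lQRd6fYQh7qk3ceA7m7-k9k"
actual = record_digest(Path("wheel_dir") / "webscout" / "exceptions.py")
print("match" if actual == expected else f"mismatch: {actual}")
```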
webscout/Provider/Berlin4h.py DELETED
@@ -1,217 +0,0 @@
- import requests
- import json
- import uuid
- from typing import Any, Dict, Optional
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts, sanitize_stream
- from webscout.AIbase import Provider, AsyncProvider
- from webscout import exceptions
-
- class Berlin4h(Provider):
-     """
-     A class to interact with the Berlin4h AI API.
-     """
-
-     def __init__(
-         self,
-         api_token: str = "3bf369cd84339603f8a5361e964f9ebe",
-         api_endpoint: str = "https://ai.berlin4h.top/api/chat/completions",
-         model: str = "gpt-3.5-turbo",
-         temperature: float = 0.9,
-         presence_penalty: float = 0,
-         frequency_penalty: float = 0,
-         max_tokens: int = 4000,
-         is_conversation: bool = True,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-     ) -> None:
-         """
-         Initializes the Berlin4h API with given parameters.
-
-         Args:
-             api_token (str): The API token for authentication.
-             api_endpoint (str): The API endpoint to use for requests.
-             model (str): The AI model to use for text generation.
-             temperature (float): The temperature parameter for the model.
-             presence_penalty (float): The presence penalty parameter for the model.
-             frequency_penalty (float): The frequency penalty parameter for the model.
-             max_tokens (int): The maximum number of tokens to generate.
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.api_token = api_token
-         self.api_endpoint = api_endpoint
-         self.model = model
-         self.temperature = temperature
-         self.presence_penalty = presence_penalty
-         self.frequency_penalty = frequency_penalty
-         self.max_tokens = max_tokens
-         self.parent_message_id: Optional[str] = None
-         self.session = requests.Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.stream_chunk_size = 1
-         self.timeout = timeout
-         self.last_response = {}
-         self.headers = {"Content-Type": "application/json", "Token": self.api_token}
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         self.session.headers.update(self.headers)
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Dict[str, Any]:
-         """
-         Sends a prompt to the Berlin4h AI API and returns the response.
-
-         Args:
-             prompt: The text prompt to generate text from.
-             stream (bool, optional): Whether to stream the response. Defaults to False.
-             raw (bool, optional): Whether to return the raw response. Defaults to False.
-             optimizer (str, optional): The name of the optimizer to use. Defaults to None.
-             conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
-
-         Returns:
-             The response from the API.
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         payload: Dict[str, any] = {
-             "prompt": conversation_prompt,
-             "parentMessageId": self.parent_message_id or str(uuid.uuid4()),
-             "options": {
-                 "model": self.model,
-                 "temperature": self.temperature,
-                 "presence_penalty": self.presence_penalty,
-                 "frequency_penalty": self.frequency_penalty,
-                 "max_tokens": self.max_tokens,
-             },
-         }
-
-         def for_stream():
-             response = self.session.post(
-                 self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
-             )
-
-             if not response.ok:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason})"
-                 )
-
-             streaming_response = ""
-             # Collect the entire line before processing
-             for line in response.iter_lines(decode_unicode=True):
-                 if line:
-                     try:
-                         json_data = json.loads(line)
-                         content = json_data['content']
-                         if ">" in content: break
-                         streaming_response += content
-                         yield content if raw else dict(text=streaming_response) # Yield accumulated response
-                     except:
-                         continue
-             self.last_response.update(dict(text=streaming_response))
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-         def for_non_stream():
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
- if __name__ == '__main__':
-     from rich import print
-     ai = Berlin4h()
-     response = ai.chat("tell me about india")
-     for chunk in response:
-         print(chunk, end="", flush=True)
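Worth noting about the deleted `ask`: in non-raw streaming mode each yielded dict carries the accumulated response so far, not just the new delta. A minimal consumer sketch against webscout 5.2 (Berlin4h no longer exists in 5.3; the `webscout.Provider` import path is an assumption) that prints only the new suffix on each iteration:

```python
from webscout.Provider import Berlin4h  # 5.2-era import path, assumed

ai = Berlin4h()
shown = ""
for chunk in ai.ask("tell me about india", stream=True):
    text = chunk["text"]                          # accumulated response so far
    print(text[len(shown):], end="", flush=True)  # emit only the new suffix
    shown = text
```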
webscout/Provider/liaobots.py DELETED
@@ -1,268 +0,0 @@
- import json
- import re
- import uuid
- import gzip
- import zlib
- from typing import Any, Dict, Generator, Union
-
- import requests
-
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts
- from webscout.AIbase import Provider
- from webscout import exceptions
-
- class LiaoBots(Provider):
-     """
-     A class to interact with the LiaoBots API.
-     """
-
-     # List of available models
-     AVAILABLE_MODELS = [
-         "gpt-4o-mini",
-         "gpt-4o-free",
-         "gpt-4o-mini-free",
-         "gpt-4-turbo-2024-04-09",
-         "gpt-4o",
-         "gpt-4-0613",
-         "claude-3-5-sonnet-20240620",
-         "gemini-1.5-pro-latest",
-         "gemini-1.5-flash-latest"
-     ]
-
-     def __init__(
-         self,
-         auth_code: str = "G3USRn7M5zsXn",
-         cookie: str = "gkp2=pevIjZCYj8wMcrWPEAq6",
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "claude-3-5-sonnet-20240620",
-         system_prompt: str = "You are a helpful assistant."
-     ) -> None:
-         """
-         Initializes the LiaoBots API with given parameters.
-
-         Args:
-             auth_code (str): The auth code for authentication.
-             cookie (str): The cookie for authentication.
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             model (str, optional): AI model to use for text generation. Defaults to "claude-3-5-sonnet-20240620".
-             system_prompt (str, optional): System prompt for LiaoBots. Defaults to "You are a helpful assistant.".
-         """
-
-         # Check if the chosen model is available
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-         self.auth_code = auth_code
-         self.cookie = cookie
-         self.api_endpoint = "https://liaobots.work/api/chat"
-         self.model = model
-         self.system_prompt = system_prompt
-         self.session = requests.Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.headers = {
-             "accept": "*/*",
-             "accept-encoding": "gzip, deflate, br, zstd",
-             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-             "content-type": "application/json",
-             "cookie": self.cookie,
-             "dnt": "1",
-             "origin": "https://liaobots.work",
-             "priority": "u=1, i",
-             "referer": "https://liaobots.work/en",
-             "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-             "sec-ch-ua-mobile": "?0",
-             "sec-ch-ua-platform": '"Windows"',
-             "sec-fetch-dest": "empty",
-             "sec-fetch-mode": "cors",
-             "sec-fetch-site": "same-origin",
-             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
-             "x-Auth-Code": self.auth_code,
-         }
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         self.session.headers.update(self.headers)
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Dict[str, Any]:
-         """
-         Sends a prompt to the LiaoBots API and returns the response.
-
-         Args:
-             prompt: The text prompt to generate text from.
-             stream (bool, optional): Whether to stream the response. Defaults to False.
-             raw (bool, optional): Whether to return the raw response. Defaults to False.
-             optimizer (str, optional): The name of the optimizer to use. Defaults to None.
-             conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
-
-         Returns:
-             The response from the API.
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         payload: Dict[str, any] = {
-             "conversationId": str(uuid.uuid4()),
-             "model": {
-                 "id": self.model
-             },
-             "messages": [
-                 {
-                     "role": "user",
-                     "content": conversation_prompt
-                 }
-             ],
-             "key": "",
-             "prompt": self.system_prompt
-         }
-
-         def for_stream():
-             response = self.session.post(
-                 self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
-             )
-
-             if not response.ok:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason})"
-                 )
-
-             streaming_response = ""
-             content_encoding = response.headers.get('Content-Encoding')
-             # Stream the response
-             for chunk in response.iter_content():
-                 if chunk:
-                     try:
-                         # Decompress the chunk if necessary
-                         if content_encoding == 'gzip':
-                             chunk = gzip.decompress(chunk)
-                         elif content_encoding == 'deflate':
-                             chunk = zlib.decompress(chunk)
-
-                         # Decode the chunk
-                         decoded_chunk = chunk.decode('utf-8')
-                         streaming_response += decoded_chunk
-                     except UnicodeDecodeError:
-                         # Handle non-textual data
-                         pass
-                 else:
-                     pass
-             self.last_response.update(dict(text=streaming_response))
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-             if stream:
-                 yield from [] # Yield nothing when streaming, focus on side effects
-             else:
-                 return [] # Return empty list for non-streaming case
-
-         def for_non_stream():
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
-
- if __name__ == '__main__':
-     from rich import print
-     liaobots = LiaoBots()
-     response = liaobots.chat("tell me about india")
-     for chunk in response:
-         print(chunk, end="", flush=True)
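One fragility in the deleted `for_stream` above: it calls `gzip.decompress` / `zlib.decompress` on each chunk from `iter_content()` independently, which only succeeds when the entire compressed body happens to arrive as a single chunk (and `requests` normally decompresses gzip/deflate bodies itself). For reference, a streaming decompressor that is correct across chunk boundaries — a generic sketch, not code from webscout:

```python
import zlib
from typing import Iterable, Iterator

def iter_decompressed(chunks: Iterable[bytes], content_encoding: str) -> Iterator[str]:
    """Stream-decompress gzip/deflate byte chunks; other encodings pass through."""
    if content_encoding == "gzip":
        decomp = zlib.decompressobj(16 + zlib.MAX_WBITS)  # 16+MAX_WBITS: expect a gzip header
    elif content_encoding == "deflate":
        decomp = zlib.decompressobj()
    else:
        decomp = None
    for chunk in chunks:
        data = decomp.decompress(chunk) if decomp else chunk
        if data:
            yield data.decode("utf-8", errors="replace")
```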
webscout/voice.py DELETED
@@ -1,34 +0,0 @@
- import requests
- import typing
-
- def play_audio(message: str, voice: str = "Brian") -> typing.Union[str, typing.NoReturn]:
-     """
-     Text to speech using StreamElements API
-
-     Parameters:
-         message (str): The text to convert to speech
-         voice (str): The voice to use for speech synthesis. Default is "Brian".
-
-     Returns:
-         result (Union[str, None]): Temporary file path or None in failure
-     """
-     # Base URL for provider API
-     url: str = f"https://api.streamelements.com/kappa/v2/speech?voice={voice}&text={{{message}}}"
-
-     # Request headers
-     headers: typing.Dict[str, str] = {
-         'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36'
-     }
-     # Try to send request or return None on failure
-     try:
-         result = requests.get(url=url, headers=headers)
-         return result.content
-     except:
-         return None
-
- if __name__ == "__main__":
-     # Example usage of the play_audio function
-     message = "Hello, world!"
-     voice = "Brian"
-     audio_result = play_audio(message, voice)
-     print(audio_result)
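Despite its docstring ("Temporary file path"), the deleted `play_audio` actually returns the raw bytes of the HTTP response (or None on failure), so a caller has to write them out before playing them. A minimal sketch using the function above from webscout 5.2 (MP3 is an assumption about the format the StreamElements endpoint serves):

```python
# Uses play_audio() from the deleted webscout/voice.py (webscout <= 5.2).
audio = play_audio("Hello, world!", voice="Brian")
if audio is not None:
    with open("speech.mp3", "wb") as f:  # .mp3 extension assumed for the returned audio
        f.write(audio)
    print("wrote speech.mp3")
else:
    print("TTS request failed")
```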