webscout 6.3-py3-none-any.whl → 6.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (131)
  1. webscout/AIauto.py +191 -176
  2. webscout/AIbase.py +0 -197
  3. webscout/AIutel.py +441 -1130
  4. webscout/DWEBS.py +189 -35
  5. webscout/{YTdownloader.py → Extra/YTToolkit/YTdownloader.py} +990 -1103
  6. webscout/Extra/YTToolkit/__init__.py +3 -0
  7. webscout/{transcriber.py → Extra/YTToolkit/transcriber.py} +479 -551
  8. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  10. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  11. webscout/Extra/YTToolkit/ytapi/extras.py +45 -0
  12. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  13. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  14. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  15. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  16. webscout/Extra/YTToolkit/ytapi/query.py +37 -0
  17. webscout/Extra/YTToolkit/ytapi/stream.py +60 -0
  18. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  19. webscout/Extra/YTToolkit/ytapi/video.py +102 -0
  20. webscout/Extra/__init__.py +3 -1
  21. webscout/Extra/autocoder/__init__.py +9 -0
  22. webscout/Extra/autocoder/autocoder_utiles.py +121 -0
  23. webscout/Extra/autocoder/rawdog.py +680 -0
  24. webscout/Extra/autollama.py +246 -195
  25. webscout/Extra/gguf.py +81 -56
  26. webscout/Extra/markdownlite/__init__.py +862 -0
  27. webscout/Extra/weather_ascii.py +2 -2
  28. webscout/LLM.py +206 -43
  29. webscout/Litlogger/__init__.py +681 -0
  30. webscout/Provider/DARKAI.py +1 -1
  31. webscout/Provider/EDITEE.py +1 -1
  32. webscout/Provider/NinjaChat.py +1 -1
  33. webscout/Provider/PI.py +120 -35
  34. webscout/Provider/Perplexity.py +590 -598
  35. webscout/Provider/Reka.py +0 -1
  36. webscout/Provider/RoboCoders.py +206 -0
  37. webscout/Provider/TTI/AiForce/__init__.py +22 -0
  38. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -0
  39. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -0
  40. webscout/Provider/TTI/Nexra/__init__.py +22 -0
  41. webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
  42. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
  43. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
  44. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -0
  45. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -0
  46. webscout/Provider/TTI/__init__.py +2 -4
  47. webscout/Provider/TTI/artbit/__init__.py +22 -0
  48. webscout/Provider/TTI/artbit/async_artbit.py +184 -0
  49. webscout/Provider/TTI/artbit/sync_artbit.py +176 -0
  50. webscout/Provider/TTI/blackbox/__init__.py +4 -0
  51. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -0
  52. webscout/Provider/TTI/{blackboximage.py → blackbox/sync_blackbox.py} +199 -153
  53. webscout/Provider/TTI/deepinfra/__init__.py +4 -0
  54. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -0
  55. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -0
  56. webscout/Provider/TTI/huggingface/__init__.py +22 -0
  57. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
  58. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
  59. webscout/Provider/TTI/imgninza/__init__.py +4 -0
  60. webscout/Provider/TTI/imgninza/async_ninza.py +214 -0
  61. webscout/Provider/TTI/{imgninza.py → imgninza/sync_ninza.py} +209 -136
  62. webscout/Provider/TTI/talkai/__init__.py +4 -0
  63. webscout/Provider/TTI/talkai/async_talkai.py +229 -0
  64. webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
  65. webscout/Provider/TTS/__init__.py +5 -1
  66. webscout/Provider/TTS/deepgram.py +183 -0
  67. webscout/Provider/TTS/elevenlabs.py +137 -0
  68. webscout/Provider/TTS/gesserit.py +151 -0
  69. webscout/Provider/TTS/murfai.py +139 -0
  70. webscout/Provider/TTS/parler.py +134 -107
  71. webscout/Provider/TTS/streamElements.py +360 -275
  72. webscout/Provider/TTS/utils.py +280 -0
  73. webscout/Provider/TTS/voicepod.py +116 -116
  74. webscout/Provider/__init__.py +8 -1
  75. webscout/Provider/askmyai.py +2 -2
  76. webscout/Provider/cerebras.py +227 -219
  77. webscout/Provider/llama3mitril.py +0 -1
  78. webscout/Provider/meta.py +794 -779
  79. webscout/Provider/mhystical.py +176 -0
  80. webscout/Provider/perplexitylabs.py +265 -0
  81. webscout/Provider/twitterclone.py +251 -245
  82. webscout/Provider/typegpt.py +358 -0
  83. webscout/__init__.py +9 -8
  84. webscout/__main__.py +5 -5
  85. webscout/cli.py +252 -280
  86. webscout/conversation.py +227 -0
  87. webscout/exceptions.py +161 -29
  88. webscout/litagent/__init__.py +172 -0
  89. webscout/litprinter/__init__.py +832 -0
  90. webscout/optimizers.py +270 -0
  91. webscout/prompt_manager.py +279 -0
  92. webscout/scout/__init__.py +11 -0
  93. webscout/scout/core.py +884 -0
  94. webscout/scout/element.py +459 -0
  95. webscout/scout/parsers/__init__.py +69 -0
  96. webscout/scout/parsers/html5lib_parser.py +172 -0
  97. webscout/scout/parsers/html_parser.py +236 -0
  98. webscout/scout/parsers/lxml_parser.py +178 -0
  99. webscout/scout/utils.py +38 -0
  100. webscout/swiftcli/__init__.py +810 -0
  101. webscout/update_checker.py +125 -0
  102. webscout/version.py +1 -1
  103. webscout/zeroart/__init__.py +55 -0
  104. webscout/zeroart/base.py +61 -0
  105. webscout/zeroart/effects.py +99 -0
  106. webscout/zeroart/fonts.py +816 -0
  107. webscout/zerodir/__init__.py +225 -0
  108. {webscout-6.3.dist-info → webscout-6.5.dist-info}/METADATA +37 -112
  109. webscout-6.5.dist-info/RECORD +179 -0
  110. webscout/Agents/Onlinesearcher.py +0 -182
  111. webscout/Agents/__init__.py +0 -2
  112. webscout/Agents/functioncall.py +0 -248
  113. webscout/Bing_search.py +0 -154
  114. webscout/Provider/TTI/AIuncensoredimage.py +0 -103
  115. webscout/Provider/TTI/Nexra.py +0 -120
  116. webscout/Provider/TTI/PollinationsAI.py +0 -138
  117. webscout/Provider/TTI/WebSimAI.py +0 -142
  118. webscout/Provider/TTI/aiforce.py +0 -160
  119. webscout/Provider/TTI/artbit.py +0 -141
  120. webscout/Provider/TTI/deepinfra.py +0 -148
  121. webscout/Provider/TTI/huggingface.py +0 -155
  122. webscout/Provider/TTI/talkai.py +0 -116
  123. webscout/g4f.py +0 -666
  124. webscout/models.py +0 -23
  125. webscout/requestsHTMLfix.py +0 -775
  126. webscout/webai.py +0 -2590
  127. webscout-6.3.dist-info/RECORD +0 -124
  128. {webscout-6.3.dist-info → webscout-6.5.dist-info}/LICENSE.md +0 -0
  129. {webscout-6.3.dist-info → webscout-6.5.dist-info}/WHEEL +0 -0
  130. {webscout-6.3.dist-info → webscout-6.5.dist-info}/entry_points.txt +0 -0
  131. {webscout-6.3.dist-info → webscout-6.5.dist-info}/top_level.txt +0 -0
@@ -1,219 +1,227 @@
- import re
- import requests
- import json
- import os
- from typing import Any, Dict, Optional, Generator, List, Union
- from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
- from webscout.AIbase import Provider
- from webscout import exceptions
- from fake_useragent import UserAgent
- from cerebras.cloud.sdk import Cerebras as CerebrasSDK  # type: ignore
-
-
- class Cerebras(Provider):
-     """
-     A class to interact with the Cerebras API using a cookie for authentication.
-     """
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 2049,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         cookie_path: str = "cookie.json",
-         model: str = "llama3.1-8b",
-         system_prompt: str = "You are a helpful assistant.",
-     ):
-         # Initialize basic settings first
-         self.timeout = timeout
-         self.model = model
-         self.system_prompt = system_prompt
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.last_response = {}
-
-         # Get API key first
-         try:
-             self.api_key = self.get_demo_api_key(cookie_path)
-             # Set environment variable for the SDK
-             os.environ["CEREBRAS_API_KEY"] = self.api_key
-             # Initialize the client with the API key
-             self.client = CerebrasSDK(api_key=self.api_key)
-         except Exception as e:
-             raise exceptions.APIConnectionError(f"Failed to initialize Cerebras client: {e}")
-
-         # Initialize optimizers
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-
-         # Initialize conversation settings
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-
-     @staticmethod
-     def extract_query(text: str) -> str:
-         """Extracts the first code block from the given text."""
-         pattern = r"```(.*?)```"
-         matches = re.findall(pattern, text, re.DOTALL)
-         return matches[0].strip() if matches else text.strip()
-
-     @staticmethod
-     def refiner(text: str) -> str:
-         """Refines the input text by removing surrounding quotes."""
-         return text.strip('"')
-
-     def get_demo_api_key(self, cookie_path: str) -> str:
-         """Retrieves the demo API key using the provided cookie."""
-         try:
-             with open(cookie_path, "r") as file:
-                 cookies = {item["name"]: item["value"] for item in json.load(file)}
-         except FileNotFoundError:
-             raise FileNotFoundError(f"Cookie file not found at path: {cookie_path}")
-         except json.JSONDecodeError:
-             raise json.JSONDecodeError("Invalid JSON format in the cookie file.", "", 0)
-
-         headers = {
-             "Accept": "*/*",
-             "Accept-Language": "en-US,en;q=0.9",
-             "Content-Type": "application/json",
-             "Origin": "https://inference.cerebras.ai",
-             "Referer": "https://inference.cerebras.ai/",
-             "user-agent": UserAgent().random,
-         }
-
-         json_data = {
-             "operationName": "GetMyDemoApiKey",
-             "variables": {},
-             "query": "query GetMyDemoApiKey {\n GetMyDemoApiKey\n}",
-         }
-
-         try:
-             response = requests.post(
-                 "https://inference.cerebras.ai/api/graphql",
-                 cookies=cookies,
-                 headers=headers,
-                 json=json_data,
-                 timeout=self.timeout,
-             )
-             response.raise_for_status()
-             api_key = response.json()["data"]["GetMyDemoApiKey"]
-             return api_key
-         except requests.exceptions.RequestException as e:
-             raise exceptions.APIConnectionError(f"Failed to retrieve API key: {e}")
-         except KeyError:
-             raise exceptions.InvalidResponseError("API key not found in response.")
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[Dict, Generator]:
-         """Send a prompt to the model and get a response."""
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-         messages = [
-             {"content": self.system_prompt, "role": "system"},
-             {"content": conversation_prompt, "role": "user"},
-         ]
-
-         try:
-             if stream:
-                 return self._handle_stream_response(messages)
-             return self._handle_normal_response(messages)
-         except Exception as e:
-             raise exceptions.FailedToGenerateResponseError(f"Error during request: {e}")
-
-     def _handle_stream_response(self, messages: List[Dict]) -> Generator:
-         """Handle streaming response from the model."""
-         try:
-             response = self.client.chat.completions.create(
-                 messages=messages,
-                 model=self.model,
-                 stream=True
-             )
-
-             for choice in response.choices:
-                 if hasattr(choice, 'delta') and hasattr(choice.delta, 'content') and choice.delta.content:
-                     yield dict(text=choice.delta.content)
-
-             # Update last response with the complete message
-             if hasattr(response.choices[0], 'message'):
-                 self.last_response.update({"text": response.choices[0].message.content})
-
-         except Exception as e:
-             raise exceptions.FailedToGenerateResponseError(f"Error during streaming: {e}")
-
-     def _handle_normal_response(self, messages: List[Dict]) -> Dict:
-         """Handle normal (non-streaming) response from the model."""
-         try:
-             response = self.client.chat.completions.create(
-                 messages=messages,
-                 model=self.model
-             )
-             self.last_response.update({"text": response.choices[0].message.content})
-             return self.last_response
-         except Exception as e:
-             raise exceptions.FailedToGenerateResponseError(f"Error during response: {e}")
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[str, Generator]:
-         """High-level method to chat with the model."""
-         return self.get_message(
-             self.ask(
-                 prompt, stream, optimizer=optimizer, conversationally=conversationally
-             )
-         )
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message from response."""
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
-
-
- if __name__ == "__main__":
-     from rich import print
-
-     # Example usage
-     cerebras = Cerebras(
-         cookie_path='cookie.json',
-         model='llama3.1-8b',
-         system_prompt="You are a helpful AI assistant."
-     )
-
-     # Test with streaming
-     response = cerebras.chat("What is the meaning of life?", stream=True)
-     for chunk in response:
-         print(chunk, end="", flush=True)
+ import re
+ import requests
+ import json
+ import os
+ from typing import Any, Dict, Optional, Generator, List, Union
+ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from fake_useragent import UserAgent
+
+ class Cerebras(Provider):
+     """
+     A class to interact with the Cerebras API using a cookie for authentication.
+     """
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2049,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         cookie_path: str = "cookie.json",
+         model: str = "llama3.1-8b",
+         system_prompt: str = "You are a helpful assistant.",
+     ):
+         # Initialize basic settings first
+         self.timeout = timeout
+         self.model = model
+         self.system_prompt = system_prompt
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.last_response = {}
+
+         # Get API key first
+         try:
+             self.api_key = self.get_demo_api_key(cookie_path)
+         except Exception as e:
+             raise exceptions.APIConnectionError(f"Failed to initialize Cerebras client: {e}")
+
+         # Initialize optimizers
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+
+         # Initialize conversation settings
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else None
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     @staticmethod
+     def extract_query(text: str) -> str:
+         """Extracts the first code block from the given text."""
+         pattern = r"```(.*?)```"
+         matches = re.findall(pattern, text, re.DOTALL)
+         return matches[0].strip() if matches else text.strip()
+
+     @staticmethod
+     def refiner(text: str) -> str:
+         """Refines the input text by removing surrounding quotes."""
+         return text.strip('"')
+
+     def get_demo_api_key(self, cookie_path: str) -> str:
+         """Retrieves the demo API key using the provided cookie."""
+         try:
+             with open(cookie_path, "r") as file:
+                 cookies = {item["name"]: item["value"] for item in json.load(file)}
+         except FileNotFoundError:
+             raise FileNotFoundError(f"Cookie file not found at path: {cookie_path}")
+         except json.JSONDecodeError:
+             raise json.JSONDecodeError("Invalid JSON format in the cookie file.", "", 0)
+
+         headers = {
+             "Accept": "*/*",
+             "Accept-Language": "en-US,en;q=0.9",
+             "Content-Type": "application/json",
+             "Origin": "https://inference.cerebras.ai",
+             "Referer": "https://inference.cerebras.ai/",
+             "user-agent": UserAgent().random,
+         }
+
+         json_data = {
+             "operationName": "GetMyDemoApiKey",
+             "variables": {},
+             "query": "query GetMyDemoApiKey {\n GetMyDemoApiKey\n}",
+         }
+
+         try:
+             response = requests.post(
+                 "https://inference.cerebras.ai/api/graphql",
+                 cookies=cookies,
+                 headers=headers,
+                 json=json_data,
+                 timeout=self.timeout,
+             )
+             response.raise_for_status()
+             api_key = response.json()["data"]["GetMyDemoApiKey"]
+             return api_key
+         except requests.exceptions.RequestException as e:
+             raise exceptions.APIConnectionError(f"Failed to retrieve API key: {e}")
+         except KeyError:
+             raise exceptions.InvalidResponseError("API key not found in response.")
+
+     def _make_request(self, messages: List[Dict], stream: bool = False) -> Union[Dict, Generator]:
+         """Make a request to the Cerebras API."""
+         headers = {
+             "Authorization": f"Bearer {self.api_key}",
+             "Content-Type": "application/json",
+             "User-Agent": UserAgent().random
+         }
+
+         payload = {
+             "model": self.model,
+             "messages": messages,
+             "stream": stream
+         }
+
+         try:
+             response = requests.post(
+                 "https://api.cerebras.ai/v1/chat/completions",
+                 headers=headers,
+                 json=payload,
+                 stream=stream,
+                 timeout=self.timeout
+             )
+             response.raise_for_status()
+
+             if stream:
+                 def generate_stream():
+                     for line in response.iter_lines():
+                         if line:
+                             line = line.decode('utf-8')
+                             if line.startswith('data:'):
+                                 try:
+                                     data = json.loads(line[6:])
+                                     if data.get('choices') and data['choices'][0].get('delta', {}).get('content'):
+                                         content = data['choices'][0]['delta']['content']
+                                         yield content
+                                 except json.JSONDecodeError:
+                                     continue
+
+                 return generate_stream()
+             else:
+                 response_json = response.json()
+                 return response_json['choices'][0]['message']['content']
+
+         except requests.exceptions.RequestException as e:
+             raise exceptions.APIConnectionError(f"Request failed: {e}")
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict, Generator]:
+         """Send a prompt to the model and get a response."""
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         messages = [
+             {"role": "system", "content": self.system_prompt},
+             {"role": "user", "content": conversation_prompt}
+         ]
+
+         try:
+             response = self._make_request(messages, stream)
+             if stream:
+                 return response
+
+             self.last_response = response
+             return response
+
+         except Exception as e:
+             raise exceptions.FailedToGenerateResponseError(f"Error during request: {e}")
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator]:
+         """Chat with the model."""
+         response = self.ask(prompt, stream, optimizer, conversationally)
+         if stream:
+             return response
+         return response
+
+     def get_message(self, response: str) -> str:
+         """Retrieves message from response."""
+         return response
+
+
+ if __name__ == "__main__":
+     from rich import print
+
+     # Example usage
+     cerebras = Cerebras(
+         cookie_path=r'C:\Users\koula\OneDrive\Desktop\Webscout\cookie.json',
+         model='llama3.1-8b',
+         system_prompt="You are a helpful AI assistant."
+     )
+
+     # Test with streaming
+     response = cerebras.chat("Hello!", stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
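The rewrite above drops the cerebras.cloud.sdk dependency: the demo API key is still obtained through the cookie-authenticated GraphQL call, but chat completions now go straight to the HTTP chat-completions endpoint, and ask()/chat() return plain text (or a generator of text chunks when streaming) instead of the old dict-shaped responses. Below is a minimal usage sketch under those assumptions; the cookie path and prompts are placeholders, and the import path is inferred from the file location in the change list.

# Usage sketch for the rewritten provider (cookie path and prompts are placeholders).
# cookie.json is assumed to be a browser cookie export shaped like
# [{"name": "...", "value": "..."}, ...], which is what get_demo_api_key() iterates over.
from webscout.Provider.cerebras import Cerebras

bot = Cerebras(cookie_path="cookie.json", model="llama3.1-8b")

# Non-streaming: chat() now returns the reply text directly.
print(bot.chat("Summarize what a provider class does in one sentence."))

# Streaming: chat(..., stream=True) yields text chunks parsed from the SSE "data:" lines.
for chunk in bot.chat("Say hello.", stream=True):
    print(chunk, end="", flush=True)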
@@ -2,7 +2,6 @@ import requests
  import json
  import re
  from typing import Any, Dict, Optional, Generator
-
  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
  from webscout.AIutel import AwesomePrompts