webscout-7.1-py3-none-any.whl → webscout-7.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (144)
  1. webscout/AIauto.py +191 -191
  2. webscout/AIbase.py +122 -122
  3. webscout/AIutel.py +440 -440
  4. webscout/Bard.py +343 -161
  5. webscout/DWEBS.py +489 -492
  6. webscout/Extra/YTToolkit/YTdownloader.py +995 -995
  7. webscout/Extra/YTToolkit/__init__.py +2 -2
  8. webscout/Extra/YTToolkit/transcriber.py +476 -479
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  10. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  11. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  12. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  13. webscout/Extra/YTToolkit/ytapi/video.py +103 -103
  14. webscout/Extra/autocoder/__init__.py +9 -9
  15. webscout/Extra/autocoder/autocoder_utiles.py +199 -199
  16. webscout/Extra/autocoder/rawdog.py +5 -7
  17. webscout/Extra/autollama.py +230 -230
  18. webscout/Extra/gguf.py +3 -3
  19. webscout/Extra/weather.py +171 -171
  20. webscout/LLM.py +442 -442
  21. webscout/Litlogger/__init__.py +67 -681
  22. webscout/Litlogger/core/__init__.py +6 -0
  23. webscout/Litlogger/core/level.py +20 -0
  24. webscout/Litlogger/core/logger.py +123 -0
  25. webscout/Litlogger/handlers/__init__.py +12 -0
  26. webscout/Litlogger/handlers/console.py +50 -0
  27. webscout/Litlogger/handlers/file.py +143 -0
  28. webscout/Litlogger/handlers/network.py +174 -0
  29. webscout/Litlogger/styles/__init__.py +7 -0
  30. webscout/Litlogger/styles/colors.py +231 -0
  31. webscout/Litlogger/styles/formats.py +377 -0
  32. webscout/Litlogger/styles/text.py +87 -0
  33. webscout/Litlogger/utils/__init__.py +6 -0
  34. webscout/Litlogger/utils/detectors.py +154 -0
  35. webscout/Litlogger/utils/formatters.py +200 -0
  36. webscout/Provider/AISEARCH/DeepFind.py +250 -250
  37. webscout/Provider/Blackboxai.py +3 -3
  38. webscout/Provider/ChatGPTGratis.py +226 -0
  39. webscout/Provider/Cloudflare.py +3 -4
  40. webscout/Provider/DeepSeek.py +218 -0
  41. webscout/Provider/Deepinfra.py +3 -3
  42. webscout/Provider/Free2GPT.py +131 -124
  43. webscout/Provider/Gemini.py +100 -115
  44. webscout/Provider/Glider.py +3 -3
  45. webscout/Provider/Groq.py +5 -1
  46. webscout/Provider/Jadve.py +3 -3
  47. webscout/Provider/Marcus.py +191 -192
  48. webscout/Provider/Netwrck.py +3 -3
  49. webscout/Provider/PI.py +2 -2
  50. webscout/Provider/PizzaGPT.py +2 -3
  51. webscout/Provider/QwenLM.py +311 -0
  52. webscout/Provider/TTI/AiForce/__init__.py +22 -22
  53. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
  54. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
  55. webscout/Provider/TTI/Nexra/__init__.py +22 -22
  56. webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
  57. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
  58. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
  59. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
  60. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
  61. webscout/Provider/TTI/artbit/__init__.py +22 -22
  62. webscout/Provider/TTI/artbit/async_artbit.py +184 -184
  63. webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
  64. webscout/Provider/TTI/blackbox/__init__.py +4 -4
  65. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
  66. webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
  67. webscout/Provider/TTI/deepinfra/__init__.py +4 -4
  68. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
  69. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
  70. webscout/Provider/TTI/huggingface/__init__.py +22 -22
  71. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
  72. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
  73. webscout/Provider/TTI/imgninza/__init__.py +4 -4
  74. webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
  75. webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
  76. webscout/Provider/TTI/talkai/__init__.py +4 -4
  77. webscout/Provider/TTI/talkai/async_talkai.py +229 -229
  78. webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
  79. webscout/Provider/TTS/deepgram.py +182 -182
  80. webscout/Provider/TTS/elevenlabs.py +136 -136
  81. webscout/Provider/TTS/gesserit.py +150 -150
  82. webscout/Provider/TTS/murfai.py +138 -138
  83. webscout/Provider/TTS/parler.py +133 -134
  84. webscout/Provider/TTS/streamElements.py +360 -360
  85. webscout/Provider/TTS/utils.py +280 -280
  86. webscout/Provider/TTS/voicepod.py +116 -116
  87. webscout/Provider/TextPollinationsAI.py +2 -3
  88. webscout/Provider/WiseCat.py +193 -0
  89. webscout/Provider/__init__.py +144 -134
  90. webscout/Provider/cerebras.py +242 -227
  91. webscout/Provider/chatglm.py +204 -204
  92. webscout/Provider/dgaf.py +2 -3
  93. webscout/Provider/gaurish.py +2 -3
  94. webscout/Provider/geminiapi.py +208 -208
  95. webscout/Provider/granite.py +223 -0
  96. webscout/Provider/hermes.py +218 -218
  97. webscout/Provider/llama3mitril.py +179 -179
  98. webscout/Provider/llamatutor.py +3 -3
  99. webscout/Provider/llmchat.py +2 -3
  100. webscout/Provider/meta.py +794 -794
  101. webscout/Provider/multichat.py +331 -331
  102. webscout/Provider/typegpt.py +359 -359
  103. webscout/Provider/yep.py +2 -2
  104. webscout/__main__.py +5 -5
  105. webscout/cli.py +319 -319
  106. webscout/conversation.py +241 -242
  107. webscout/exceptions.py +328 -328
  108. webscout/litagent/__init__.py +28 -28
  109. webscout/litagent/agent.py +2 -3
  110. webscout/litprinter/__init__.py +0 -58
  111. webscout/scout/__init__.py +8 -8
  112. webscout/scout/core.py +884 -884
  113. webscout/scout/element.py +459 -459
  114. webscout/scout/parsers/__init__.py +69 -69
  115. webscout/scout/parsers/html5lib_parser.py +172 -172
  116. webscout/scout/parsers/html_parser.py +236 -236
  117. webscout/scout/parsers/lxml_parser.py +178 -178
  118. webscout/scout/utils.py +38 -38
  119. webscout/swiftcli/__init__.py +811 -811
  120. webscout/update_checker.py +2 -12
  121. webscout/version.py +1 -1
  122. webscout/webscout_search.py +5 -4
  123. webscout/zeroart/__init__.py +54 -54
  124. webscout/zeroart/base.py +60 -60
  125. webscout/zeroart/effects.py +99 -99
  126. webscout/zeroart/fonts.py +816 -816
  127. {webscout-7.1.dist-info → webscout-7.2.dist-info}/METADATA +4 -3
  128. webscout-7.2.dist-info/RECORD +217 -0
  129. webstoken/__init__.py +30 -30
  130. webstoken/classifier.py +189 -189
  131. webstoken/keywords.py +216 -216
  132. webstoken/language.py +128 -128
  133. webstoken/ner.py +164 -164
  134. webstoken/normalizer.py +35 -35
  135. webstoken/processor.py +77 -77
  136. webstoken/sentiment.py +206 -206
  137. webstoken/stemmer.py +73 -73
  138. webstoken/tagger.py +60 -60
  139. webstoken/tokenizer.py +158 -158
  140. webscout-7.1.dist-info/RECORD +0 -198
  141. {webscout-7.1.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
  142. {webscout-7.1.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
  143. {webscout-7.1.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
  144. {webscout-7.1.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
webscout/Provider/cerebras.py
@@ -1,227 +1,242 @@
- import re
- import requests
- import json
- import os
- from typing import Any, Dict, Optional, Generator, List, Union
- from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
- from webscout.AIbase import Provider
- from webscout import exceptions
- from webscout import LitAgent as UserAgent
-
- class Cerebras(Provider):
-     """
-     A class to interact with the Cerebras API using a cookie for authentication.
-     """
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 2049,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         cookie_path: str = "cookie.json",
-         model: str = "llama3.1-8b",
-         system_prompt: str = "You are a helpful assistant.",
-     ):
-         # Initialize basic settings first
-         self.timeout = timeout
-         self.model = model
-         self.system_prompt = system_prompt
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.last_response = {}
-
-         # Get API key first
-         try:
-             self.api_key = self.get_demo_api_key(cookie_path)
-         except Exception as e:
-             raise exceptions.APIConnectionError(f"Failed to initialize Cerebras client: {e}")
-
-         # Initialize optimizers
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-
-         # Initialize conversation settings
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else None
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-
-     @staticmethod
-     def extract_query(text: str) -> str:
-         """Extracts the first code block from the given text."""
-         pattern = r"```(.*?)```"
-         matches = re.findall(pattern, text, re.DOTALL)
-         return matches[0].strip() if matches else text.strip()
-
-     @staticmethod
-     def refiner(text: str) -> str:
-         """Refines the input text by removing surrounding quotes."""
-         return text.strip('"')
-
-     def get_demo_api_key(self, cookie_path: str) -> str:
-         """Retrieves the demo API key using the provided cookie."""
-         try:
-             with open(cookie_path, "r") as file:
-                 cookies = {item["name"]: item["value"] for item in json.load(file)}
-         except FileNotFoundError:
-             raise FileNotFoundError(f"Cookie file not found at path: {cookie_path}")
-         except json.JSONDecodeError:
-             raise json.JSONDecodeError("Invalid JSON format in the cookie file.", "", 0)
-
-         headers = {
-             "Accept": "*/*",
-             "Accept-Language": "en-US,en;q=0.9",
-             "Content-Type": "application/json",
-             "Origin": "https://inference.cerebras.ai",
-             "Referer": "https://inference.cerebras.ai/",
-             "user-agent": UserAgent().random(),
-         }
-
-         json_data = {
-             "operationName": "GetMyDemoApiKey",
-             "variables": {},
-             "query": "query GetMyDemoApiKey {\n GetMyDemoApiKey\n}",
-         }
-
-         try:
-             response = requests.post(
-                 "https://inference.cerebras.ai/api/graphql",
-                 cookies=cookies,
-                 headers=headers,
-                 json=json_data,
-                 timeout=self.timeout,
-             )
-             response.raise_for_status()
-             api_key = response.json()["data"]["GetMyDemoApiKey"]
-             return api_key
-         except requests.exceptions.RequestException as e:
-             raise exceptions.APIConnectionError(f"Failed to retrieve API key: {e}")
-         except KeyError:
-             raise exceptions.InvalidResponseError("API key not found in response.")
-
-     def _make_request(self, messages: List[Dict], stream: bool = False) -> Union[Dict, Generator]:
-         """Make a request to the Cerebras API."""
-         headers = {
-             "Authorization": f"Bearer {self.api_key}",
-             "Content-Type": "application/json",
-             "User-Agent": UserAgent().random(),
-         }
-
-         payload = {
-             "model": self.model,
-             "messages": messages,
-             "stream": stream
-         }
-
-         try:
-             response = requests.post(
-                 "https://api.cerebras.ai/v1/chat/completions",
-                 headers=headers,
-                 json=payload,
-                 stream=stream,
-                 timeout=self.timeout
-             )
-             response.raise_for_status()
-
-             if stream:
-                 def generate_stream():
-                     for line in response.iter_lines():
-                         if line:
-                             line = line.decode('utf-8')
-                             if line.startswith('data:'):
-                                 try:
-                                     data = json.loads(line[6:])
-                                     if data.get('choices') and data['choices'][0].get('delta', {}).get('content'):
-                                         content = data['choices'][0]['delta']['content']
-                                         yield content
-                                 except json.JSONDecodeError:
-                                     continue
-
-                 return generate_stream()
-             else:
-                 response_json = response.json()
-                 return response_json['choices'][0]['message']['content']
-
-         except requests.exceptions.RequestException as e:
-             raise exceptions.APIConnectionError(f"Request failed: {e}")
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[Dict, Generator]:
-         """Send a prompt to the model and get a response."""
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-         messages = [
-             {"role": "system", "content": self.system_prompt},
-             {"role": "user", "content": conversation_prompt}
-         ]
-
-         try:
-             response = self._make_request(messages, stream)
-             if stream:
-                 return response
-
-             self.last_response = response
-             return response
-
-         except Exception as e:
-             raise exceptions.FailedToGenerateResponseError(f"Error during request: {e}")
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[str, Generator]:
-         """Chat with the model."""
-         response = self.ask(prompt, stream, optimizer, conversationally)
-         if stream:
-             return response
-         return response
-
-     def get_message(self, response: str) -> str:
-         """Retrieves message from response."""
-         return response
-
-
- if __name__ == "__main__":
-     from rich import print
-
-     # Example usage
-     cerebras = Cerebras(
-         cookie_path=r'C:\Users\koula\OneDrive\Desktop\Webscout\cookie.json',
-         model='llama3.1-8b',
-         system_prompt="You are a helpful AI assistant."
-     )
-
-     # Test with streaming
-     response = cerebras.chat("Hello!", stream=True)
-     for chunk in response:
-         print(chunk, end="", flush=True)
+
+ import re
+ import requests
+ import json
+ import os
+ from typing import Any, Dict, Optional, Generator, List, Union
+ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout import LitAgent as UserAgent
+
+ class Cerebras(Provider):
+     """
+     A class to interact with the Cerebras API using a cookie for authentication.
+     """
+
+     AVAILABLE_MODELS = [
+         "llama3.1-8b",
+         "llama-3.3-70b",
+         "deepseek-r1-distill-llama-70b"
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2049,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         cookie_path: str = "cookie.json",
+         model: str = "llama3.1-8b",
+         system_prompt: str = "You are a helpful assistant.",
+     ):
+         # Validate model choice
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(
+                 f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
+             )
+
+         # Initialize basic settings first
+         self.timeout = timeout
+         self.model = model
+         self.system_prompt = system_prompt
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.last_response = {}
+
+         # Get API key first
+         try:
+             self.api_key = self.get_demo_api_key(cookie_path)
+         except Exception as e:
+             raise exceptions.APIConnectionError(f"Failed to initialize Cerebras client: {e}")
+
+         # Initialize optimizers
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+
+         # Initialize conversation settings
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else None
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     # Rest of the class implementation remains the same...
+     @staticmethod
+     def extract_query(text: str) -> str:
+         """Extracts the first code block from the given text."""
+         pattern = r"```(.*?)```"
+         matches = re.findall(pattern, text, re.DOTALL)
+         return matches[0].strip() if matches else text.strip()
+
+     @staticmethod
+     def refiner(text: str) -> str:
+         """Refines the input text by removing surrounding quotes."""
+         return text.strip('"')
+
+     def get_demo_api_key(self, cookie_path: str) -> str:
+         """Retrieves the demo API key using the provided cookie."""
+         try:
+             with open(cookie_path, "r") as file:
+                 cookies = {item["name"]: item["value"] for item in json.load(file)}
+         except FileNotFoundError:
+             raise FileNotFoundError(f"Cookie file not found at path: {cookie_path}")
+         except json.JSONDecodeError:
+             raise json.JSONDecodeError("Invalid JSON format in the cookie file.", "", 0)
+
+         headers = {
+             "Accept": "*/*",
+             "Accept-Language": "en-US,en;q=0.9",
+             "Content-Type": "application/json",
+             "Origin": "https://inference.cerebras.ai",
+             "Referer": "https://inference.cerebras.ai/",
+             "user-agent": UserAgent().random(),
+         }
+
+         json_data = {
+             "operationName": "GetMyDemoApiKey",
+             "variables": {},
+             "query": "query GetMyDemoApiKey {\n GetMyDemoApiKey\n}",
+         }
+
+         try:
+             response = requests.post(
+                 "https://inference.cerebras.ai/api/graphql",
+                 cookies=cookies,
+                 headers=headers,
+                 json=json_data,
+                 timeout=self.timeout,
+             )
+             response.raise_for_status()
+             api_key = response.json()["data"]["GetMyDemoApiKey"]
+             return api_key
+         except requests.exceptions.RequestException as e:
+             raise exceptions.APIConnectionError(f"Failed to retrieve API key: {e}")
+         except KeyError:
+             raise exceptions.InvalidResponseError("API key not found in response.")
+
+     def _make_request(self, messages: List[Dict], stream: bool = False) -> Union[Dict, Generator]:
+         """Make a request to the Cerebras API."""
+         headers = {
+             "Authorization": f"Bearer {self.api_key}",
+             "Content-Type": "application/json",
+             "User-Agent": UserAgent().random(),
+         }
+
+         payload = {
+             "model": self.model,
+             "messages": messages,
+             "stream": stream
+         }
+
+         try:
+             response = requests.post(
+                 "https://api.cerebras.ai/v1/chat/completions",
+                 headers=headers,
+                 json=payload,
+                 stream=stream,
+                 timeout=self.timeout
+             )
+             response.raise_for_status()
+
+             if stream:
+                 def generate_stream():
+                     for line in response.iter_lines():
+                         if line:
+                             line = line.decode('utf-8')
+                             if line.startswith('data:'):
+                                 try:
+                                     data = json.loads(line[6:])
+                                     if data.get('choices') and data['choices'][0].get('delta', {}).get('content'):
+                                         content = data['choices'][0]['delta']['content']
+                                         yield content
+                                 except json.JSONDecodeError:
+                                     continue
+
+                 return generate_stream()
+             else:
+                 response_json = response.json()
+                 return response_json['choices'][0]['message']['content']
+
+         except requests.exceptions.RequestException as e:
+             raise exceptions.APIConnectionError(f"Request failed: {e}")
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict, Generator]:
+         """Send a prompt to the model and get a response."""
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         messages = [
+             {"role": "system", "content": self.system_prompt},
+             {"role": "user", "content": conversation_prompt}
+         ]
+
+         try:
+             response = self._make_request(messages, stream)
+             if stream:
+                 return response
+
+             self.last_response = response
+             return response
+
+         except Exception as e:
+             raise exceptions.FailedToGenerateResponseError(f"Error during request: {e}")
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator]:
+         """Chat with the model."""
+         response = self.ask(prompt, stream, optimizer, conversationally)
+         if stream:
+             return response
+         return response
+
+     def get_message(self, response: str) -> str:
+         """Retrieves message from response."""
+         return response
+
+
+ if __name__ == "__main__":
+     from rich import print
+
+     # Example usage
+     cerebras = Cerebras(
+         cookie_path='cookie.json',
+         model='llama3.1-8b',
+         system_prompt="You are a helpful AI assistant."
+     )
+
+     # Test with streaming
+     response = cerebras.chat("Hello!", stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
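
The main functional change in this hunk is the new AVAILABLE_MODELS whitelist, which __init__ now checks before reading the cookie file or requesting the demo API key. A minimal usage sketch of that behavior, based on the added code above (the unsupported model name and the try/except wrapper are illustrative, not part of the release):

    from webscout.Provider.cerebras import Cerebras

    # The whitelist is a class attribute, so it can be inspected without a client.
    print(Cerebras.AVAILABLE_MODELS)
    # ['llama3.1-8b', 'llama-3.3-70b', 'deepseek-r1-distill-llama-70b']

    # An unsupported model name now fails fast with ValueError, before any
    # cookie handling or network call is attempted.
    try:
        Cerebras(model="gpt-4o", cookie_path="cookie.json")
    except ValueError as err:
        print(err)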