webscout-8.3.3-py3-none-any.whl → webscout-8.3.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout has been flagged as possibly problematic.

Files changed (79)
  1. webscout/AIutel.py +53 -800
  2. webscout/Bard.py +2 -22
  3. webscout/Provider/AISEARCH/__init__.py +11 -10
  4. webscout/Provider/AISEARCH/felo_search.py +7 -3
  5. webscout/Provider/AISEARCH/scira_search.py +26 -11
  6. webscout/Provider/AISEARCH/stellar_search.py +53 -8
  7. webscout/Provider/Deepinfra.py +81 -57
  8. webscout/Provider/ExaChat.py +9 -5
  9. webscout/Provider/Flowith.py +1 -1
  10. webscout/Provider/FreeGemini.py +2 -2
  11. webscout/Provider/Gemini.py +3 -10
  12. webscout/Provider/GeminiProxy.py +31 -5
  13. webscout/Provider/LambdaChat.py +39 -31
  14. webscout/Provider/Netwrck.py +5 -8
  15. webscout/Provider/OLLAMA.py +8 -9
  16. webscout/Provider/OPENAI/README.md +1 -1
  17. webscout/Provider/OPENAI/TogetherAI.py +57 -48
  18. webscout/Provider/OPENAI/TwoAI.py +94 -1
  19. webscout/Provider/OPENAI/__init__.py +1 -3
  20. webscout/Provider/OPENAI/autoproxy.py +1 -1
  21. webscout/Provider/OPENAI/copilot.py +73 -26
  22. webscout/Provider/OPENAI/deepinfra.py +60 -24
  23. webscout/Provider/OPENAI/exachat.py +9 -5
  24. webscout/Provider/OPENAI/monochat.py +3 -3
  25. webscout/Provider/OPENAI/netwrck.py +4 -7
  26. webscout/Provider/OPENAI/qodo.py +630 -0
  27. webscout/Provider/OPENAI/scirachat.py +86 -49
  28. webscout/Provider/OPENAI/textpollinations.py +19 -14
  29. webscout/Provider/OPENAI/venice.py +1 -0
  30. webscout/Provider/Perplexitylabs.py +163 -147
  31. webscout/Provider/Qodo.py +478 -0
  32. webscout/Provider/TTI/__init__.py +1 -0
  33. webscout/Provider/TTI/monochat.py +3 -3
  34. webscout/Provider/TTI/together.py +7 -6
  35. webscout/Provider/TTI/venice.py +368 -0
  36. webscout/Provider/TextPollinationsAI.py +19 -14
  37. webscout/Provider/TogetherAI.py +57 -44
  38. webscout/Provider/TwoAI.py +96 -2
  39. webscout/Provider/TypliAI.py +33 -27
  40. webscout/Provider/UNFINISHED/PERPLEXED_search.py +254 -0
  41. webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
  42. webscout/Provider/Venice.py +1 -0
  43. webscout/Provider/WiseCat.py +18 -20
  44. webscout/Provider/__init__.py +4 -10
  45. webscout/Provider/copilot.py +58 -61
  46. webscout/Provider/freeaichat.py +64 -55
  47. webscout/Provider/monochat.py +275 -0
  48. webscout/Provider/scira_chat.py +115 -21
  49. webscout/Provider/toolbaz.py +5 -10
  50. webscout/Provider/typefully.py +1 -11
  51. webscout/Provider/x0gpt.py +325 -315
  52. webscout/__init__.py +4 -11
  53. webscout/auth/__init__.py +19 -4
  54. webscout/auth/api_key_manager.py +189 -189
  55. webscout/auth/auth_system.py +25 -40
  56. webscout/auth/config.py +105 -6
  57. webscout/auth/database.py +377 -22
  58. webscout/auth/models.py +185 -130
  59. webscout/auth/request_processing.py +175 -11
  60. webscout/auth/routes.py +119 -5
  61. webscout/auth/server.py +9 -2
  62. webscout/auth/simple_logger.py +236 -0
  63. webscout/sanitize.py +1074 -0
  64. webscout/version.py +1 -1
  65. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/METADATA +9 -150
  66. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/RECORD +70 -72
  67. webscout/Provider/AI21.py +0 -177
  68. webscout/Provider/HuggingFaceChat.py +0 -469
  69. webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
  70. webscout/Provider/OPENAI/freeaichat.py +0 -363
  71. webscout/Provider/OPENAI/typegpt.py +0 -368
  72. webscout/Provider/OPENAI/uncovrAI.py +0 -477
  73. webscout/Provider/WritingMate.py +0 -273
  74. webscout/Provider/typegpt.py +0 -284
  75. webscout/Provider/uncovr.py +0 -333
  76. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/WHEEL +0 -0
  77. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/entry_points.txt +0 -0
  78. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/licenses/LICENSE.md +0 -0
  79. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/top_level.txt +0 -0
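
The full-file diffs below show the two provider modules removed in this release (items 73 and 74 in the list above).

webscout/Provider/WritingMate.py (+0 −273)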
@@ -1,273 +0,0 @@
- import re
- import json
- from curl_cffi import CurlError
- from curl_cffi.requests import Session
- from typing import Union, Any, Dict, Generator, Optional
- from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
- from webscout.AIbase import Provider
- from webscout import exceptions
- from webscout.litagent import LitAgent
-
- class WritingMate(Provider):
-     AVAILABLE_MODELS = [
-         "claude-3-haiku-20240307",
-         "gemini-1.5-flash-latest",
-         "llama3-8b-8192",
-         "llama3-70b-8192",
-         "google/gemini-flash-1.5-8b-exp",
-         "gpt-4o-mini"
-     ]
-     """
-     Provider for WritingMate streaming API.
-     """
-     api_endpoint = "https://chat.writingmate.ai/api/chat/tools-stream"
-
-     def __init__(
-         self,
-         cookies_path: str = "cookies.json",
-         is_conversation: bool = True,
-         max_tokens: int = 4096,
-         timeout: int = 60,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {}, # Added proxies parameter
-         history_offset: int = 10250, # Added history_offset parameter
-         act: str = None,
-         system_prompt: str = "You are a friendly, helpful AI assistant.",
-         model: str = "gpt-4o-mini"
-     ):
-         self.cookies_path = cookies_path
-         # Load cookies into a dictionary for curl_cffi
-         self.cookies = self._load_cookies_dict(cookies_path)
-         # Initialize curl_cffi Session
-         self.session = Session()
-         self.timeout = timeout
-         self.system_prompt = system_prompt
-         self.model = model
-         if self.model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Unknown model: {self.model}. Choose from {self.AVAILABLE_MODELS}")
-         self.last_response = {}
-         self.agent = LitAgent() # Initialize LitAgent
-         self.headers = {
-             "Accept": "*/*",
-             "Accept-Encoding": "gzip, deflate, br, zstd",
-             "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
-             # Content-Type might be application/json based on body, but API expects text/plain? Keep for now.
-             "Content-Type": "text/plain;charset=UTF-8",
-             "Origin": "https://chat.writingmate.ai",
-             "Referer": "https://chat.writingmate.ai/chat",
-             # Remove Cookie header, pass cookies via parameter
-             # "Cookie": self.cookies,
-             "DNT": "1",
-             "sec-ch-ua": "\"Microsoft Edge\";v=\"135\", \"Not-A.Brand\";v=\"8\", \"Chromium\";v=\"135\"",
-             "sec-ch-ua-mobile": "?0",
-             "sec-ch-ua-platform": "\"Windows\"",
-             "Sec-Fetch-Dest": "empty",
-             "Sec-Fetch-Mode": "cors",
-             "Sec-Fetch-Site": "same-origin",
-             "Sec-GPC": "1",
-             "User-Agent": self.agent.random() # Use LitAgent
-         }
-         # Update curl_cffi session headers and proxies
-         self.session.headers.update(self.headers)
-         self.session.proxies = proxies
-
-         self.__available_optimizers = (
-             m for m in dir(Optimizers)
-             if callable(getattr(Optimizers, m)) and not m.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(act, raise_not_found=True, default=None, case_insensitive=True)
-             if act else intro or Conversation.intro
-         )
-         self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
-         # Apply history offset
-         self.conversation.history_offset = history_offset
-
-     # Keep original _load_cookies if needed elsewhere, or remove
-     # def _load_cookies(self, path: str) -> str:
-     #     try:
-     #         with open(path, 'r') as f:
-     #             data = json.load(f)
-     #         return '; '.join(f"{c['name']}={c['value']}" for c in data)
-     #     except (FileNotFoundError, json.JSONDecodeError):
-     #         raise RuntimeError(f"Failed to load cookies from {path}")
-
-     # New method to load cookies as a dictionary
-     def _load_cookies_dict(self, path: str) -> Dict[str, str]:
-         try:
-             with open(path, 'r') as f:
-                 data = json.load(f)
-             # Ensure data is a list of cookie objects
-             if not isinstance(data, list):
-                 raise ValueError("Cookie file should contain a list of cookie objects.")
-             return {c['name']: c['value'] for c in data if 'name' in c and 'value' in c}
-         except (FileNotFoundError, json.JSONDecodeError, ValueError) as e:
-             raise RuntimeError(f"Failed to load cookies from {path}: {e}")
-
-     @staticmethod
-     def _writingmate_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
-         """Extracts content from the WritingMate stream format '0:"..."'."""
-         if isinstance(chunk, str):
-             # Regex to find the pattern 0:"<content>"
-             match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
-             if match:
-                 # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
-                 content = match.group(1).encode().decode('unicode_escape')
-                 return content.replace('\\\\', '\\').replace('\\"', '"')
-         return None
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = True, # Defaulting stream to True as per original
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False
-     ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
-         # ... existing prompt generation and optimizer logic ...
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 # Use the correct exception type
-                 raise exceptions.FailedToGenerateResponseError(f"Unknown optimizer: {optimizer}")
-
-         # Body seems to be JSON, let curl_cffi handle serialization
-         body = {
-             "chatSettings": {
-                 "model": self.model,
-                 "prompt": self.system_prompt,
-                 "temperature": 0.5,
-                 "contextLength": 4096,
-                 "includeProfileContext": True,
-                 "includeWorkspaceInstructions": True,
-                 "embeddingsProvider": "openai"
-             },
-             "messages": [
-                 {"role": "system", "content": self.system_prompt},
-                 {"role": "user", "content": conversation_prompt}
-             ],
-             "selectedTools": []
-         }
-
-         def for_stream():
-             try:
-                 # Use curl_cffi session post, pass cookies dict
-                 response = self.session.post(
-                     self.api_endpoint,
-                     headers=self.headers,
-                     cookies=self.cookies, # Pass cookies dict
-                     json=body, # Pass body as json
-                     stream=True,
-                     timeout=self.timeout,
-                     impersonate="chrome120" # Add impersonate
-                     # http_version=CurlHttpVersion.V1_1 # Add if HTTP/2 errors occur
-                 )
-                 if not response.ok:
-                     raise exceptions.FailedToGenerateResponseError(
-                         f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                     )
-                 streaming_text = ""
-                 # Use sanitize_stream with the custom extractor
-                 processed_stream = sanitize_stream(
-                     data=response.iter_content(chunk_size=None), # Pass byte iterator
-                     intro_value=None, # No simple prefix
-                     to_json=False, # Content is not JSON
-                     content_extractor=self._writingmate_extractor, # Use the specific extractor
-                     raw=raw
-                 )
-
-                 for content_chunk in processed_stream:
-                     # Always yield as string, even in raw mode
-                     if isinstance(content_chunk, bytes):
-                         content_chunk = content_chunk.decode('utf-8', errors='ignore')
-                     if raw:
-                         yield content_chunk
-                     else:
-                         if content_chunk and isinstance(content_chunk, str):
-                             streaming_text += content_chunk
-                             yield dict(text=content_chunk)
-
-                 self.last_response.update(dict(text=streaming_text))
-                 self.conversation.update_chat_history(
-                     prompt, self.get_message(self.last_response)
-                 )
-             except CurlError as e: # Catch CurlError
-                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
-             except Exception as e: # Catch other potential exceptions
-                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
-
-         def for_non_stream():
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         effective_stream = stream if stream is not None else True
-         return for_stream() if effective_stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False, # Default stream to False as per original chat method
-         optimizer: str = None,
-         conversationally: bool = False,
-         raw: bool = False, # Added raw parameter
-     ) -> Union[str, Generator[str, None, None]]:
-         if stream:
-             def text_stream():
-                 for response in self.ask(
-                     prompt, stream=True, raw=raw,
-                     optimizer=optimizer, conversationally=conversationally
-                 ):
-                     if raw:
-                         yield response
-                     else:
-                         yield self.get_message(response)
-             return text_stream()
-         else:
-             response_data = self.ask(
-                 prompt,
-                 stream=False,
-                 raw=raw,
-                 optimizer=optimizer,
-                 conversationally=conversationally,
-             )
-             if raw:
-                 return response_data
-             if isinstance(response_data, dict):
-                 return self.get_message(response_data)
-             else:
-                 full_text = "".join(self.get_message(chunk) for chunk in response_data if isinstance(chunk, dict))
-                 return full_text
-
-
-     def get_message(self, response: dict) -> str:
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         # Ensure text exists before processing
-         # Formatting is now mostly handled by the extractor
-         text = response.get("text", "")
-         formatted_text = text # Keep newline replacement if needed: .replace('\\n', '\n')
-         return formatted_text
-
- if __name__ == "__main__":
-     from rich import print
-     try:
-         ai = WritingMate(cookies_path="cookies.json", proxies={}, timeout=120) # Example with proxies and timeout
-         # Get input within the try block
-         user_input = input(">>> ")
-         response = ai.chat(user_input, stream=True)
-         print("[bold green]Assistant:[/bold green]")
-         for chunk in response:
-             print(chunk, end="", flush=True)
-         print() # Add a newline at the end
-     except RuntimeError as e:
-         print(f"[bold red]Error initializing WritingMate:[/bold red] {e}")
-     except exceptions.FailedToGenerateResponseError as e:
-         print(f"[bold red]Error during chat:[/bold red] {e}")
-     except Exception as e:
-         print(f"[bold red]An unexpected error occurred:[/bold red] {e}")
@@ -1,284 +0,0 @@
- from curl_cffi.requests import Session
- from curl_cffi import CurlError
- import json
- from typing import Union, Any, Dict, Generator
-
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
- from webscout.AIbase import Provider
- from webscout import exceptions
- from webscout.litagent import LitAgent
-
- class TypeGPT(Provider):
-     """
-     A class to interact with the TypeGPT.net API. Improved to match webscout standards.
-     """
-     AVAILABLE_MODELS = [
-         # Working Models (based on testing)
-         # "gpt-4o-mini-2024-07-18",
-         "gpt-4o-mini",
-         "chatgpt-4o-latest",
-         "deepseek-r1",
-         "deepseek-v3",
-         "uncensored-r1",
-         # "Image-Generator",
-     ]
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 4000, # Set a reasonable default
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "chatgpt-4o-latest",
-         system_prompt: str = "You are a helpful assistant.",
-         temperature: float = 0.5,
-         presence_penalty: int = 0,
-         frequency_penalty: int = 0,
-         top_p: float = 1,
-     ):
-         """Initializes the TypeGPT API client."""
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}")
-
-         # Initialize curl_cffi Session
-         self.session = Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.api_endpoint = "https://chat.typegpt.net/api/openai/v1/chat/completions"
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-         self.system_prompt = system_prompt
-         self.temperature = temperature
-         self.presence_penalty = presence_penalty
-         self.frequency_penalty = frequency_penalty
-         self.top_p = top_p
-         self.headers = {
-             "authority": "chat.typegpt.net",
-             "accept": "application/json, text/event-stream",
-             "accept-language": "en-US,en;q=0.9",
-             "content-type": "application/json",
-             "origin": "https://chat.typegpt.net",
-             "referer": "https://chat.typegpt.net/",
-             "user-agent": LitAgent().random()
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
-         self.conversation.history_offset = history_offset
-         # Update curl_cffi session headers and proxies
-         self.session.headers.update(self.headers)
-         self.session.proxies = proxies
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
-         """Sends a prompt to the TypeGPT.net API and returns the response."""
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-         payload = {
-             "messages": [
-                 {"role": "system", "content": self.system_prompt},
-                 {"role": "user", "content": conversation_prompt}
-             ],
-             "stream": stream,
-             "model": self.model,
-             "temperature": self.temperature,
-             "presence_penalty": self.presence_penalty,
-             "frequency_penalty": self.frequency_penalty,
-             "top_p": self.top_p,
-             "max_tokens": self.max_tokens_to_sample,
-         }
-         def for_stream():
-             try:
-                 response = self.session.post(
-                     self.api_endpoint,
-                     headers=self.headers,
-                     json=payload,
-                     stream=True,
-                     timeout=self.timeout,
-                     impersonate="chrome120"
-                 )
-             except CurlError as ce:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
-                 ) from ce
-             response.raise_for_status() # Check for HTTP errors first
-             streaming_text = ""
-             processed_stream = sanitize_stream(
-                 data=response.iter_content(chunk_size=None), # Pass byte iterator
-                 intro_value="data:",
-                 to_json=True, # Stream sends JSON
-                 skip_markers=["[DONE]"],
-                 content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
-                 yield_raw_on_error=False,
-                 raw=raw
-             )
-             for content_chunk in processed_stream:
-                 if isinstance(content_chunk, bytes):
-                     content_chunk = content_chunk.decode('utf-8', errors='ignore')
-                 if content_chunk is None:
-                     continue
-                 if raw:
-                     yield content_chunk
-                 else:
-                     if content_chunk and isinstance(content_chunk, str):
-                         streaming_text += content_chunk
-                         yield dict(text=content_chunk)
-             self.last_response = dict(text=streaming_text)
-             if streaming_text:
-                 self.conversation.update_chat_history(prompt, streaming_text)
-         def for_non_stream():
-             try:
-                 response = self.session.post(
-                     self.api_endpoint,
-                     headers=self.headers,
-                     json=payload,
-                     timeout=self.timeout,
-                     impersonate="chrome120"
-                 )
-             except CurlError as ce:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
-                 ) from ce
-             response.raise_for_status() # Check for HTTP errors
-             try:
-                 response_text = response.text # Get raw text
-                 processed_stream = sanitize_stream(
-                     data=response_text,
-                     to_json=True, # Parse the whole text as JSON
-                     intro_value=None,
-                     content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('message', {}).get('content') if isinstance(chunk, dict) else None,
-                     yield_raw_on_error=False,
-                     raw=raw
-                 )
-                 content = ""
-                 for extracted_content in processed_stream:
-                     if isinstance(extracted_content, bytes):
-                         extracted_content = extracted_content.decode('utf-8', errors='ignore')
-                     if extracted_content is None:
-                         continue
-                     if raw:
-                         content += extracted_content
-                     else:
-                         content = extracted_content if isinstance(extracted_content, str) else ""
-                 self.last_response = {"text": content}
-                 self.conversation.update_chat_history(prompt, content)
-                 return self.last_response if not raw else content
-             except (json.JSONDecodeError, Exception) as je:
-                 raise exceptions.FailedToGenerateResponseError(f"Failed to decode JSON response: {je} - Response text: {response.text}")
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-         raw: bool = False, # Added raw parameter
-     ) -> Union[str, Generator[str, None, None]]:
-         if stream:
-             gen = self.ask(
-                 prompt, stream=True, raw=raw, # Ensure ask yields dicts or raw
-                 optimizer=optimizer, conversationally=conversationally
-             )
-             for chunk in gen:
-                 if raw:
-                     yield chunk
-                 else:
-                     yield self.get_message(chunk)
-         else:
-             response = self.ask(
-                 prompt, stream=False, raw=raw,
-                 optimizer=optimizer, conversationally=conversationally
-             )
-             if raw:
-                 return response
-             else:
-                 return self.get_message(response)
-     def get_message(self, response: Dict[str, Any]) -> str:
-         """Retrieves message from response."""
-         if isinstance(response, dict):
-             assert isinstance(response, dict), "Response should be of dict data-type only"
-             # Handle potential unicode escapes in the final text
-             text = response.get("text", "")
-             try:
-                 # Attempt to decode escapes, return original if fails
-                 return text.encode('utf-8').decode('unicode_escape')
-             except UnicodeDecodeError:
-                 return text
-         else:
-             # This case should ideally not be reached if ask() behaves as expected
-             raise TypeError(f"Invalid response type: {type(response)}. Expected dict.")
-
- if __name__ == "__main__":
-     print("-" * 80)
-     print(f"{'Model':<50} {'Status':<10} {'Response'}")
-     print("-" * 80)
-
-     # Test all available models
-     working = 0
-     total = len(TypeGPT.AVAILABLE_MODELS)
-
-     for model in TypeGPT.AVAILABLE_MODELS:
-         try:
-             test_ai = TypeGPT(model=model, timeout=60)
-             # Test stream first
-             response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
-             response_text = ""
-             print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)
-             for chunk in response_stream:
-                 response_text += chunk
-                 # Optional: print chunks as they arrive for visual feedback
-                 # print(chunk, end="", flush=True)
-
-             if response_text and len(response_text.strip()) > 0:
-                 status = "✓"
-                 # Clean and truncate response
-                 clean_text = response_text.strip() # Already decoded in get_message
-                 display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
-             else:
-                 status = "✗ (Stream)"
-                 display_text = "Empty or invalid stream response"
-             print(f"\r{model:<50} {status:<10} {display_text}")
-
-             # Optional: Add non-stream test if needed, but stream test covers basic functionality
-             # print(f"\r{model:<50} {'Non-Stream...':<10}", end="", flush=True)
-             # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
-             # if not response_non_stream or len(response_non_stream.strip()) == 0:
-             #     print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")
-
-
-         except Exception as e:
-             print(f"\r{model:<50} {'✗':<10} {str(e)}")