webscout 8.3.5__py3-none-any.whl → 8.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the registry's advisory page for more details.

Files changed (159) hide show
  1. webscout/AIutel.py +2 -0
  2. webscout/Bard.py +12 -6
  3. webscout/DWEBS.py +66 -57
  4. webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
  5. webscout/Provider/AISEARCH/__init__.py +18 -11
  6. webscout/Provider/AISEARCH/scira_search.py +3 -1
  7. webscout/Provider/Aitopia.py +2 -3
  8. webscout/Provider/Andi.py +3 -3
  9. webscout/Provider/ChatGPTClone.py +1 -1
  10. webscout/Provider/ChatSandbox.py +1 -0
  11. webscout/Provider/Cloudflare.py +1 -1
  12. webscout/Provider/Cohere.py +1 -0
  13. webscout/Provider/Deepinfra.py +13 -10
  14. webscout/Provider/ExaAI.py +1 -1
  15. webscout/Provider/ExaChat.py +1 -80
  16. webscout/Provider/Flowith.py +6 -1
  17. webscout/Provider/Gemini.py +7 -5
  18. webscout/Provider/GeminiProxy.py +1 -0
  19. webscout/Provider/GithubChat.py +4 -1
  20. webscout/Provider/Groq.py +1 -1
  21. webscout/Provider/HeckAI.py +8 -4
  22. webscout/Provider/Jadve.py +23 -38
  23. webscout/Provider/K2Think.py +308 -0
  24. webscout/Provider/Koboldai.py +8 -186
  25. webscout/Provider/LambdaChat.py +2 -4
  26. webscout/Provider/Nemotron.py +3 -4
  27. webscout/Provider/Netwrck.py +6 -8
  28. webscout/Provider/OLLAMA.py +1 -0
  29. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  30. webscout/Provider/OPENAI/FalconH1.py +2 -7
  31. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  32. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  33. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  34. webscout/Provider/OPENAI/PI.py +5 -4
  35. webscout/Provider/OPENAI/Qwen3.py +2 -3
  36. webscout/Provider/OPENAI/README.md +2 -1
  37. webscout/Provider/OPENAI/TogetherAI.py +52 -57
  38. webscout/Provider/OPENAI/TwoAI.py +3 -4
  39. webscout/Provider/OPENAI/__init__.py +17 -56
  40. webscout/Provider/OPENAI/ai4chat.py +313 -303
  41. webscout/Provider/OPENAI/base.py +9 -29
  42. webscout/Provider/OPENAI/chatgpt.py +7 -2
  43. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  44. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  45. webscout/Provider/OPENAI/deepinfra.py +12 -6
  46. webscout/Provider/OPENAI/e2b.py +60 -8
  47. webscout/Provider/OPENAI/flowith.py +4 -3
  48. webscout/Provider/OPENAI/generate_api_key.py +48 -0
  49. webscout/Provider/OPENAI/heckai.py +4 -1
  50. webscout/Provider/OPENAI/netwrck.py +9 -12
  51. webscout/Provider/OPENAI/refact.py +274 -0
  52. webscout/Provider/OPENAI/scirachat.py +6 -0
  53. webscout/Provider/OPENAI/textpollinations.py +3 -14
  54. webscout/Provider/OPENAI/toolbaz.py +14 -10
  55. webscout/Provider/OpenGPT.py +1 -1
  56. webscout/Provider/Openai.py +150 -402
  57. webscout/Provider/PI.py +1 -0
  58. webscout/Provider/Perplexitylabs.py +1 -2
  59. webscout/Provider/QwenLM.py +107 -89
  60. webscout/Provider/STT/__init__.py +17 -2
  61. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  62. webscout/Provider/StandardInput.py +1 -1
  63. webscout/Provider/TTI/__init__.py +18 -12
  64. webscout/Provider/TTI/bing.py +14 -2
  65. webscout/Provider/TTI/together.py +10 -9
  66. webscout/Provider/TTS/README.md +0 -1
  67. webscout/Provider/TTS/__init__.py +18 -11
  68. webscout/Provider/TTS/base.py +479 -159
  69. webscout/Provider/TTS/deepgram.py +409 -156
  70. webscout/Provider/TTS/elevenlabs.py +425 -111
  71. webscout/Provider/TTS/freetts.py +317 -140
  72. webscout/Provider/TTS/gesserit.py +192 -128
  73. webscout/Provider/TTS/murfai.py +248 -113
  74. webscout/Provider/TTS/openai_fm.py +347 -129
  75. webscout/Provider/TTS/speechma.py +620 -586
  76. webscout/Provider/TeachAnything.py +1 -0
  77. webscout/Provider/TextPollinationsAI.py +5 -15
  78. webscout/Provider/TogetherAI.py +136 -142
  79. webscout/Provider/TwoAI.py +53 -309
  80. webscout/Provider/TypliAI.py +2 -1
  81. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  82. webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
  83. webscout/Provider/Venice.py +2 -1
  84. webscout/Provider/VercelAI.py +1 -0
  85. webscout/Provider/WiseCat.py +2 -1
  86. webscout/Provider/WrDoChat.py +2 -1
  87. webscout/Provider/__init__.py +18 -174
  88. webscout/Provider/ai4chat.py +1 -1
  89. webscout/Provider/akashgpt.py +7 -10
  90. webscout/Provider/cerebras.py +194 -38
  91. webscout/Provider/chatglm.py +170 -83
  92. webscout/Provider/cleeai.py +1 -2
  93. webscout/Provider/deepseek_assistant.py +1 -1
  94. webscout/Provider/elmo.py +1 -1
  95. webscout/Provider/geminiapi.py +1 -1
  96. webscout/Provider/granite.py +1 -1
  97. webscout/Provider/hermes.py +1 -3
  98. webscout/Provider/julius.py +1 -0
  99. webscout/Provider/learnfastai.py +1 -1
  100. webscout/Provider/llama3mitril.py +1 -1
  101. webscout/Provider/llmchat.py +1 -1
  102. webscout/Provider/llmchatco.py +1 -1
  103. webscout/Provider/meta.py +3 -3
  104. webscout/Provider/oivscode.py +2 -2
  105. webscout/Provider/scira_chat.py +51 -124
  106. webscout/Provider/searchchat.py +1 -0
  107. webscout/Provider/sonus.py +1 -1
  108. webscout/Provider/toolbaz.py +15 -11
  109. webscout/Provider/turboseek.py +31 -22
  110. webscout/Provider/typefully.py +2 -1
  111. webscout/Provider/x0gpt.py +1 -0
  112. webscout/Provider/yep.py +2 -1
  113. webscout/conversation.py +22 -20
  114. webscout/sanitize.py +14 -10
  115. webscout/scout/README.md +20 -23
  116. webscout/scout/core/crawler.py +125 -38
  117. webscout/scout/core/scout.py +26 -5
  118. webscout/tempid.py +6 -0
  119. webscout/version.py +1 -1
  120. webscout/webscout_search.py +13 -6
  121. webscout/webscout_search_async.py +10 -8
  122. webscout/yep_search.py +13 -5
  123. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/METADATA +3 -1
  124. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/RECORD +132 -155
  125. webscout/Provider/AllenAI.py +0 -440
  126. webscout/Provider/Blackboxai.py +0 -793
  127. webscout/Provider/FreeGemini.py +0 -250
  128. webscout/Provider/Glider.py +0 -225
  129. webscout/Provider/Hunyuan.py +0 -283
  130. webscout/Provider/MCPCore.py +0 -322
  131. webscout/Provider/MiniMax.py +0 -207
  132. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  133. webscout/Provider/OPENAI/MiniMax.py +0 -298
  134. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  135. webscout/Provider/OPENAI/c4ai.py +0 -394
  136. webscout/Provider/OPENAI/copilot.py +0 -305
  137. webscout/Provider/OPENAI/glider.py +0 -330
  138. webscout/Provider/OPENAI/mcpcore.py +0 -431
  139. webscout/Provider/OPENAI/multichat.py +0 -378
  140. webscout/Provider/Reka.py +0 -214
  141. webscout/Provider/TTS/sthir.py +0 -94
  142. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  143. webscout/Provider/asksteve.py +0 -220
  144. webscout/Provider/copilot.py +0 -422
  145. webscout/Provider/freeaichat.py +0 -294
  146. webscout/Provider/koala.py +0 -182
  147. webscout/Provider/lmarena.py +0 -198
  148. webscout/Provider/monochat.py +0 -275
  149. webscout/Provider/multichat.py +0 -375
  150. webscout/Provider/scnet.py +0 -244
  151. webscout/Provider/talkai.py +0 -194
  152. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  153. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  154. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  155. /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
  156. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  157. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  158. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  159. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
@@ -1,244 +0,0 @@
1
- from curl_cffi.requests import Session
2
- from curl_cffi import CurlError
3
- import json
4
- import secrets
5
- from typing import Any, Dict, Optional, Generator, Union
6
-
7
- from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream
8
- from webscout.AIbase import Provider
9
- from webscout import exceptions
10
-
11
class SCNet(Provider):
    """
    Provider for the SCNet chatbot API.

    Streams chat completions from ``https://www.scnet.cn`` over SSE using a
    curl_cffi ``Session`` that impersonates Chrome.
    """

    # Model ids exposed by the SCNet backend, keyed both ways below.
    AVAILABLE_MODELS = [
        {"modelId": 2, "name": "Deepseek-r1-7B"},
        {"modelId": 3, "name": "Deepseek-r1-32B"},
        {"modelId": 5, "name": "Deepseek-r1-70B"},
        {"modelId": 7, "name": "QWQ-32B"},
        {"modelId": 8, "name": "minimax-text-01-456B"},
        {"modelId": 9, "name": "Qwen3-30B-A3B"},
        # Add more models here as needed
    ]
    MODEL_NAME_TO_ID = {m["name"]: m["modelId"] for m in AVAILABLE_MODELS}
    MODEL_ID_TO_NAME = {m["modelId"]: m["name"] for m in AVAILABLE_MODELS}

    def __init__(
        self,
        model: str = "QWQ-32B",
        is_conversation: bool = True,
        max_tokens: int = 2048,  # Note: max_tokens is not used by this API
        timeout: int = 30,
        intro: Optional[str] = ("You are a helpful, advanced LLM assistant. "
                                "You must always answer in English, regardless of the user's language. "
                                "If the user asks in another language, politely respond in English only. "
                                "Be clear, concise, and helpful."),
        filepath: Optional[str] = None,
        update_file: bool = True,
        proxies: Optional[dict] = None,
        history_offset: int = 0,  # Note: history_offset might not be fully effective due to API structure
        act: Optional[str] = None,
        system_prompt: str = (
            "You are a helpful, advanced LLM assistant. "
            "You must always answer in English, regardless of the user's language. "
            "If the user asks in another language, politely respond in English only. "
            "Be clear, concise, and helpful."
        ),
    ):
        """Initialize an SCNet provider.

        Args:
            model: Name of the backend model; must be a key of ``MODEL_NAME_TO_ID``.
            is_conversation: Keep conversational history between calls.
            max_tokens: Kept for interface parity; not sent to the API.
            timeout: Per-request timeout in seconds.
            intro: Conversation intro / persona prompt.
            filepath: Optional path for persisting conversation history.
            update_file: Whether to update the history file at ``filepath``.
            proxies: Optional proxy mapping for the underlying session.
            history_offset: Offset for truncating conversation history.
            act: Optional AwesomePrompts act key used as intro.
            system_prompt: System prompt prepended to every request payload.

        Raises:
            ValueError: If ``model`` is not a known model name.
        """
        if model not in self.MODEL_NAME_TO_ID:
            raise ValueError(f"Invalid model: {model}. Choose from: {list(self.MODEL_NAME_TO_ID.keys())}")
        self.model = model
        self.modelId = self.MODEL_NAME_TO_ID[model]
        self.system_prompt = system_prompt
        # curl_cffi session enables browser (TLS fingerprint) impersonation.
        self.session = Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response: Dict[str, Any] = {}
        self.proxies = proxies or {}
        self.cookies = {
            "Token": secrets.token_hex(16),  # Random per-instance session token
        }
        self.headers = {
            "accept": "text/event-stream",
            "content-type": "application/json",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0",
            "referer": "https://www.scnet.cn/ui/chatbot/temp_1744712663464",
            "origin": "https://www.scnet.cn",
            # Add sec-ch-ua headers if needed for impersonation consistency
        }
        self.url = "https://www.scnet.cn/acx/chatbot/v1/chat/completion"

        # Push headers/proxies/cookies into the curl_cffi session so every
        # request carries them automatically.
        self.session.headers.update(self.headers)
        self.session.proxies = self.proxies
        for name, value in self.cookies.items():
            self.session.cookies.set(name, value)

        # FIX: materialize as a tuple. The original used a bare generator
        # expression, which was exhausted by the first `in` membership test in
        # ask(), so any later call with an optimizer always failed.
        self.__available_optimizers = tuple(
            method for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(act, raise_not_found=True, default=None, case_insensitive=True)
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
        self.conversation.history_offset = history_offset

    @staticmethod
    def _scnet_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extract the text content from one SCNet stream JSON object.

        Returns ``None`` for non-dict chunks so sanitize_stream skips them.
        """
        if isinstance(chunk, dict):
            return chunk.get("content")
        return None

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """Send ``prompt`` to SCNet and return the response.

        Args:
            prompt: User prompt to send.
            stream: Yield chunks as they arrive instead of aggregating.
            raw: Yield/return raw strings instead of ``{"text": ...}`` dicts.
            optimizer: Optional name of an ``Optimizers`` method to apply.
            conversationally: Apply the optimizer to the full conversation
                prompt rather than just ``prompt``.

        Returns:
            A response dict (or raw string) when not streaming, otherwise a
            generator of chunks.

        Raises:
            exceptions.FailedToGenerateResponseError: On request failure or
                an unknown optimizer name.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
            else:
                raise exceptions.FailedToGenerateResponseError(f"Optimizer is not one of {list(self.__available_optimizers)}")

        payload = {
            "conversationId": "",
            "content": f"SYSTEM: {self.system_prompt} USER: {conversation_prompt}",
            "thinking": 0,
            "online": 0,
            "modelId": self.modelId,
            "textFile": [],
            "imageFile": [],
            "clusterId": ""
        }

        def for_stream():
            try:
                # Cookies are handled by the session object; impersonate a
                # recent Chrome build for TLS fingerprint consistency.
                response = self.session.post(
                    self.url,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome120"
                )
                response.raise_for_status()  # Check for HTTP errors

                streaming_text = ""
                # sanitize_stream strips the SSE "data:" framing, parses the
                # JSON payloads, and applies the extractor.
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),  # byte iterator
                    intro_value="data:",
                    to_json=True,  # Stream sends JSON
                    skip_markers=["[done]"],
                    content_extractor=self._scnet_extractor,
                    yield_raw_on_error=False  # Skip non-JSON / unextractable lines
                )

                for content_chunk in processed_stream:
                    # content_chunk is the string extracted by _scnet_extractor
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        yield {"text": content_chunk} if not raw else content_chunk
                # Update history and last response after stream finishes
                self.last_response = {"text": streaming_text}
                self.conversation.update_chat_history(prompt, streaming_text)

            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
            except Exception as e:  # e.g. HTTPError from raise_for_status
                # FIX: the original `getattr(e, 'response', None) and
                # getattr(e.response, 'text', '')` evaluated to None when the
                # exception carried no response; chained getattr yields ''.
                err_text = getattr(getattr(e, 'response', None), 'text', '')
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e

        def for_non_stream():
            # Aggregate the stream produced by for_stream().
            text = ""
            for chunk_data in for_stream():
                if isinstance(chunk_data, dict) and "text" in chunk_data:
                    text += chunk_data["text"]
                # Handle raw string case if raw=True was passed
                elif isinstance(chunk_data, str):
                    text += chunk_data
            # last_response and history are updated within for_stream
            return text if raw else self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
        raw: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """Return the model's reply as a string (or a generator of strings).

        Args mirror :meth:`ask`; ``raw`` passes raw chunks straight through.
        """
        def for_stream_chat():
            for response in self.ask(
                prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally
            ):
                if raw:
                    yield response
                else:
                    yield self.get_message(response)

        def for_non_stream_chat():
            response_data = self.ask(
                prompt, stream=False, raw=raw, optimizer=optimizer, conversationally=conversationally
            )
            if raw:
                # ask(raw=True) returns a plain string; anything else is a dict.
                return response_data if isinstance(response_data, str) else self.get_message(response_data)
            else:
                return self.get_message(response_data)

        return for_stream_chat() if stream else for_non_stream_chat()

    def get_message(self, response: dict) -> str:
        """Extract the text payload from a response dict produced by ask()."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
if __name__ == "__main__":
    # Smoke-test every advertised model (requires curl_cffi and network access).
    rule = "-" * 80
    print(rule)
    print(f"{'ModelId':<10} {'Model':<30} {'Status':<10} {'Response'}")
    print(rule)
    for model in SCNet.AVAILABLE_MODELS:
        model_id, model_name = model["modelId"], model["name"]
        try:
            provider = SCNet(model=model_name, timeout=60)
            stream = provider.chat("Say 'Hello' in one word", stream=True)
            print(f"\r{model_id:<10} {model_name:<30} {'Streaming...':<10}", end="", flush=True)
            collected = "".join(stream)

            trimmed = collected.strip()
            if trimmed:
                status = "✓"
                # Truncate long replies for the one-line report.
                shown = trimmed[:50] + "..." if len(trimmed) > 50 else trimmed
            else:
                status = "✗ (Stream)"
                shown = "Empty or invalid stream response"
            print(f"\r{model_id:<10} {model_name:<30} {status:<10} {shown}")
        except Exception as exc:
            print(f"\r{model_id:<10} {model_name:<30} {'✗':<10} {str(exc)}")
@@ -1,194 +0,0 @@
1
- import uuid
2
- import cloudscraper
3
- import json
4
- from typing import Union, Any, Dict, Optional, Generator
5
-
6
- from webscout.AIutel import Optimizers
7
- from webscout.AIutel import Conversation
8
- from webscout.AIutel import AwesomePrompts
9
- from webscout.AIbase import Provider
10
- from webscout import exceptions
11
- from webscout.litagent import LitAgent
12
-
13
class Talkai(Provider):
    """
    A class to interact with the Talkai.info API.

    Posts chat requests to ``https://talkai.info/chat/send/`` through a
    cloudscraper session and parses the SSE response stream.
    """

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2048,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: Optional[dict] = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "gpt-4o-mini",  # Default model
    ):
        """
        Initializes the Talkai.info API with given parameters.

        Args:
            is_conversation: Keep conversational history between calls.
            max_tokens: Kept for interface parity; not sent to the API.
            timeout: Per-request timeout in seconds.
            intro: Conversation intro / persona prompt.
            filepath: Optional path for persisting conversation history.
            update_file: Whether to update the history file at ``filepath``.
            proxies: Optional proxy mapping for the session (FIX: was a
                mutable default ``{}``; ``None`` now means "no proxies").
            history_offset: Offset for truncating conversation history.
            act: Optional AwesomePrompts act key used as intro.
            model: Backend model name sent in the request settings.
        """
        self.session = cloudscraper.create_scraper()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://talkai.info/chat/send/"
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.headers = {
            'Accept': 'application/json, text/event-stream',
            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
            'Content-Type': 'application/json',
            'Origin': 'https://talkai.info',
            'Referer': 'https://talkai.info/chat/',
            'User-Agent': LitAgent().random(),
            'Cookie': '_csrf-front=e19e203a958c74e439261f6860535403324c9ab2ede76449e6407e54e1f366afa%3A2%3A%7Bi%3A0%3Bs%3A11%3A%22_csrf-front%22%3Bi%3A1%3Bs%3A32%3A%22QbnGY7XS5q9i3JnDvi6KRzrOk0D6XFnk%22%3B%7D; _ga=GA1.1.1383924142.1734246140; _ym_uid=1723397035198647017; _ym_d=1734246141; _ym_isad=1; _ym_visorc=b; talkai-front=ngbj23of1t0ujg2raoa3l57vqe; _ga_FB7V9WMN30=GS1.1.1734246139.1.1734246143.0.0.0'
        }
        # FIX: materialize as a tuple. The original generator expression was
        # exhausted by the first `in` test in ask(), breaking later calls.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies or {}

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any]:
        """Chat with Talkai

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            dict: Response dictionary.
        Raises:
            exceptions.FailedToGenerateResponseError: On request failure or
                an unknown optimizer name.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
            else:
                raise exceptions.FailedToGenerateResponseError(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        payload = {
            "type": "chat",
            "messagesHistory": [
                {
                    "id": str(uuid.uuid4()),
                    "from": "you",
                    "content": conversation_prompt
                }
            ],
            "settings": {
                "model": self.model
            }
        }

        def for_stream():
            try:
                with self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
                    response.raise_for_status()

                    full_response = ""
                    for line in response.iter_lines():
                        if not line:
                            continue
                        decoded_line = line.decode('utf-8')
                        if 'event: trylimit' in decoded_line:
                            break  # Server signals the free-tier limit; stop reading.
                        if decoded_line.startswith('data:'):
                            # FIX: strip the 'data:' prefix plus at most one
                            # leading space (SSE framing). The old fixed
                            # [6:] slice dropped the first payload character
                            # on lines written without a space after 'data:'.
                            data = decoded_line[5:]
                            if data.startswith(' '):
                                data = data[1:]
                            full_response += data
                            yield data if raw else dict(text=data)

                    self.last_response.update(dict(text=full_response))
                    self.conversation.update_chat_history(
                        prompt, self.get_message(self.last_response)
                    )

            # FIX: `cloudscraper.exceptions` is a module, not an exception
            # class, so `except cloudscraper.exceptions` raised TypeError at
            # handling time instead of wrapping the failure.
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}") from e

        def for_non_stream():
            # Aggregate the streamed chunks into one response dict.
            full_response = ""
            for line in for_stream():
                full_response += line['text'] if not raw else line
            return dict(text=full_response)

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """Generate response `str`
        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            str: Response generated
        """

        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: Dict[str, Any]) -> str:
        """Retrieves message only from response.

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted.
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        # Un-escape literal "\n" sequences sent by the API. (The original
        # chained a second .replace('\\n\\n', ...) that could never match
        # after the first replace; it was dead code and is removed.)
        return response["text"].replace('\\n', '\n')
if __name__ == "__main__":
    # Manual smoke test: stream a completion to stdout (needs network access).
    bot = Talkai()
    for piece in bot.chat("write me about AI", stream=True):
        print(piece, end="", flush=True)
File without changes
File without changes
File without changes
File without changes