webscout-8.3.6-py3-none-any.whl → webscout-2025.10.11-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -58
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +33 -11
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -314
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -315
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +236 -236
- webscout/Provider/ChatSandbox.py +343 -342
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -207
- webscout/Provider/Deepinfra.py +370 -369
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -387
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -162
- webscout/Provider/GeminiProxy.py +167 -166
- webscout/Provider/GithubChat.py +371 -370
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -379
- webscout/Provider/Jadve.py +282 -297
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +206 -384
- webscout/Provider/LambdaChat.py +423 -425
- webscout/Provider/Nemotron.py +244 -245
- webscout/Provider/Netwrck.py +248 -247
- webscout/Provider/OLLAMA.py +395 -394
- webscout/Provider/OPENAI/Cloudflare.py +394 -395
- webscout/Provider/OPENAI/FalconH1.py +452 -457
- webscout/Provider/OPENAI/FreeGemini.py +297 -299
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +432 -329
- webscout/Provider/OPENAI/NEMOTRON.py +241 -244
- webscout/Provider/OPENAI/PI.py +428 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +466 -467
- webscout/Provider/OPENAI/__init__.py +33 -59
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +249 -269
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +593 -588
- webscout/Provider/OPENAI/chatgptclone.py +521 -524
- webscout/Provider/OPENAI/chatsandbox.py +202 -177
- webscout/Provider/OPENAI/deepinfra.py +319 -315
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -311
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -354
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -529
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -348
- webscout/Provider/OPENAI/toolbaz.py +419 -413
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +244 -496
- webscout/Provider/PI.py +405 -404
- webscout/Provider/Perplexitylabs.py +430 -431
- webscout/Provider/QwenLM.py +272 -254
- webscout/Provider/STT/__init__.py +32 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +257 -258
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -12
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -10
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -236
- webscout/Provider/TextPollinationsAI.py +311 -318
- webscout/Provider/TogetherAI.py +356 -357
- webscout/Provider/TwoAI.py +313 -569
- webscout/Provider/TypliAI.py +312 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +294 -294
- webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +198 -198
- webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +251 -250
- webscout/Provider/VercelAI.py +256 -255
- webscout/Provider/WiseCat.py +232 -231
- webscout/Provider/WrDoChat.py +367 -366
- webscout/Provider/__init__.py +33 -86
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -334
- webscout/Provider/cerebras.py +446 -340
- webscout/Provider/chatglm.py +394 -214
- webscout/Provider/cleeai.py +211 -212
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -265
- webscout/Provider/julius.py +223 -222
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +384 -457
- webscout/Provider/searchchat.py +292 -291
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -364
- webscout/Provider/turboseek.py +274 -265
- webscout/Provider/typefully.py +208 -207
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +372 -371
- webscout/__init__.py +30 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +664 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/webscout_search.py +1183 -1183
- webscout/webscout_search_async.py +649 -649
- webscout/yep_search.py +346 -346
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -936
- webscout-2025.10.11.dist-info/RECORD +300 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/GptOss.py +0 -207
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/Kimi.py +0 -445
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/Qwen3.py +0 -304
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/copilot.py +0 -321
- webscout/Provider/OPENAI/gptoss.py +0 -288
- webscout/Provider/OPENAI/kimi.py +0 -469
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -441
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- webscout/tempid.py +0 -128
- webscout-8.3.6.dist-info/RECORD +0 -327
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
webscout/Provider/elmo.py
CHANGED
```diff
@@ -1,283 +1,283 @@
 from curl_cffi.requests import Session
 from curl_cffi import CurlError
 import json
 from typing import Optional, Union, Any, Dict, Generator
 from webscout import exceptions
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout.litagent import LitAgent
 import re # Import re for the extractor


 class Elmo(Provider):
     """
     A class to interact with the Elmo.chat API.
     """
-
+    required_auth = False
     def __init__(
         self,
         is_conversation: bool = True,
         max_tokens: int = 600, # Note: max_tokens is not used by this API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
         update_file: bool = True,
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
         system_prompt: str = "You are a helpful AI assistant. Provide clear, concise, and well-structured information. Organize your responses into paragraphs for better readability.",
     ) -> None:
         """Instantiates Elmo

         Args:
             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
             timeout (int, optional): Http request timeout. Defaults to 30.
             intro (str, optional): Conversation introductory prompt. Defaults to None.
             filepath (str, optional): Path to file containing conversation history. Defaults to None.
             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
             proxies (dict, optional): Http request proxies. Defaults to {}.
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
             system_prompt (str, optional): System prompt for Elmo. Defaults to the provided string.
         """
         # Initialize curl_cffi Session
         self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://www.elmo.chat/api/v1/prompt"
         self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
         self.system_prompt = system_prompt
         self.headers = {
             "accept": "*/*",
             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
             "content-type": "text/plain;charset=UTF-8",
             "dnt": "1",
             "origin": "chrome-extension://ipnlcfhfdicbfbchfoihipknbaeenenm",
             "priority": "u=1, i",
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "cross-site",
         }

         self.__available_optimizers = (
             method
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
         # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
         self.session.proxies = proxies # Assign proxies directly

         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
             )
             if act
             else intro or Conversation.intro
         )
         self.conversation = Conversation(
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset

     @staticmethod
     def _elmo_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
         """Extracts content from the Elmo stream format '0:"..."'."""
         if isinstance(chunk, str):
             match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
             if match:
                 # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
                 content = match.group(1).encode().decode('unicode_escape')
                 return content.replace('\\\\', '\\').replace('\\"', '"')
         return None

     def ask(
         self,
         prompt: str,
         stream: bool = False, # API supports streaming
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[Dict[str, Any], Generator[Any, None, None]]: # Corrected return type hint
         """Chat with AI

         Args:
             prompt (str): Prompt to be send.
             stream (bool, optional): Flag for streaming response. Defaults to False.
             raw (bool, optional): Stream back raw response as received. Defaults to False.
             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
         Returns:
             dict : {}
         ```json
         {
             "text" : "How may I assist you today?"
         }
         ```json
         """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
             else:
                 raise Exception(
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )

         payload = {
             "metadata": {
                 "system": {"language": "en-US"},
                 "website": {
                     "url": "chrome-extension://ipnlcfhfdicbfbchfoihipknbaeenenm/options.html",
                     "origin": "chrome-extension://ipnlcfhfdicbfbchfoihipknbaeenenm",
                     "title": "Elmo Chat - Your AI Web Copilot",
                     "xpathIndexLength": 0,
                     "favicons": [],
                     "language": "en",
                     "content": "",
                     "type": "html",
                     "selection": "",
                     "hash": "d41d8cd98f00b204e9800998ecf8427e",
                 },
             },
             "regenerate": True,
             "conversation": [
                 {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt},
             ],
             "enableCache": False,
         }

         def for_stream():
             streaming_text = "" # Initialize outside try block
             try:
                 # Use curl_cffi session post with impersonate
                 # Note: The API expects 'text/plain' but we send JSON.
                 # If this fails, try sending json.dumps(payload) as data with 'Content-Type': 'application/json'
                 response = self.session.post(
                     self.api_endpoint,
                     # headers are set on the session, but content-type might need override if sending JSON
                     json=payload, # Sending as JSON
                     stream=True,
                     timeout=self.timeout,
                     impersonate="chrome110" # Use a common impersonation profile
                 )
                 response.raise_for_status() # Check for HTTP errors

                 # Use sanitize_stream
                 processed_stream = sanitize_stream(
                     data=response.iter_content(chunk_size=None), # Pass byte iterator
                     intro_value=None, # No simple prefix
                     to_json=False, # Content is text after extraction
                     content_extractor=self._elmo_extractor, # Use the specific extractor
                     yield_raw_on_error=True
                 )

                 for content_chunk in processed_stream:
                     if content_chunk and isinstance(content_chunk, str):
                         streaming_text += content_chunk
                         resp = dict(text=content_chunk)
                         yield resp if not raw else content_chunk

             except CurlError as e: # Catch CurlError
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
             except Exception as e: # Catch other potential exceptions (like HTTPError)
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e
             finally:
                 # Update history after stream finishes
                 self.last_response = dict(text=streaming_text)
                 self.conversation.update_chat_history(
                     prompt, streaming_text
                 )

         def for_non_stream():
             # Aggregate the stream using the updated for_stream logic
             collected_text = ""
             try:
                 # Ensure raw=False so for_stream yields dicts
                 for chunk_data in for_stream():
                     if isinstance(chunk_data, dict) and "text" in chunk_data:
                         collected_text += chunk_data["text"]
                     # Handle raw string case if raw=True was passed
                     elif raw and isinstance(chunk_data, str):
                         collected_text += chunk_data
             except Exception as e:
                 # If aggregation fails but some text was received, use it. Otherwise, re-raise.
                 if not collected_text:
                     raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e

             # Update last_response and history *after* aggregation for non-stream
             self.last_response = {"text": collected_text}
             self.conversation.update_chat_history(prompt, collected_text)
             # Return the final aggregated response dict or raw string
             return collected_text if raw else self.last_response

         return for_stream() if stream else for_non_stream()

     def chat(
         self,
         prompt: str,
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]: # Corrected return type hint
         """Generate response `str`
         Args:
             prompt (str): Prompt to be send.
             stream (bool, optional): Flag for streaming response. Defaults to False.
             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
         Returns:
             str: Response generated
         """

         def for_stream_chat(): # Renamed inner function
             # ask() yields dicts or strings when streaming
             gen = self.ask(
                 prompt, stream=True, raw=False, # Ensure ask yields dicts
                 optimizer=optimizer, conversationally=conversationally
             )
             for response_dict in gen:
                 yield self.get_message(response_dict) # get_message expects dict

         def for_non_stream_chat(): # Renamed inner function
             # ask() returns dict or str when not streaming
             response_data = self.ask(
                 prompt,
                 stream=False,
                 raw=False, # Ensure ask returns dict
                 optimizer=optimizer,
                 conversationally=conversationally,
             )
             return self.get_message(response_data) # get_message expects dict

         return for_stream_chat() if stream else for_non_stream_chat() # Use renamed functions

     def get_message(self, response: dict) -> str:
         """Retrieves message only from response

         Args:
             response (dict): Response generated by `self.ask`

         Returns:
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response.get("text", "") # Use .get for safety


 if __name__ == "__main__":
     # Ensure curl_cffi is installed
     from rich import print
     ai = Elmo()
     response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
```
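As a reading aid, here is a minimal standalone sketch of the stream parsing shown above. It applies the same `0:"..."` regex and escape handling as `_elmo_extractor` to single chunks; the helper name `extract_elmo_chunk` and the sample inputs are illustrative assumptions, not part of the package.

```python
# Standalone sketch of the `_elmo_extractor` logic from the diff above.
# The regex and escape handling are copied from the provider; the sample
# chunks are hypothetical, made up to illustrate the 0:"..." framing.
import re
from typing import Optional

def extract_elmo_chunk(chunk: str) -> Optional[str]:
    # Match 0:"..." followed by a comma or the end of the chunk.
    match = re.search(r'0:"(.*?)"(?=,|$)', chunk)
    if match is None:
        return None
    # Resolve \uXXXX escapes, then unescape any remaining quotes/backslashes.
    content = match.group(1).encode().decode("unicode_escape")
    return content.replace("\\\\", "\\").replace('\\"', '"')

print(extract_elmo_chunk('0:"Caf\\u00e9 says \\"hi\\"",'))  # -> Café says "hi"
print(extract_elmo_chunk("e:done"))                          # -> None (non-content frame)
```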