webscout 8.2.7__py3-none-any.whl → 8.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -249
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Litlogger/__init__.py +67 -0
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +23 -0
- webscout/Litlogger/core/logger.py +165 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +33 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +173 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +249 -0
- webscout/Litlogger/styles/formats.py +458 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +153 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +359 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +228 -0
- webscout/Provider/AISEARCH/genspark_search.py +350 -0
- webscout/Provider/AISEARCH/hika_search.py +198 -0
- webscout/Provider/AISEARCH/iask_search.py +436 -0
- webscout/Provider/AISEARCH/monica_search.py +246 -0
- webscout/Provider/AISEARCH/scira_search.py +324 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +673 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +370 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +285 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +36 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +488 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +319 -0
- webscout/Provider/OPENAI/e2b.py +1356 -0
- webscout/Provider/OPENAI/exaai.py +411 -0
- webscout/Provider/OPENAI/exachat.py +443 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +323 -0
- webscout/Provider/OPENAI/groq.py +361 -0
- webscout/Provider/OPENAI/heckai.py +307 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +383 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +356 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/scirachat.py +471 -0
- webscout/Provider/OPENAI/sonus.py +303 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -0
- webscout/Provider/OPENAI/uncovrAI.py +462 -0
- webscout/Provider/OPENAI/utils.py +307 -0
- webscout/Provider/OPENAI/venice.py +425 -0
- webscout/Provider/OPENAI/wisecat.py +381 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +378 -0
- webscout/Provider/OPENAI/yep.py +356 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -0
- webscout/Provider/TTI/AiForce/async_aiforce.py +224 -0
- webscout/Provider/TTI/AiForce/sync_aiforce.py +245 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +181 -0
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/ImgSys/__init__.py +23 -0
- webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -0
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +311 -0
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +265 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/__init__.py +12 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/aiarta/__init__.py +2 -0
- webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
- webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/artbit/__init__.py +22 -0
- webscout/Provider/TTI/artbit/async_artbit.py +155 -0
- webscout/Provider/TTI/artbit/sync_artbit.py +148 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/huggingface/__init__.py +22 -0
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/piclumen/__init__.py +23 -0
- webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
- webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTI/talkai/__init__.py +4 -0
- webscout/Provider/TTI/talkai/async_talkai.py +229 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +9 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +280 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +172 -0
- webscout/Provider/ai4chat.py +149 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/scout/README.md +402 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +140 -0
- webscout/scout/core/scout.py +568 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +460 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +262 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/METADATA +115 -60
- webscout-8.2.8.dist-info/RECORD +334 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,263 @@
|
|
|
1
|
+
import requests
|
|
2
|
+
import json
|
|
3
|
+
from typing import Dict, Optional, Generator, Union, Any
|
|
4
|
+
from uuid import uuid4
|
|
5
|
+
import time
|
|
6
|
+
import base64
|
|
7
|
+
import random
|
|
8
|
+
|
|
9
|
+
from webscout.AIbase import AISearch
|
|
10
|
+
from webscout import exceptions
|
|
11
|
+
from webscout import LitAgent
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class Response:
    """Plain-text wrapper for Liner API output.

    Wraps a text payload so the object prints as the text itself, which lets
    callers ``print(chunk)`` directly while still exposing the raw string on
    the ``text`` attribute.

    Attributes:
        text (str): The text content of the response.

    Example:
        >>> response = Response("Hello, world!")
        >>> print(response)
        Hello, world!
        >>> str(response)
        'Hello, world!'
    """

    def __init__(self, text: str) -> None:
        # Public attribute: callers read the payload directly.
        self.text = text

    def __str__(self) -> str:
        return self.text

    # repr deliberately mirrors str so streamed chunks display cleanly in
    # interactive consoles and rich-print output.
    __repr__ = __str__
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class Liner(AISearch):
    """Client for the Liner AI search API.

    Liner answers search queries with AI-generated text streamed from a
    server-sent-events endpoint. Both streaming and non-streaming usage is
    supported; authentication is taken from a cookies JSON file exported from
    a logged-in getliner.com browser session.

    Basic Usage:
        >>> from webscout import Liner
        >>> ai = Liner(cookies_path="cookies.json")
        >>> # Non-streaming: returns the complete answer
        >>> response = ai.search("What is Python?")
        >>> print(response)
        Python is a high-level programming language...

        >>> # Streaming: yields chunks as they arrive
        >>> for chunk in ai.search("Tell me about AI", stream=True):
        ...     print(chunk, end="", flush=True)

        >>> # Raw format: {'text': ...} dictionaries
        >>> for chunk in ai.search("Hello", stream=True, raw=True):
        ...     print(chunk)
        {'text': 'Hello'}
        {'text': ' there!'}

    Args:
        cookies_path (str): Path to the cookies JSON file.
        timeout (int, optional): Request timeout in seconds. Defaults to 600.
        proxies (dict, optional): Proxy configuration for requests. Defaults to None.
        deep_search (bool, optional): Enable deep research mode. Defaults to True.
        reasoning_mode (bool, optional): Enable reasoning mode. Defaults to False.
    """

    def __init__(
        self,
        cookies_path: str,
        timeout: int = 600,
        proxies: Optional[dict] = None,
        deep_search: bool = True,
        reasoning_mode: bool = False,
    ):
        """Initialize the Liner API client.

        Args:
            cookies_path (str): Path to the cookies JSON file.
            timeout (int, optional): Request timeout in seconds. Defaults to 600.
            proxies (dict, optional): Proxy configuration for requests. Defaults to None.
            deep_search (bool, optional): Enable deep research mode. Defaults to True.
            reasoning_mode (bool, optional): Enable reasoning mode. Defaults to False.

        Raises:
            ValueError: If the cookies file cannot be loaded or parsed.
        """
        self.session = requests.Session()
        self.chat_endpoint = "https://getliner.com/lisa/v1/answer"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.cookies_path = cookies_path
        self.deep_search = deep_search
        self.reasoning_mode = reasoning_mode

        # The API expects numeric space/thread/message/user identifiers; random
        # values are used since this client starts a fresh anonymous thread.
        self.space_id = random.randint(10000000, 99999999)
        self.thread_id = random.randint(10000000, 99999999)
        self.user_message_id = random.randint(100000000, 999999999)
        self.user_id = random.randint(1000000, 9999999)

        self.headers = {
            "accept": "text/event-stream",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://getliner.com",
            "referer": f"https://getliner.com/search/s/{self.space_id}/t/t_{uuid4()}?mode=temp&msg-entry-type=main&build-id=kwJaNRjnCKjh7PijZgqV2",
            "sec-ch-ua": '"Chromium";v="134", "Not:A-Brand";v="24", "Microsoft Edge";v="134"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "sec-gpc": "1",
            "user-agent": LitAgent().random()
        }

        # Load cookies up front and fail fast if they are unusable — every
        # request needs an authenticated session.
        self.cookies = self._load_cookies()
        if not self.cookies:
            raise ValueError("Failed to load cookies from file")

        self.session.headers.update(self.headers)
        self.session.cookies.update(self.cookies)
        self.session.proxies = proxies or {}

    def _load_cookies(self) -> Optional[Dict[str, str]]:
        """Load cookies from a JSON file.

        The file is expected to contain a list of objects, each with at least
        ``name`` and ``value`` keys (the format produced by common browser
        cookie exporters).

        Returns:
            Optional[Dict[str, str]]: Mapping of cookie name to value on
            success, ``None`` on any load/parse error (an error message is
            printed to stdout).
        """
        try:
            with open(self.cookies_path, 'r') as f:
                cookies_data = json.load(f)
            return {cookie['name']: cookie['value'] for cookie in cookies_data}
        except FileNotFoundError:
            print(f"Error: {self.cookies_path} file not found!")
            return None
        except json.JSONDecodeError:
            print(f"Error: Invalid JSON format in {self.cookies_path}!")
            return None
        except KeyError:
            print(f"Error: Invalid cookie format in {self.cookies_path}! Each cookie must have 'name' and 'value' keys.")
            return None

    def search(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
    ) -> Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
        """Search using the Liner API and get AI-generated responses.

        Args:
            prompt (str): The search query or prompt to send to the API.
            stream (bool, optional): If True, yields response chunks as they
                arrive. If False, returns the complete response. Defaults to False.
            raw (bool, optional): If True, produces raw ``{'text': ...}``
                dictionaries instead of :class:`Response` objects. Defaults to False.

        Returns:
            - stream=True: a generator of chunks (dicts when ``raw``,
              :class:`Response` otherwise).
            - stream=False, raw=False: a single :class:`Response` with the
              full answer text (also stored on ``self.last_response``).
            - stream=False, raw=True: a list of raw ``{'text': ...}`` dicts.

        Raises:
            APIConnectionError: If the API request fails.
        """
        payload = {
            "spaceId": self.space_id,
            "threadId": self.thread_id,
            "userMessageId": self.user_message_id,
            "userId": self.user_id,
            "query": prompt,
            "agentId": "liner",
            "platform": "web",
            "regenerate": False,
            "showReferenceChunks": True,
            "mode": "general",
            "answerMode": "search",
            "isReasoningMode": self.reasoning_mode,
            "experimentId": random.randint(80, 90),
            "modelType": "liner",
            "experimentVariants": [],
            "isDeepResearchMode": self.deep_search
        }

        def for_stream():
            # Streams the SSE response line by line, yielding each reasoning
            # and answer fragment as it arrives.
            try:
                with self.session.post(
                    self.chat_endpoint,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                ) as response:
                    if not response.ok:
                        raise exceptions.APIConnectionError(
                            f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                        )

                    for line in response.iter_lines(decode_unicode=True):
                        # The server signals completion with a dedicated event.
                        if line == "event:finish_answer":
                            break

                        if line.startswith('data:'):
                            try:
                                data = json.loads(line[5:])  # strip 'data:' prefix
                            except json.JSONDecodeError:
                                # Non-JSON keep-alive / event lines are skipped.
                                continue

                            # Reasoning fragments are only surfaced when the
                            # client was created with reasoning_mode=True.
                            if self.reasoning_mode and 'reasoning' in data:
                                yield {"text": data['reasoning']} if raw else Response(data['reasoning'])

                            if 'answer' in data:
                                yield {"text": data['answer']} if raw else Response(data['answer'])

            except requests.exceptions.RequestException as e:
                raise exceptions.APIConnectionError(f"Request failed: {e}") from e

        def for_non_stream():
            # BUG FIX: the previous implementation contained a ``yield`` and
            # was therefore itself a generator function, so stream=False never
            # actually returned a complete Response — callers got an unstarted
            # generator. Consume the stream eagerly here instead.
            if raw:
                return list(for_stream())
            full_response = "".join(str(chunk) for chunk in for_stream())
            self.last_response = Response(full_response)
            return self.last_response

        return for_stream() if stream else for_non_stream()
|
|
255
|
+
|
|
256
|
+
|
|
257
|
+
if __name__ == "__main__":
    from rich import print

    # Quick manual smoke test: prompt once, stream the answer to stdout.
    ai = Liner(cookies_path="cookies.json")
    for chunk in ai.search(input(">>> "), stream=True, raw=False):
        print(chunk, end="", flush=True)
|
|
@@ -0,0 +1,351 @@
|
|
|
1
|
+
############################################################
|
|
2
|
+
# NOT WORKING
|
|
3
|
+
############################################################
|
|
4
|
+
|
|
5
|
+
import requests
|
|
6
|
+
import json
|
|
7
|
+
from typing import Union, Any, Dict, Optional, Generator
|
|
8
|
+
|
|
9
|
+
from webscout.AIutel import Optimizers
|
|
10
|
+
from webscout.AIutel import Conversation
|
|
11
|
+
from webscout.AIutel import AwesomePrompts
|
|
12
|
+
from webscout.AIbase import Provider
|
|
13
|
+
from webscout import exceptions
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class oivscode(Provider):
|
|
17
|
+
"""
|
|
18
|
+
A class to interact with a test API.
|
|
19
|
+
"""
|
|
20
|
+
AVAILABLE_MODELS = [
|
|
21
|
+
"deepseek/deepseek-chat",
|
|
22
|
+
"claude-3-5-haiku-20241022",
|
|
23
|
+
"gpt-4o-mini",
|
|
24
|
+
"claude-3-5-sonnet-20240620",
|
|
25
|
+
"ours/deepseek-chat",
|
|
26
|
+
"custom/deepseek",
|
|
27
|
+
"Qwen/Qwen2.5-72B-Instruct-Turbo",
|
|
28
|
+
"Qwen/Qwen2.5-Coder-32B-Instruct",
|
|
29
|
+
"claude-3-5-sonnet-20241022",
|
|
30
|
+
"omni-moderation-latest",
|
|
31
|
+
"omni-moderation-latest-intents",
|
|
32
|
+
"omni-moderation-2024-09-26",
|
|
33
|
+
"gpt-4",
|
|
34
|
+
"gpt-4o",
|
|
35
|
+
"gpt-4o-audio-preview",
|
|
36
|
+
"gpt-4o-audio-preview-2024-12-17",
|
|
37
|
+
"gpt-4o-audio-preview-2024-10-01",
|
|
38
|
+
"gpt-4o-mini-audio-preview-2024-12-17",
|
|
39
|
+
"gpt-4o-mini",
|
|
40
|
+
"gpt-4o-mini-2024-07-18",
|
|
41
|
+
"o1",
|
|
42
|
+
"o1-mini",
|
|
43
|
+
"o1-mini-2024-09-12",
|
|
44
|
+
"o1-preview",
|
|
45
|
+
"o1-preview-2024-09-12",
|
|
46
|
+
"o1-2024-12-17",
|
|
47
|
+
"chatgpt-4o-latest",
|
|
48
|
+
"gpt-4o-2024-05-13",
|
|
49
|
+
"gpt-4o-2024-08-06",
|
|
50
|
+
"gpt-4o-2024-11-20",
|
|
51
|
+
"gpt-4o-realtime-preview-2024-10-01",
|
|
52
|
+
"gpt-4o-realtime-preview",
|
|
53
|
+
"gpt-4o-realtime-preview-2024-12-17",
|
|
54
|
+
"gpt-4o-mini-realtime-preview",
|
|
55
|
+
"gpt-4o-mini-realtime-preview-2024-12-17",
|
|
56
|
+
"gpt-4-turbo-preview",
|
|
57
|
+
"gpt-4-0314",
|
|
58
|
+
"gpt-4-0613",
|
|
59
|
+
"gpt-4-32k",
|
|
60
|
+
"gpt-4-32k-0314",
|
|
61
|
+
"gpt-4-32k-0613",
|
|
62
|
+
"gpt-4-turbo",
|
|
63
|
+
"gpt-4-turbo-2024-04-09",
|
|
64
|
+
"gpt-4-1106-preview",
|
|
65
|
+
"gpt-4-0125-preview",
|
|
66
|
+
"gpt-4-vision-preview",
|
|
67
|
+
"gpt-4-1106-vision-preview",
|
|
68
|
+
"gpt-3.5-turbo",
|
|
69
|
+
"gpt-3.5-turbo-0301",
|
|
70
|
+
"gpt-3.5-turbo-0613",
|
|
71
|
+
"gpt-3.5-turbo-1106",
|
|
72
|
+
"gpt-3.5-turbo-0125",
|
|
73
|
+
"gpt-3.5-turbo-16k",
|
|
74
|
+
"gpt-3.5-turbo-16k-0613",
|
|
75
|
+
"text-embedding-3-large",
|
|
76
|
+
"text-embedding-3-small",
|
|
77
|
+
"text-embedding-ada-002",
|
|
78
|
+
"text-embedding-ada-002-v2",
|
|
79
|
+
"text-moderation-stable",
|
|
80
|
+
"text-moderation-007",
|
|
81
|
+
"text-moderation-latest",
|
|
82
|
+
"256-x-256/dall-e-2",
|
|
83
|
+
"512-x-512/dall-e-2",
|
|
84
|
+
"1024-x-1024/dall-e-2",
|
|
85
|
+
"hd/1024-x-1792/dall-e-3",
|
|
86
|
+
"hd/1792-x-1024/dall-e-3",
|
|
87
|
+
"hd/1024-x-1024/dall-e-3",
|
|
88
|
+
"standard/1024-x-1792/dall-e-3",
|
|
89
|
+
"standard/1792-x-1024/dall-e-3",
|
|
90
|
+
"standard/1024-x-1024/dall-e-3",
|
|
91
|
+
"whisper-1",
|
|
92
|
+
"tts-1",
|
|
93
|
+
"tts-1-hd",
|
|
94
|
+
"ft:davinci-002",
|
|
95
|
+
"ft:babbage-002",
|
|
96
|
+
"babbage-002",
|
|
97
|
+
"davinci-002",
|
|
98
|
+
"gpt-3.5-turbo-instruct",
|
|
99
|
+
"gpt-3.5-turbo-instruct-0914",
|
|
100
|
+
"claude-instant-1",
|
|
101
|
+
"claude-instant-1.2",
|
|
102
|
+
"claude-2",
|
|
103
|
+
"claude-2.1",
|
|
104
|
+
"claude-3-haiku-20240307",
|
|
105
|
+
"claude-3-5-haiku-20241022",
|
|
106
|
+
"claude-3-opus-20240229",
|
|
107
|
+
"claude-3-sonnet-20240229",
|
|
108
|
+
"claude-3-5-sonnet-20240620",
|
|
109
|
+
"claude-3-5-sonnet-20241022",
|
|
110
|
+
"togethercomputer/llama-2-70b-chat",
|
|
111
|
+
"togethercomputer/llama-2-70b",
|
|
112
|
+
"togethercomputer/LLaMA-2-7B-32K",
|
|
113
|
+
"togethercomputer/Llama-2-7B-32K-Instruct",
|
|
114
|
+
"togethercomputer/llama-2-7b",
|
|
115
|
+
"togethercomputer/falcon-40b-instruct",
|
|
116
|
+
"togethercomputer/falcon-7b-instruct",
|
|
117
|
+
"togethercomputer/alpaca-7b",
|
|
118
|
+
"HuggingFaceH4/starchat-alpha",
|
|
119
|
+
"togethercomputer/CodeLlama-34b",
|
|
120
|
+
"togethercomputer/CodeLlama-34b-Instruct",
|
|
121
|
+
"togethercomputer/CodeLlama-34b-Python",
|
|
122
|
+
"defog/sqlcoder",
|
|
123
|
+
"NumbersStation/nsql-llama-2-7B",
|
|
124
|
+
"WizardLM/WizardCoder-15B-V1.0",
|
|
125
|
+
"WizardLM/WizardCoder-Python-34B-V1.0",
|
|
126
|
+
"NousResearch/Nous-Hermes-Llama2-13b",
|
|
127
|
+
"Austism/chronos-hermes-13b",
|
|
128
|
+
"upstage/SOLAR-0-70b-16bit",
|
|
129
|
+
"WizardLM/WizardLM-70B-V1.0",
|
|
130
|
+
"deepseek/deepseek-chat",
|
|
131
|
+
"deepseek/deepseek-coder",
|
|
132
|
+
"fireworks_ai/accounts/fireworks/models/llama-v3p2-1b-instruct",
|
|
133
|
+
"fireworks_ai/accounts/fireworks/models/llama-v3p2-3b-instruct",
|
|
134
|
+
"fireworks_ai/accounts/fireworks/models/llama-v3p2-11b-vision-instruct",
|
|
135
|
+
"accounts/fireworks/models/llama-v3p2-90b-vision-instruct",
|
|
136
|
+
"fireworks_ai/accounts/fireworks/models/firefunction-v2",
|
|
137
|
+
"fireworks_ai/accounts/fireworks/models/mixtral-8x22b-instruct-hf",
|
|
138
|
+
"fireworks_ai/accounts/fireworks/models/qwen2-72b-instruct",
|
|
139
|
+
"fireworks_ai/accounts/fireworks/models/qwen2p5-coder-32b-instruct",
|
|
140
|
+
"fireworks_ai/accounts/fireworks/models/yi-large",
|
|
141
|
+
"fireworks_ai/accounts/fireworks/models/deepseek-coder-v2-instruct",
|
|
142
|
+
"fireworks_ai/accounts/fireworks/models/deepseek-v3",
|
|
143
|
+
"fireworks_ai/nomic-ai/nomic-embed-text-v1.5",
|
|
144
|
+
"fireworks_ai/nomic-ai/nomic-embed-text-v1",
|
|
145
|
+
"fireworks_ai/WhereIsAI/UAE-Large-V1",
|
|
146
|
+
"fireworks_ai/thenlper/gte-large",
|
|
147
|
+
"fireworks_ai/thenlper/gte-base",
|
|
148
|
+
]
|
|
149
|
+
|
|
150
|
+
def __init__(
    self,
    is_conversation: bool = True,
    max_tokens: int = 1024,
    timeout: int = 30,
    intro: str = None,
    filepath: str = None,
    update_file: bool = True,
    proxies: dict = None,
    history_offset: int = 10250,
    act: str = None,
    model: str = "claude-3-5-sonnet-20240620",
    system_prompt: str = "You are a helpful AI assistant.",
):
    """Initializes the oivscode client.

    Args:
        is_conversation (bool, optional): Maintain conversational history. Defaults to True.
        max_tokens (int, optional): Maximum tokens to sample. Defaults to 1024.
        timeout (int, optional): HTTP request timeout in seconds. Defaults to 30.
        intro (str, optional): Conversation introductory prompt. Defaults to None.
        filepath (str, optional): Path to a file for persisting conversation history. Defaults to None.
        update_file (bool, optional): Append new prompts and responses to ``filepath``. Defaults to True.
        proxies (dict, optional): Proxy mapping for the underlying ``requests`` session.
            Defaults to None (no proxies). Kept ``None`` rather than ``{}`` to avoid a
            shared mutable default argument.
        history_offset (int, optional): Conversation history truncation offset. Defaults to 10250.
        act (str, optional): Awesome-prompt key/index used to pick a persona intro. Defaults to None.
        model (str, optional): Model identifier; must be one of ``AVAILABLE_MODELS``.
        system_prompt (str, optional): System message sent with every request.

    Raises:
        ValueError: If ``model`` is not listed in ``AVAILABLE_MODELS``.
    """
    if model not in self.AVAILABLE_MODELS:
        raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

    self.session = requests.Session()
    self.is_conversation = is_conversation
    self.max_tokens_to_sample = max_tokens
    self.api_endpoint = "https://oi-vscode-server.onrender.com/v1/chat/completions"
    self.timeout = timeout
    self.last_response = {}
    self.model = model
    self.system_prompt = system_prompt
    self.headers = {
        "accept": "*/*",
        "accept-language": "en-US,en;q=0.9,en-GB;q=0.8,en-IN;q=0.7",
        "cache-control": "no-cache",
        "content-type": "application/json",
        "pragma": "no-cache",
        "priority": "u=1, i",
        "sec-ch-ua": '"Not A(Brand";v="8", "Chromium";v="132", "Microsoft Edge";v="132"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"Windows"',
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-site"
    }

    # BUG FIX: was a generator expression, which is exhausted after the first
    # membership test in ask(); a second optimizer lookup would always fail.
    # A tuple supports repeated `in` checks and renders readably in messages.
    self.__available_optimizers = tuple(
        method
        for method in dir(Optimizers)
        if callable(getattr(Optimizers, method)) and not method.startswith("__")
    )
    self.session.headers.update(self.headers)
    Conversation.intro = (
        AwesomePrompts().get_act(
            act, raise_not_found=True, default=None, case_insensitive=True
        )
        if act
        else intro or Conversation.intro
    )
    self.conversation = Conversation(
        is_conversation, self.max_tokens_to_sample, filepath, update_file
    )
    self.conversation.history_offset = history_offset
    # BUG FIX: `proxies: dict = {}` was a shared mutable default argument;
    # normalize None to a fresh empty dict here instead.
    self.session.proxies = proxies if proxies is not None else {}
def ask(
    self,
    prompt: str,
    stream: bool = False,
    raw: bool = False,
    optimizer: str = None,
    conversationally: bool = False,
) -> Union[Dict[str, Any], Generator[Any, None, None]]:
    """Chat with AI

    Args:
        prompt (str): Prompt to be send.
        stream (bool, optional): Flag for streaming response. Defaults to False.
        raw (bool, optional): Stream back raw response as received. Defaults to False.
        optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
        conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.

    Returns:
        dict or generator:
            If stream is False, returns a dict
            If stream is True, returns a generator

    Raises:
        Exception: If ``optimizer`` is not a recognized optimizer name.
        exceptions.FailedToGenerateResponseError: On a non-OK HTTP response.
    """
    conversation_prompt = self.conversation.gen_complete_prompt(prompt)
    if optimizer:
        if optimizer in self.__available_optimizers:
            conversation_prompt = getattr(Optimizers, optimizer)(
                conversation_prompt if conversationally else prompt
            )
        else:
            raise Exception(
                f"Optimizer is not one of {self.__available_optimizers}"
            )

    payload = {
        "model": self.model,
        "stream": stream,
        "messages": [
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": conversation_prompt}
        ]
    }

    def for_stream():
        response = self.session.post(
            self.api_endpoint, json=payload, stream=True, timeout=self.timeout
        )
        if not response.ok:
            raise exceptions.FailedToGenerateResponseError(
                f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
            )
        # BUG FIX: removed leftover debug `from rich import print` /
        # `print(response.text)`. Accessing `.text` on a stream=True response
        # read the entire body up front, defeating streaming and dumping the
        # raw payload to stdout.
        message_load = ""
        for value in response.iter_lines(
            decode_unicode=True,
            delimiter="" if raw else "data:",
            chunk_size=64,
        ):
            try:
                resp = json.loads(value)
                incomplete_message = self.get_message(resp)
                if incomplete_message:
                    # Accumulate the full message so far into the chunk we yield.
                    message_load += incomplete_message
                    resp["choices"][0]["delta"]["content"] = message_load
                    self.last_response.update(resp)
                    yield value if raw else resp
                elif raw:
                    yield value
            except json.decoder.JSONDecodeError:
                # Non-JSON keep-alive / terminator lines (e.g. "[DONE]") are skipped.
                pass
        self.conversation.update_chat_history(
            prompt, self.get_message(self.last_response)
        )

    def for_non_stream():
        # BUG FIX: original posted to `self.chat_endpoint`, which is never
        # assigned (`__init__` sets `self.api_endpoint`), so every
        # non-streaming call raised AttributeError.
        response = self.session.post(
            self.api_endpoint, json=payload, stream=False, timeout=self.timeout
        )
        if (
            not response.ok
            or not response.headers.get("Content-Type", "") == "application/json"
        ):
            raise exceptions.FailedToGenerateResponseError(
                f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
            )
        resp = response.json()
        self.last_response.update(resp)
        self.conversation.update_chat_history(
            prompt, self.get_message(self.last_response)
        )
        return resp

    return for_stream() if stream else for_non_stream()
def chat(
    self,
    prompt: str,
    stream: bool = False,
    optimizer: str = None,
    conversationally: bool = False,
) -> Union[str, Generator[str, None, None]]:
    """Generate a response string for ``prompt``.

    Args:
        prompt (str): Prompt to be send.
        stream (bool, optional): Flag for streaming response. Defaults to False.
        optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
        conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.

    Returns:
        str: Response generated
    """
    if stream:
        # Lazily pull chunks from ask() and surface only the message text.
        def streamed():
            for chunk in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(chunk)

        return streamed()

    result = self.ask(
        prompt,
        False,
        optimizer=optimizer,
        conversationally=conversationally,
    )
    return self.get_message(result)
def get_message(self, response: dict) -> str:
    """Retrieves message only from response.

    Args:
        response (dict): A response dict produced by :meth:`ask` — either an
            OpenAI-style chat-completion payload (``choices[0].delta.content``
            for streamed chunks, ``choices[0].message.content`` otherwise) or
            a legacy ``{"text": ...}`` dict.

    Returns:
        str: The message content; an empty string when no content is present.
    """
    assert isinstance(response, dict), "Response should be of dict data-type only"
    # Legacy shape kept for backward compatibility.
    if "text" in response:
        return response["text"]
    # BUG FIX: the original returned response["text"] unconditionally, which
    # raised KeyError on the OpenAI-style payloads that ask() actually builds
    # and mutates (it writes resp["choices"][0]["delta"]["content"]).
    try:
        choice = response["choices"][0]
        if "delta" in choice:
            return choice["delta"].get("content") or ""
        return choice.get("message", {}).get("content") or ""
    except (KeyError, IndexError, TypeError):
        return ""
if __name__ == "__main__":
    # Quick manual smoke test: stream one reply for a prompt typed on stdin.
    from rich import print

    bot = oivscode()
    for piece in bot.chat(input(">>> "), stream=True):
        print(piece, end="", flush=True)