webscout-8.2.6-py3-none-any.whl → webscout-8.2.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +97 -87
- webscout/version.py +1 -1
- {webscout-8.2.6.dist-info → webscout-8.2.7.dist-info}/METADATA +2 -15
- webscout-8.2.7.dist-info/RECORD +26 -0
- {webscout-8.2.6.dist-info → webscout-8.2.7.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/entry_points.txt +3 -0
- webscout-8.2.7.dist-info/top_level.txt +1 -0
- webscout/Extra/GitToolkit/__init__.py +0 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +0 -12
- webscout/Extra/GitToolkit/gitapi/repository.py +0 -195
- webscout/Extra/GitToolkit/gitapi/user.py +0 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +0 -62
- webscout/Extra/YTToolkit/YTdownloader.py +0 -957
- webscout/Extra/YTToolkit/__init__.py +0 -3
- webscout/Extra/YTToolkit/transcriber.py +0 -476
- webscout/Extra/YTToolkit/ytapi/__init__.py +0 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +0 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +0 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +0 -45
- webscout/Extra/YTToolkit/ytapi/https.py +0 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +0 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +0 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +0 -8
- webscout/Extra/YTToolkit/ytapi/query.py +0 -40
- webscout/Extra/YTToolkit/ytapi/stream.py +0 -63
- webscout/Extra/YTToolkit/ytapi/utils.py +0 -62
- webscout/Extra/YTToolkit/ytapi/video.py +0 -232
- webscout/Extra/__init__.py +0 -7
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -910
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.py +0 -684
- webscout/Extra/tempmail/__init__.py +0 -28
- webscout/Extra/tempmail/async_utils.py +0 -141
- webscout/Extra/tempmail/base.py +0 -161
- webscout/Extra/tempmail/cli.py +0 -187
- webscout/Extra/tempmail/emailnator.py +0 -84
- webscout/Extra/tempmail/mail_tm.py +0 -361
- webscout/Extra/tempmail/temp_mail_io.py +0 -292
- webscout/Extra/weather.py +0 -194
- webscout/Extra/weather_ascii.py +0 -76
- webscout/Litlogger/__init__.py +0 -67
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -250
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/AISEARCH/Perplexity.py +0 -359
- webscout/Provider/AISEARCH/__init__.py +0 -10
- webscout/Provider/AISEARCH/felo_search.py +0 -228
- webscout/Provider/AISEARCH/genspark_search.py +0 -208
- webscout/Provider/AISEARCH/hika_search.py +0 -198
- webscout/Provider/AISEARCH/iask_search.py +0 -436
- webscout/Provider/AISEARCH/monica_search.py +0 -246
- webscout/Provider/AISEARCH/scira_search.py +0 -322
- webscout/Provider/AISEARCH/webpilotai_search.py +0 -281
- webscout/Provider/Aitopia.py +0 -316
- webscout/Provider/AllenAI.py +0 -447
- webscout/Provider/Andi.py +0 -228
- webscout/Provider/Blackboxai.py +0 -229
- webscout/Provider/ChatGPTClone.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/ChatSandbox.py +0 -342
- webscout/Provider/Cloudflare.py +0 -325
- webscout/Provider/Cohere.py +0 -208
- webscout/Provider/Deepinfra.py +0 -338
- webscout/Provider/ElectronHub.py +0 -773
- webscout/Provider/ExaAI.py +0 -261
- webscout/Provider/ExaChat.py +0 -358
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -249
- webscout/Provider/Gemini.py +0 -169
- webscout/Provider/GithubChat.py +0 -370
- webscout/Provider/GizAI.py +0 -285
- webscout/Provider/Glider.py +0 -222
- webscout/Provider/Groq.py +0 -801
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HeckAI.py +0 -257
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/Jadve.py +0 -291
- webscout/Provider/Koboldai.py +0 -381
- webscout/Provider/LambdaChat.py +0 -411
- webscout/Provider/Llama3.py +0 -259
- webscout/Provider/MCPCore.py +0 -315
- webscout/Provider/Marcus.py +0 -206
- webscout/Provider/Nemotron.py +0 -218
- webscout/Provider/Netwrck.py +0 -270
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/__init__.py +0 -28
- webscout/Provider/OPENAI/ai4chat.py +0 -286
- webscout/Provider/OPENAI/base.py +0 -46
- webscout/Provider/OPENAI/c4ai.py +0 -367
- webscout/Provider/OPENAI/chatgpt.py +0 -549
- webscout/Provider/OPENAI/chatgptclone.py +0 -481
- webscout/Provider/OPENAI/deepinfra.py +0 -309
- webscout/Provider/OPENAI/e2b.py +0 -1350
- webscout/Provider/OPENAI/exaai.py +0 -404
- webscout/Provider/OPENAI/exachat.py +0 -437
- webscout/Provider/OPENAI/freeaichat.py +0 -352
- webscout/Provider/OPENAI/glider.py +0 -316
- webscout/Provider/OPENAI/groq.py +0 -354
- webscout/Provider/OPENAI/heckai.py +0 -341
- webscout/Provider/OPENAI/llmchatco.py +0 -327
- webscout/Provider/OPENAI/mcpcore.py +0 -376
- webscout/Provider/OPENAI/multichat.py +0 -368
- webscout/Provider/OPENAI/netwrck.py +0 -350
- webscout/Provider/OPENAI/opkfc.py +0 -488
- webscout/Provider/OPENAI/scirachat.py +0 -462
- webscout/Provider/OPENAI/sonus.py +0 -294
- webscout/Provider/OPENAI/standardinput.py +0 -425
- webscout/Provider/OPENAI/textpollinations.py +0 -329
- webscout/Provider/OPENAI/toolbaz.py +0 -406
- webscout/Provider/OPENAI/typegpt.py +0 -346
- webscout/Provider/OPENAI/uncovrAI.py +0 -455
- webscout/Provider/OPENAI/utils.py +0 -211
- webscout/Provider/OPENAI/venice.py +0 -413
- webscout/Provider/OPENAI/wisecat.py +0 -381
- webscout/Provider/OPENAI/writecream.py +0 -156
- webscout/Provider/OPENAI/x0gpt.py +0 -371
- webscout/Provider/OPENAI/yep.py +0 -327
- webscout/Provider/OpenGPT.py +0 -209
- webscout/Provider/Openai.py +0 -496
- webscout/Provider/PI.py +0 -429
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/QwenLM.py +0 -254
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -290
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/__init__.py +0 -12
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/TTS/__init__.py +0 -8
- webscout/Provider/TTS/base.py +0 -159
- webscout/Provider/TTS/deepgram.py +0 -156
- webscout/Provider/TTS/elevenlabs.py +0 -111
- webscout/Provider/TTS/gesserit.py +0 -128
- webscout/Provider/TTS/murfai.py +0 -113
- webscout/Provider/TTS/parler.py +0 -111
- webscout/Provider/TTS/speechma.py +0 -180
- webscout/Provider/TTS/streamElements.py +0 -333
- webscout/Provider/TTS/utils.py +0 -280
- webscout/Provider/TeachAnything.py +0 -233
- webscout/Provider/TextPollinationsAI.py +0 -306
- webscout/Provider/TwoAI.py +0 -280
- webscout/Provider/TypliAI.py +0 -305
- webscout/Provider/Venice.py +0 -258
- webscout/Provider/VercelAI.py +0 -253
- webscout/Provider/WiseCat.py +0 -233
- webscout/Provider/WrDoChat.py +0 -370
- webscout/Provider/Writecream.py +0 -237
- webscout/Provider/WritingMate.py +0 -269
- webscout/Provider/Youchat.py +0 -330
- webscout/Provider/__init__.py +0 -178
- webscout/Provider/ai4chat.py +0 -203
- webscout/Provider/aimathgpt.py +0 -189
- webscout/Provider/akashgpt.py +0 -335
- webscout/Provider/asksteve.py +0 -212
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/cerebras.py +0 -288
- webscout/Provider/chatglm.py +0 -215
- webscout/Provider/cleeai.py +0 -213
- webscout/Provider/copilot.py +0 -425
- webscout/Provider/elmo.py +0 -283
- webscout/Provider/freeaichat.py +0 -285
- webscout/Provider/geminiapi.py +0 -208
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/Provider/granite.py +0 -235
- webscout/Provider/hermes.py +0 -266
- webscout/Provider/julius.py +0 -223
- webscout/Provider/koala.py +0 -268
- webscout/Provider/learnfastai.py +0 -325
- webscout/Provider/llama3mitril.py +0 -215
- webscout/Provider/llmchat.py +0 -255
- webscout/Provider/llmchatco.py +0 -306
- webscout/Provider/meta.py +0 -798
- webscout/Provider/multichat.py +0 -364
- webscout/Provider/scira_chat.py +0 -297
- webscout/Provider/scnet.py +0 -243
- webscout/Provider/searchchat.py +0 -292
- webscout/Provider/sonus.py +0 -258
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/toolbaz.py +0 -353
- webscout/Provider/turboseek.py +0 -266
- webscout/Provider/typefully.py +0 -330
- webscout/Provider/typegpt.py +0 -289
- webscout/Provider/uncovr.py +0 -368
- webscout/Provider/x0gpt.py +0 -299
- webscout/Provider/yep.py +0 -389
- webscout/litagent/__init__.py +0 -29
- webscout/litagent/agent.py +0 -455
- webscout/litagent/constants.py +0 -60
- webscout/litprinter/__init__.py +0 -59
- webscout/scout/__init__.py +0 -8
- webscout/scout/core/__init__.py +0 -7
- webscout/scout/core/crawler.py +0 -140
- webscout/scout/core/scout.py +0 -568
- webscout/scout/core/search_result.py +0 -96
- webscout/scout/core/text_analyzer.py +0 -63
- webscout/scout/core/text_utils.py +0 -277
- webscout/scout/core/web_analyzer.py +0 -52
- webscout/scout/core.py +0 -881
- webscout/scout/element.py +0 -460
- webscout/scout/parsers/__init__.py +0 -69
- webscout/scout/parsers/html5lib_parser.py +0 -172
- webscout/scout/parsers/html_parser.py +0 -236
- webscout/scout/parsers/lxml_parser.py +0 -178
- webscout/scout/utils.py +0 -37
- webscout/swiftcli/__init__.py +0 -95
- webscout/swiftcli/core/__init__.py +0 -7
- webscout/swiftcli/core/cli.py +0 -297
- webscout/swiftcli/core/context.py +0 -104
- webscout/swiftcli/core/group.py +0 -241
- webscout/swiftcli/decorators/__init__.py +0 -28
- webscout/swiftcli/decorators/command.py +0 -221
- webscout/swiftcli/decorators/options.py +0 -220
- webscout/swiftcli/decorators/output.py +0 -252
- webscout/swiftcli/exceptions.py +0 -21
- webscout/swiftcli/plugins/__init__.py +0 -9
- webscout/swiftcli/plugins/base.py +0 -135
- webscout/swiftcli/plugins/manager.py +0 -262
- webscout/swiftcli/utils/__init__.py +0 -59
- webscout/swiftcli/utils/formatting.py +0 -252
- webscout/swiftcli/utils/parsing.py +0 -267
- webscout/zeroart/__init__.py +0 -55
- webscout/zeroart/base.py +0 -60
- webscout/zeroart/effects.py +0 -99
- webscout/zeroart/fonts.py +0 -816
- webscout-8.2.6.dist-info/RECORD +0 -307
- webscout-8.2.6.dist-info/entry_points.txt +0 -3
- webscout-8.2.6.dist-info/top_level.txt +0 -2
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- {webscout-8.2.6.dist-info → webscout-8.2.7.dist-info}/licenses/LICENSE.md +0 -0
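Two of the deleted provider modules are reproduced in full below. Both exposed the package's OpenAI-style interface (`client.chat.completions.create(...)`). For reference, a minimal usage sketch for the Groq provider, inferred from the deleted `groq.py` that follows; the import path mirrors the deleted file's location and the API key is a placeholder:

```python
# Sketch of how the removed Groq provider was used, reconstructed from the
# deleted webscout/Provider/OPENAI/groq.py shown below.
from webscout.Provider.OPENAI.groq import Groq  # module removed in 8.2.7

client = Groq(api_key="YOUR_GROQ_API_KEY")  # placeholder; auth header is only set when a key is given
response = client.chat.completions.create(
    model="llama-3.1-8b-instant",  # one of Groq.AVAILABLE_MODELS
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)
```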
webscout/Provider/OPENAI/groq.py
DELETED
```diff
@@ -1,354 +0,0 @@
-import requests
-import json
-import time
-import uuid
-from typing import List, Dict, Optional, Union, Generator, Any
-
-# Import curl_cffi for improved request handling
-from curl_cffi.requests import Session
-from curl_cffi import CurlError
-
-# Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
-    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-    ChatCompletionMessage, CompletionUsage
-)
-
-# Attempt to import LitAgent, fallback if not available
-try:
-    from webscout.litagent import LitAgent
-except ImportError:
-    pass
-
-# --- Groq Client ---
-
-class Completions(BaseCompletions):
-    def __init__(self, client: 'Groq'):
-        self._client = client
-
-    def create(
-        self,
-        *,
-        model: str,
-        messages: List[Dict[str, str]],
-        max_tokens: Optional[int] = 2049,
-        stream: bool = False,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
-        **kwargs: Any
-    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-        """
-        Creates a model response for the given chat conversation.
-        Mimics openai.chat.completions.create
-        """
-        payload = {
-            "model": model,
-            "messages": messages,
-            "max_tokens": max_tokens,
-            "stream": stream,
-        }
-        if temperature is not None:
-            payload["temperature"] = temperature
-        if top_p is not None:
-            payload["top_p"] = top_p
-
-        # Add frequency_penalty and presence_penalty if provided
-        if "frequency_penalty" in kwargs:
-            payload["frequency_penalty"] = kwargs.pop("frequency_penalty")
-        if "presence_penalty" in kwargs:
-            payload["presence_penalty"] = kwargs.pop("presence_penalty")
-
-        # Add any tools if provided
-        if "tools" in kwargs and kwargs["tools"]:
-            payload["tools"] = kwargs.pop("tools")
-
-        payload.update(kwargs)
-
-        request_id = f"chatcmpl-{uuid.uuid4()}"
-        created_time = int(time.time())
-
-        if stream:
-            return self._create_stream(request_id, created_time, model, payload)
-        else:
-            return self._create_non_stream(request_id, created_time, model, payload)
-
-    def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-    ) -> Generator[ChatCompletionChunk, None, None]:
-        try:
-            response = self._client.session.post(
-                self._client.base_url,
-                json=payload,
-                stream=True,
-                timeout=self._client.timeout,
-                impersonate="chrome110" # Use impersonate for better compatibility
-            )
-
-            if response.status_code != 200:
-                raise IOError(f"Groq request failed with status code {response.status_code}: {response.text}")
-
-            # Track token usage across chunks
-            prompt_tokens = 0
-            completion_tokens = 0
-            total_tokens = 0
-
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    if line.startswith("data: "):
-                        json_str = line[6:]
-                        if json_str == "[DONE]":
-                            break
-
-                        try:
-                            data = json.loads(json_str)
-                            choice_data = data.get('choices', [{}])[0]
-                            delta_data = choice_data.get('delta', {})
-                            finish_reason = choice_data.get('finish_reason')
-
-                            # Update token counts if available
-                            usage_data = data.get('usage', {})
-                            if usage_data:
-                                prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
-                                completion_tokens = usage_data.get('completion_tokens', completion_tokens)
-                                total_tokens = usage_data.get('total_tokens', total_tokens)
-
-                            # Create the delta object
-                            delta = ChoiceDelta(
-                                content=delta_data.get('content'),
-                                role=delta_data.get('role'),
-                                tool_calls=delta_data.get('tool_calls')
-                            )
-
-                            # Create the choice object
-                            choice = Choice(
-                                index=choice_data.get('index', 0),
-                                delta=delta,
-                                finish_reason=finish_reason,
-                                logprobs=choice_data.get('logprobs')
-                            )
-
-                            # Create the chunk object
-                            chunk = ChatCompletionChunk(
-                                id=request_id,
-                                choices=[choice],
-                                created=created_time,
-                                model=model,
-                                system_fingerprint=data.get('system_fingerprint')
-                            )
-
-                            # Convert to dict for proper formatting
-                            chunk_dict = chunk.to_dict()
-
-                            # Add usage information to match OpenAI format
-                            usage_dict = {
-                                "prompt_tokens": prompt_tokens or 10,
-                                "completion_tokens": completion_tokens or (len(delta_data.get('content', '')) if delta_data.get('content') else 0),
-                                "total_tokens": total_tokens or (10 + (len(delta_data.get('content', '')) if delta_data.get('content') else 0)),
-                                "estimated_cost": None
-                            }
-
-                            # Update completion_tokens and total_tokens as we receive more content
-                            if delta_data.get('content'):
-                                completion_tokens += 1
-                                total_tokens = prompt_tokens + completion_tokens
-                                usage_dict["completion_tokens"] = completion_tokens
-                                usage_dict["total_tokens"] = total_tokens
-
-                            chunk_dict["usage"] = usage_dict
-
-                            yield chunk
-                        except json.JSONDecodeError:
-                            print(f"Warning: Could not decode JSON line: {json_str}")
-                            continue
-        except CurlError as e:
-            print(f"Error during Groq stream request: {e}")
-            raise IOError(f"Groq request failed: {e}") from e
-        except Exception as e:
-            print(f"Error processing Groq stream: {e}")
-            raise
-
-    def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-    ) -> ChatCompletion:
-        try:
-            response = self._client.session.post(
-                self._client.base_url,
-                json=payload,
-                timeout=self._client.timeout,
-                impersonate="chrome110" # Use impersonate for better compatibility
-            )
-
-            if response.status_code != 200:
-                raise IOError(f"Groq request failed with status code {response.status_code}: {response.text}")
-
-            data = response.json()
-
-            choices_data = data.get('choices', [])
-            usage_data = data.get('usage', {})
-
-            choices = []
-            for choice_d in choices_data:
-                message_d = choice_d.get('message', {})
-
-                # Handle tool calls if present
-                tool_calls = message_d.get('tool_calls')
-
-                message = ChatCompletionMessage(
-                    role=message_d.get('role', 'assistant'),
-                    content=message_d.get('content', ''),
-                    tool_calls=tool_calls
-                )
-                choice = Choice(
-                    index=choice_d.get('index', 0),
-                    message=message,
-                    finish_reason=choice_d.get('finish_reason', 'stop')
-                )
-                choices.append(choice)
-
-            usage = CompletionUsage(
-                prompt_tokens=usage_data.get('prompt_tokens', 0),
-                completion_tokens=usage_data.get('completion_tokens', 0),
-                total_tokens=usage_data.get('total_tokens', 0)
-            )
-
-            completion = ChatCompletion(
-                id=request_id,
-                choices=choices,
-                created=created_time,
-                model=data.get('model', model),
-                usage=usage,
-            )
-            return completion
-
-        except CurlError as e:
-            print(f"Error during Groq non-stream request: {e}")
-            raise IOError(f"Groq request failed: {e}") from e
-        except Exception as e:
-            print(f"Error processing Groq response: {e}")
-            raise
-
-class Chat(BaseChat):
-    def __init__(self, client: 'Groq'):
-        self.completions = Completions(client)
-
-class Groq(OpenAICompatibleProvider):
-    AVAILABLE_MODELS = [
-        "distil-whisper-large-v3-en",
-        "gemma2-9b-it",
-        "llama-3.3-70b-versatile",
-        "llama-3.1-8b-instant",
-        "llama-guard-3-8b",
-        "llama3-70b-8192",
-        "llama3-8b-8192",
-        "whisper-large-v3",
-        "whisper-large-v3-turbo",
-        "meta-llama/llama-4-scout-17b-16e-instruct",
-        "meta-llama/llama-4-maverick-17b-128e-instruct",
-        "playai-tts",
-        "playai-tts-arabic",
-        "qwen-qwq-32b",
-        "mistral-saba-24b",
-        "qwen-2.5-coder-32b",
-        "qwen-2.5-32b",
-        "deepseek-r1-distill-qwen-32b",
-        "deepseek-r1-distill-llama-70b",
-        "llama-3.3-70b-specdec",
-        "llama-3.2-1b-preview",
-        "llama-3.2-3b-preview",
-        "llama-3.2-11b-vision-preview",
-        "llama-3.2-90b-vision-preview",
-        "mixtral-8x7b-32768"
-    ]
-
-    def __init__(self, api_key: str = None, timeout: Optional[int] = 30, browser: str = "chrome"):
-        self.timeout = timeout
-        self.base_url = "https://api.groq.com/openai/v1/chat/completions"
-        self.api_key = api_key
-
-        # Initialize curl_cffi Session
-        self.session = Session()
-
-        # Set up headers with API key if provided
-        self.headers = {
-            "Content-Type": "application/json",
-        }
-
-        if api_key:
-            self.headers["Authorization"] = f"Bearer {api_key}"
-
-        # Try to use LitAgent for browser fingerprinting
-        try:
-            agent = LitAgent()
-            fingerprint = agent.generate_fingerprint(browser)
-
-            self.headers.update({
-                "Accept": fingerprint["accept"],
-                "Accept-Encoding": "gzip, deflate, br, zstd",
-                "Accept-Language": fingerprint["accept_language"],
-                "Cache-Control": "no-cache",
-                "Connection": "keep-alive",
-                "Origin": "https://console.groq.com",
-                "Pragma": "no-cache",
-                "Referer": "https://console.groq.com/",
-                "Sec-Fetch-Dest": "empty",
-                "Sec-Fetch-Mode": "cors",
-                "Sec-Fetch-Site": "same-site",
-                "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-                "Sec-CH-UA-Mobile": "?0",
-                "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
-                "User-Agent": fingerprint["user_agent"],
-            })
-        except (NameError, Exception):
-            # Fallback to basic headers if LitAgent is not available
-            self.headers.update({
-                "Accept": "application/json",
-                "Accept-Encoding": "gzip, deflate, br",
-                "Accept-Language": "en-US,en;q=0.9",
-                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
-            })
-
-        # Update session headers
-        self.session.headers.update(self.headers)
-
-        # Initialize chat interface
-        self.chat = Chat(self)
-
-    @classmethod
-    def get_models(cls, api_key: str = None):
-        """Fetch available models from Groq API.
-
-        Args:
-            api_key (str, optional): Groq API key. If not provided, returns default models.
-
-        Returns:
-            list: List of available model IDs
-        """
-        if not api_key:
-            return cls.AVAILABLE_MODELS
-
-        try:
-            # Use a temporary curl_cffi session for this class method
-            temp_session = Session()
-            headers = {
-                "Content-Type": "application/json",
-                "Authorization": f"Bearer {api_key}",
-            }
-
-            response = temp_session.get(
-                "https://api.groq.com/openai/v1/models",
-                headers=headers,
-                impersonate="chrome110" # Use impersonate for fetching
-            )
-
-            if response.status_code != 200:
-                return cls.AVAILABLE_MODELS
-
-            data = response.json()
-            if "data" in data and isinstance(data["data"], list):
-                return [model["id"] for model in data["data"]]
-            return cls.AVAILABLE_MODELS
-
-        except (CurlError, Exception):
-            # Fallback to default models list if fetching fails
-            return cls.AVAILABLE_MODELS
```
webscout/Provider/OPENAI/heckai.py
DELETED

```diff
@@ -1,341 +0,0 @@
-import time
-import uuid
-import requests
-from typing import List, Dict, Optional, Union, Generator, Any
-
-from webscout.litagent import LitAgent
-from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
-from .utils import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    Choice,
-    ChatCompletionMessage,
-    ChoiceDelta,
-    CompletionUsage,
-    format_prompt
-)
-
-# ANSI escape codes for formatting
-BOLD = "\033[1m"
-RED = "\033[91m"
-RESET = "\033[0m"
-
-class Completions(BaseCompletions):
-    def __init__(self, client: 'HeckAI'):
-        self._client = client
-
-    def create(
-        self,
-        *,
-        model: str,
-        messages: List[Dict[str, str]],
-        max_tokens: Optional[int] = None, # Not used by HeckAI but kept for compatibility
-        stream: bool = False,
-        temperature: Optional[float] = None, # Not used by HeckAI but kept for compatibility
-        top_p: Optional[float] = None, # Not used by HeckAI but kept for compatibility
-        **kwargs: Any # Not used by HeckAI but kept for compatibility
-    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-        """
-        Creates a model response for the given chat conversation.
-        Mimics openai.chat.completions.create
-        """
-        # Format the messages using the format_prompt utility
-        # This creates a conversation in the format: "User: message\nAssistant: response\nUser: message\nAssistant:"
-        # HeckAI works better with a properly formatted conversation
-        question = format_prompt(messages, add_special_tokens=True)
-
-        # Prepare the payload for HeckAI API
-        payload = {
-            "model": model,
-            "question": question,
-            "language": self._client.language,
-            "sessionId": self._client.session_id,
-            "previousQuestion": None, # Not needed when using format_prompt
-            "previousAnswer": None, # Not needed when using format_prompt
-            "imgUrls": []
-        }
-
-        request_id = f"chatcmpl-{uuid.uuid4()}"
-        created_time = int(time.time())
-
-        if stream:
-            return self._create_stream(request_id, created_time, model, payload)
-        else:
-            return self._create_non_stream(request_id, created_time, model, payload)
-
-    def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-    ) -> Generator[ChatCompletionChunk, None, None]:
-        try:
-            response = self._client.session.post(
-                self._client.url,
-                headers=self._client.headers,
-                json=payload,
-                stream=True,
-                timeout=self._client.timeout
-            )
-            response.raise_for_status()
-
-            # Track token usage across chunks
-            completion_tokens = 0
-
-            streaming_text = ""
-            in_answer = False
-
-            for line in response.iter_lines(decode_unicode=True):
-                if not line:
-                    continue
-
-                # Remove "data: " prefix
-                if line.startswith("data: "):
-                    data = line[6:]
-                else:
-                    continue
-
-                # Check for control markers
-                if data == "[ANSWER_START]":
-                    in_answer = True
-                    continue
-
-                if data == "[ANSWER_DONE]":
-                    in_answer = False
-                    continue
-
-                if data == "[RELATE_Q_START]" or data == "[RELATE_Q_DONE]":
-                    continue
-
-                # Process content if we're in an answer section
-                if in_answer:
-                    streaming_text += data
-                    completion_tokens += len(data) // 4 # Rough estimate
-
-                    # Create a delta object for this chunk
-                    delta = ChoiceDelta(content=data)
-                    choice = Choice(index=0, delta=delta, finish_reason=None)
-
-                    chunk = ChatCompletionChunk(
-                        id=request_id,
-                        choices=[choice],
-                        created=created_time,
-                        model=model,
-                    )
-
-                    yield chunk
-
-            # Store the response for future context
-            # We don't need to store previous_question/answer as we're using format_prompt
-            # which handles the conversation formatting
-
-            # Final chunk with finish_reason
-            delta = ChoiceDelta(content=None)
-            choice = Choice(index=0, delta=delta, finish_reason="stop")
-
-            chunk = ChatCompletionChunk(
-                id=request_id,
-                choices=[choice],
-                created=created_time,
-                model=model,
-            )
-
-            yield chunk
-
-        except requests.exceptions.RequestException as e:
-            print(f"{RED}Error during HeckAI stream request: {e}{RESET}")
-            raise IOError(f"HeckAI request failed: {e}") from e
-
-    def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-    ) -> ChatCompletion:
-        try:
-            full_text = ""
-            streaming_text = ""
-            in_answer = False
-
-            response = self._client.session.post(
-                self._client.url,
-                headers=self._client.headers,
-                json=payload,
-                stream=True,
-                timeout=self._client.timeout
-            )
-            response.raise_for_status()
-
-            for line in response.iter_lines(decode_unicode=True):
-                if not line:
-                    continue
-
-                # Remove "data: " prefix
-                if line.startswith("data: "):
-                    data = line[6:]
-                else:
-                    continue
-
-                # Check for control markers
-                if data == "[ANSWER_START]":
-                    in_answer = True
-                    continue
-
-                if data == "[ANSWER_DONE]":
-                    in_answer = False
-                    continue
-
-                if data == "[RELATE_Q_START]" or data == "[RELATE_Q_DONE]":
-                    continue
-
-                # Process content if we're in an answer section
-                if in_answer:
-                    streaming_text += data
-
-            full_text = streaming_text
-
-            # Store the response for future context
-            # We don't need to store previous_question/answer as we're using format_prompt
-            # which handles the conversation formatting
-
-            # Create usage statistics (estimated)
-            prompt_tokens = len(payload["question"]) // 4
-            completion_tokens = len(full_text) // 4
-            total_tokens = prompt_tokens + completion_tokens
-
-            usage = CompletionUsage(
-                prompt_tokens=prompt_tokens,
-                completion_tokens=completion_tokens,
-                total_tokens=total_tokens
-            )
-
-            # Create the message object
-            message = ChatCompletionMessage(
-                role="assistant",
-                content=full_text
-            )
-
-            # Create the choice object
-            choice = Choice(
-                index=0,
-                message=message,
-                finish_reason="stop"
-            )
-
-            # Create the completion object
-            completion = ChatCompletion(
-                id=request_id,
-                choices=[choice],
-                created=created_time,
-                model=model,
-                usage=usage,
-            )
-
-            return completion
-
-        except Exception as e:
-            print(f"{RED}Error during HeckAI non-stream request: {e}{RESET}")
-            raise IOError(f"HeckAI request failed: {e}") from e
-
-class Chat(BaseChat):
-    def __init__(self, client: 'HeckAI'):
-        self.completions = Completions(client)
-
-class HeckAI(OpenAICompatibleProvider):
-    """
-    OpenAI-compatible client for HeckAI API.
-
-    Usage:
-        client = HeckAI()
-        response = client.chat.completions.create(
-            model="google/gemini-2.0-flash-001",
-            messages=[{"role": "user", "content": "Hello!"}]
-        )
-        print(response.choices[0].message.content)
-    """
-
-    AVAILABLE_MODELS = [
-        "google/gemini-2.0-flash-001",
-        "deepseek/deepseek-chat",
-        "deepseek/deepseek-r1",
-        "openai/gpt-4o-mini",
-        "openai/gpt-4.1-mini",
-        "x-ai/grok-3-mini-beta",
-        "meta-llama/llama-4-scout"
-
-    ]
-
-    def __init__(
-        self,
-        timeout: int = 30,
-        language: str = "English"
-    ):
-        """
-        Initialize the HeckAI client.
-
-        Args:
-            timeout: Request timeout in seconds.
-            language: Language for responses.
-        """
-        self.timeout = timeout
-        self.language = language
-        self.url = "https://api.heckai.weight-wave.com/api/ha/v1/chat"
-        self.session_id = str(uuid.uuid4())
-
-        # Use LitAgent for user-agent
-        agent = LitAgent()
-        self.headers = {
-            'User-Agent': agent.random(),
-            'Content-Type': 'application/json',
-            'Origin': 'https://heck.ai',
-            'Referer': 'https://heck.ai/',
-            'Connection': 'keep-alive'
-        }
-
-        self.session = requests.Session()
-        self.session.headers.update(self.headers)
-
-        # Initialize the chat interface
-        self.chat = Chat(self)
-
-    def convert_model_name(self, model: str) -> str:
-        """
-        Ensure the model name is in the correct format.
-        """
-        if model in self.AVAILABLE_MODELS:
-            return model
-
-        # Try to find a matching model
-        for available_model in self.AVAILABLE_MODELS:
-            if model.lower() in available_model.lower():
-                return available_model
-
-        # Default to gemini if no match
-        print(f"{BOLD}Warning: Model '{model}' not found, using default model 'google/gemini-2.0-flash-001'{RESET}")
-        return "google/gemini-2.0-flash-001"
-
-
-# Simple test if run directly
-if __name__ == "__main__":
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    for model in HeckAI.AVAILABLE_MODELS:
-        try:
-            client = HeckAI(timeout=60)
-            # Test with a simple conversation to demonstrate format_prompt usage
-            response = client.chat.completions.create(
-                model=model,
-                messages=[
-                    {"role": "system", "content": "You are a helpful assistant."},
-                    {"role": "user", "content": "Say 'Hello' in one word"},
-                ],
-                stream=False
-            )
-
-            if response and response.choices and response.choices[0].message.content:
-                status = "✓"
-                # Truncate response if too long
-                display_text = response.choices[0].message.content.strip()
-                display_text = display_text[:50] + "..." if len(display_text) > 50 else display_text
-            else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"{model:<50} {'✗':<10} {str(e)}")
```