webscout 8.3.5 (py3-none-any.whl) → 8.3.7 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIutel.py +2 -0
- webscout/Bard.py +12 -6
- webscout/DWEBS.py +66 -57
- webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
- webscout/Provider/AISEARCH/__init__.py +18 -11
- webscout/Provider/AISEARCH/scira_search.py +3 -1
- webscout/Provider/Aitopia.py +2 -3
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/ChatGPTClone.py +1 -1
- webscout/Provider/ChatSandbox.py +1 -0
- webscout/Provider/Cloudflare.py +1 -1
- webscout/Provider/Cohere.py +1 -0
- webscout/Provider/Deepinfra.py +13 -10
- webscout/Provider/ExaAI.py +1 -1
- webscout/Provider/ExaChat.py +1 -80
- webscout/Provider/Flowith.py +6 -1
- webscout/Provider/Gemini.py +7 -5
- webscout/Provider/GeminiProxy.py +1 -0
- webscout/Provider/GithubChat.py +4 -1
- webscout/Provider/Groq.py +1 -1
- webscout/Provider/HeckAI.py +8 -4
- webscout/Provider/Jadve.py +23 -38
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +8 -186
- webscout/Provider/LambdaChat.py +2 -4
- webscout/Provider/Nemotron.py +3 -4
- webscout/Provider/Netwrck.py +6 -8
- webscout/Provider/OLLAMA.py +1 -0
- webscout/Provider/OPENAI/Cloudflare.py +6 -7
- webscout/Provider/OPENAI/FalconH1.py +2 -7
- webscout/Provider/OPENAI/FreeGemini.py +6 -8
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
- webscout/Provider/OPENAI/NEMOTRON.py +3 -6
- webscout/Provider/OPENAI/PI.py +5 -4
- webscout/Provider/OPENAI/Qwen3.py +2 -3
- webscout/Provider/OPENAI/README.md +2 -1
- webscout/Provider/OPENAI/TogetherAI.py +52 -57
- webscout/Provider/OPENAI/TwoAI.py +3 -4
- webscout/Provider/OPENAI/__init__.py +17 -56
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +9 -29
- webscout/Provider/OPENAI/chatgpt.py +7 -2
- webscout/Provider/OPENAI/chatgptclone.py +4 -7
- webscout/Provider/OPENAI/chatsandbox.py +84 -59
- webscout/Provider/OPENAI/deepinfra.py +12 -6
- webscout/Provider/OPENAI/e2b.py +60 -8
- webscout/Provider/OPENAI/flowith.py +4 -3
- webscout/Provider/OPENAI/generate_api_key.py +48 -0
- webscout/Provider/OPENAI/heckai.py +4 -1
- webscout/Provider/OPENAI/netwrck.py +9 -12
- webscout/Provider/OPENAI/refact.py +274 -0
- webscout/Provider/OPENAI/scirachat.py +6 -0
- webscout/Provider/OPENAI/textpollinations.py +3 -14
- webscout/Provider/OPENAI/toolbaz.py +14 -10
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/Openai.py +150 -402
- webscout/Provider/PI.py +1 -0
- webscout/Provider/Perplexitylabs.py +1 -2
- webscout/Provider/QwenLM.py +107 -89
- webscout/Provider/STT/__init__.py +17 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
- webscout/Provider/StandardInput.py +1 -1
- webscout/Provider/TTI/__init__.py +18 -12
- webscout/Provider/TTI/bing.py +14 -2
- webscout/Provider/TTI/together.py +10 -9
- webscout/Provider/TTS/README.md +0 -1
- webscout/Provider/TTS/__init__.py +18 -11
- webscout/Provider/TTS/base.py +479 -159
- webscout/Provider/TTS/deepgram.py +409 -156
- webscout/Provider/TTS/elevenlabs.py +425 -111
- webscout/Provider/TTS/freetts.py +317 -140
- webscout/Provider/TTS/gesserit.py +192 -128
- webscout/Provider/TTS/murfai.py +248 -113
- webscout/Provider/TTS/openai_fm.py +347 -129
- webscout/Provider/TTS/speechma.py +620 -586
- webscout/Provider/TeachAnything.py +1 -0
- webscout/Provider/TextPollinationsAI.py +5 -15
- webscout/Provider/TogetherAI.py +136 -142
- webscout/Provider/TwoAI.py +53 -309
- webscout/Provider/TypliAI.py +2 -1
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
- webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
- webscout/Provider/Venice.py +2 -1
- webscout/Provider/VercelAI.py +1 -0
- webscout/Provider/WiseCat.py +2 -1
- webscout/Provider/WrDoChat.py +2 -1
- webscout/Provider/__init__.py +18 -174
- webscout/Provider/ai4chat.py +1 -1
- webscout/Provider/akashgpt.py +7 -10
- webscout/Provider/cerebras.py +194 -38
- webscout/Provider/chatglm.py +170 -83
- webscout/Provider/cleeai.py +1 -2
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +1 -1
- webscout/Provider/geminiapi.py +1 -1
- webscout/Provider/granite.py +1 -1
- webscout/Provider/hermes.py +1 -3
- webscout/Provider/julius.py +1 -0
- webscout/Provider/learnfastai.py +1 -1
- webscout/Provider/llama3mitril.py +1 -1
- webscout/Provider/llmchat.py +1 -1
- webscout/Provider/llmchatco.py +1 -1
- webscout/Provider/meta.py +3 -3
- webscout/Provider/oivscode.py +2 -2
- webscout/Provider/scira_chat.py +51 -124
- webscout/Provider/searchchat.py +1 -0
- webscout/Provider/sonus.py +1 -1
- webscout/Provider/toolbaz.py +15 -11
- webscout/Provider/turboseek.py +31 -22
- webscout/Provider/typefully.py +2 -1
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +2 -1
- webscout/conversation.py +22 -20
- webscout/sanitize.py +14 -10
- webscout/scout/README.md +20 -23
- webscout/scout/core/crawler.py +125 -38
- webscout/scout/core/scout.py +26 -5
- webscout/tempid.py +6 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +13 -6
- webscout/webscout_search_async.py +10 -8
- webscout/yep_search.py +13 -5
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/METADATA +3 -1
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/RECORD +132 -155
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/c4ai.py +0 -394
- webscout/Provider/OPENAI/copilot.py +0 -305
- webscout/Provider/OPENAI/glider.py +0 -330
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/TTS/sthir.py +0 -94
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -422
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
- /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
- /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
- /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
@@ -14,33 +14,23 @@ class TextPollinationsAI(Provider):
     A class to interact with the Pollinations AI API.
     """

+    required_auth = False
     AVAILABLE_MODELS = [
-        "deepseek",
         "deepseek-reasoning",
-        "
-        "grok",
-        "llama-fast-roblox",
-        "llama-roblox",
-        "llamascout",
+        "gemini",
         "mistral",
-        "
-        "mistral-roblox",
+        "nova-fast",
         "openai",
         "openai-audio",
         "openai-fast",
-        "openai-large",
         "openai-reasoning",
-        "openai-roblox",
-        "phi",
         "qwen-coder",
+        "roblox-rp",
         "bidara",
-        "elixposearch",
         "evil",
-        "hypnosis-tracy",
         "midijourney",
         "mirexa",
         "rtist",
-        "sur",
         "unity",
     ]
     _models_url = "https://text.pollinations.ai/models"

@@ -318,4 +308,4 @@ if __name__ == "__main__":
             # print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")

         except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
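As shown in the hunk above, TextPollinationsAI now sets `required_auth = False` and trims its model list ("grok", "phi", and the *-roblox variants are gone; "gemini", "nova-fast", and "roblox-rp" are added). A minimal usage sketch follows; the import path, `model` keyword, and `chat()` call follow the usual webscout provider pattern and are assumptions, not something this diff shows:

```python
# Sketch only: exercising TextPollinationsAI after the 8.3.7 model-list change.
# Assumes the provider keeps the standard webscout provider interface (model kwarg, chat()).
from webscout.Provider.TextPollinationsAI import TextPollinationsAI

ai = TextPollinationsAI(model="nova-fast")  # newly listed model; "grok" etc. were removed
print(ai.chat("Summarize the webscout project in one sentence."))
```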
webscout/Provider/TogetherAI.py
CHANGED
@@ -7,82 +7,126 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.litagent import LitAgent

 class TogetherAI(Provider):
     """
     A class to interact with the TogetherAI API.
     """

+    required_auth = True
+
+    # Default models list (will be updated dynamically)
     AVAILABLE_MODELS = [
-        "
-        "
-        "Qwen/Qwen2.5-7B-Instruct-Turbo",
-        "meta-llama/Llama-3-8b-chat-hf",
-        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
-        "togethercomputer/MoA-1-Turbo",
-        "eddiehou/meta-llama/Llama-3.1-405B",
-        "mistralai/Mistral-7B-Instruct-v0.2",
-        "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
-        "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
-        "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
-        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-        "Qwen/Qwen2.5-VL-72B-Instruct",
-        "arcee-ai/AFM-4.5B-Preview",
-        "lgai/exaone-3-5-32b-instruct",
-        "meta-llama/Llama-3-70b-chat-hf",
-        "mistralai/Mixtral-8x7B-Instruct-v0.1",
-        "google/gemma-2-27b-it",
+        "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+        "Qwen/QwQ-32B",
         "Qwen/Qwen2-72B-Instruct",
-        "mistralai/Mistral-Small-24B-Instruct-2501",
         "Qwen/Qwen2-VL-72B-Instruct",
-        "
-        "
-        "perplexity-ai/r1-1776",
-        "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
-        "arcee-ai/maestro-reasoning",
-        "togethercomputer/Refuel-Llm-V2-Small",
+        "Qwen/Qwen2.5-72B-Instruct-Turbo",
+        "Qwen/Qwen2.5-7B-Instruct-Turbo",
         "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Qwen/Qwen2.5-VL-72B-Instruct",
+        "Qwen/Qwen3-235B-A22B-Instruct-2507-tput",
+        "Qwen/Qwen3-235B-A22B-Thinking-2507",
+        "Qwen/Qwen3-235B-A22B-fp8-tput",
+        "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8",
+        "Salesforce/Llama-Rank-V1",
+        "Virtue-AI/VirtueGuard-Text-Lite",
+        "arcee-ai/AFM-4.5B",
         "arcee-ai/coder-large",
-        "
+        "arcee-ai/maestro-reasoning",
+        "arcee-ai/virtuoso-large",
         "arcee_ai/arcee-spotlight",
+        "blackbox/meta-llama-3-1-8b",
+        "deepcogito/cogito-v2-preview-deepseek-671b",
+        "deepseek-ai/DeepSeek-R1",
         "deepseek-ai/DeepSeek-R1-0528-tput",
-        "marin-community/marin-8b-instruct",
-        "lgai/exaone-deep-32b",
-        "google/gemma-3-27b-it",
         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
-        "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
-        "mistralai/Mistral-7B-Instruct-v0.1",
-        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
-        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free",
-        "
-        "
-        "Qwen/Qwen2.5-72B-Instruct-Turbo",
-        "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
-        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
-        "meta-llama/Llama-3.2-3B-Instruct-Turbo",
-        "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
         "deepseek-ai/DeepSeek-V3",
-        "
-        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
-        "Qwen/Qwen3-32B-FP8",
-        "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
-        "arcee-ai/virtuoso-large",
+        "google/gemma-2-27b-it",
         "google/gemma-3n-E4B-it",
-        "
+        "lgai/exaone-3-5-32b-instruct",
+        "lgai/exaone-deep-32b",
+        "marin-community/marin-8b-instruct",
+        "meta-llama/Llama-2-70b-hf",
+        "meta-llama/Llama-3-70b-chat-hf",
+        "meta-llama/Llama-3-8b-chat-hf",
+        "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
+        "meta-llama/Llama-3.2-3B-Instruct-Turbo",
+        "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
+        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+        "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
+        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+        "meta-llama/Llama-Vision-Free",
+        "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
+        "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
+        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
+        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
+        "mistralai/Mistral-7B-Instruct-v0.1",
+        "mistralai/Mistral-7B-Instruct-v0.2",
+        "mistralai/Mistral-7B-Instruct-v0.3",
+        "mistralai/Mistral-Small-24B-Instruct-2501",
+        "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        "moonshotai/Kimi-K2-Instruct",
+        "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
+        "perplexity-ai/r1-1776",
+        "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
+        "scb10x/scb10x-typhoon-2-1-gemma3-12b",
+        "togethercomputer/Refuel-Llm-V2-Small",
+        "zai-org/GLM-4.5-Air-FP8"
     ]

+    @classmethod
+    def get_models(cls, api_key: str = None):
+        """Fetch available models from TogetherAI API.
+
+        Args:
+            api_key (str, optional): TogetherAI API key. If not provided, returns default models.
+
+        Returns:
+            list: List of available model IDs
+        """
+        if not api_key:
+            return cls.AVAILABLE_MODELS
+
+        try:
+            # Use a temporary curl_cffi session for this class method
+            temp_session = Session()
+            headers = {
+                "Content-Type": "application/json",
+                "Authorization": f"Bearer {api_key}",
+            }
+
+            response = temp_session.get(
+                "https://api.together.xyz/v1/models",
+                headers=headers,
+                impersonate="chrome110"
+            )
+
+            if response.status_code != 200:
+                return cls.AVAILABLE_MODELS
+
+            data = response.json()
+            if "data" in data and isinstance(data["data"], list):
+                return [model["id"] for model in data["data"]]
+            return cls.AVAILABLE_MODELS
+
+        except (CurlError, Exception):
+            # Fallback to default models list if fetching fails
+            return cls.AVAILABLE_MODELS
+
+    def update_available_models(self, api_key: str):
+        """Update available models by fetching from TogetherAI API.
+
+        Args:
+            api_key (str): TogetherAI API key for fetching models.
+        """
+        self.AVAILABLE_MODELS = self.get_models(api_key)
+
     @staticmethod
     def _togetherai_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
         """Extracts content from TogetherAI stream JSON objects."""

@@ -92,6 +136,7 @@ class TogetherAI(Provider):

     def __init__(
         self,
+        api_key: str,
         is_conversation: bool = True,
         max_tokens: int = 2049,
         timeout: int = 30,

@@ -103,45 +148,48 @@ class TogetherAI(Provider):
         act: str = None,
         model: str = "meta-llama/Llama-3.1-8B-Instruct-Turbo",
         system_prompt: str = "You are a helpful assistant.",
-        browser: str = "chrome"
     ):
-        """Initializes the TogetherAI API client.
+        """Initializes the TogetherAI API client.
+
+        Args:
+            api_key (str): TogetherAI API key.
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 2049.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): LLM model name. Defaults to "meta-llama/Llama-3.1-8B-Instruct-Turbo".
+            system_prompt (str, optional): System prompt to guide the conversation. Defaults to None.
+        """
+        # Update available models from API
+        self.update_available_models(api_key)
+
+        # Validate model after updating available models
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

         self.api_endpoint = "https://api.together.xyz/v1/chat/completions"
-        self.activation_endpoint = "https://www.codegeneration.ai/activate-v2"
-
-        # Initialize LitAgent
-        self.agent = LitAgent()
-        self.fingerprint = self.agent.generate_fingerprint(browser)
-
-        # Use the fingerprint for headers
-        self.headers = {
-            "Accept": self.fingerprint["accept"],
-            "Accept-Language": self.fingerprint["accept_language"],
-            "Content-Type": "application/json",
-            "Cache-Control": "no-cache",
-            "Origin": "https://www.codegeneration.ai",
-            "Pragma": "no-cache",
-            "Referer": "https://www.codegeneration.ai/",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-site",
-            "User-Agent": self.fingerprint["user_agent"],
-        }
-
-        # Initialize curl_cffi Session
         self.session = Session()
-        self.session.headers.update(self.headers)
-        self.session.proxies = proxies
-        self.system_prompt = system_prompt
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
+        self.api_key = api_key
+        self.model = model
         self.timeout = timeout
         self.last_response = {}
-        self.
-
+        self.system_prompt = system_prompt
+
+        self.headers = {
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {self.api_key}",
+        }
+
+        # Update curl_cffi session headers
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies

         self.__available_optimizers = (
             method

@@ -161,45 +209,7 @@ class TogetherAI(Provider):
         )
         self.conversation.history_offset = history_offset

-    def refresh_identity(self, browser: str = None):
-        """
-        Refreshes the browser identity fingerprint.
-
-        Args:
-            browser: Specific browser to use for the new fingerprint
-        """
-        browser = browser or self.fingerprint.get("browser_type", "chrome")
-        self.fingerprint = self.agent.generate_fingerprint(browser)
-
-        # Update headers with new fingerprint
-        self.headers.update({
-            "Accept": self.fingerprint["accept"],
-            "Accept-Language": self.fingerprint["accept_language"],
-            "User-Agent": self.fingerprint["user_agent"],
-        })
-
-        # Update session headers
-        self.session.headers.update(self.headers)
-
-        return self.fingerprint
-
-    def get_activation_key(self) -> str:
-        """Get API key from activation endpoint"""
-        if self._api_key_cache:
-            return self._api_key_cache

-        try:
-            response = self.session.get(
-                self.activation_endpoint,
-                headers={"Accept": "application/json"},
-                timeout=30
-            )
-            response.raise_for_status()
-            activation_data = response.json()
-            self._api_key_cache = activation_data["openAIParams"]["apiKey"]
-            return self._api_key_cache
-        except Exception as e:
-            raise exceptions.FailedToGenerateResponseError(f"Failed to get activation key: {e}")

     def ask(
         self,

@@ -220,10 +230,7 @@ class TogetherAI(Provider):
             )
         else:
             raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-        api_key = self.get_activation_key()
-        self.headers["Authorization"] = f"Bearer {api_key}"
-        self.session.headers.update(self.headers)
+
         payload = {
             "model": self.model,
             "messages": [

@@ -344,20 +351,7 @@ if __name__ == "__main__":

     for model in TogetherAI.AVAILABLE_MODELS:
         try:
-
-
-            response_text = ""
-            for chunk in response:
-                response_text += chunk
-
-            if response_text and len(response_text.strip()) > 0:
-                status = "✓"
-                # Clean and truncate response
-                clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
-                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
-            else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"\r{model:<50} {status:<10} {display_text}")
+            # Skip testing if no API key is provided
+            print(f"\r{model:<50} {'⚠':<10} Requires API key for testing")
         except Exception as e:
             print(f"\r{model:<50} {'✗':<10} {str(e)}")