euriai 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- euriai/__init__.py +1 -0
- euriai/cli.py +66 -0
- euriai/client.py +115 -0
- euriai-0.2.0.dist-info/METADATA +24 -0
- euriai-0.2.0.dist-info/RECORD +8 -0
- euriai-0.2.0.dist-info/WHEEL +5 -0
- euriai-0.2.0.dist-info/entry_points.txt +2 -0
- euriai-0.2.0.dist-info/top_level.txt +1 -0
euriai/__init__.py
ADDED
@@ -0,0 +1 @@
+from .client import EuriaiClient
euriai/cli.py
ADDED
@@ -0,0 +1,66 @@
+import argparse
+from euriai import EuriaiClient
+
+def show_model_help():
+    print("\nAvailable Models & Recommendations:\n")
+    print(f"{'Provider':<10} {'Model Name':<30} {'ID':<40} {'Best For'}")
+    print("-" * 110)
+    models = [
+        ("OpenAI", "GPT 4.1 Nano", "gpt-4.1-nano", "Fast replies, chatbots"),
+        ("OpenAI", "GPT 4.1 Mini", "gpt-4.1-mini", "Smarter gen, code"),
+        ("Google", "Gemini 2.5 Pro Exp", "gemini-2.5-pro-exp-03-25", "Complex tasks, code, LLM agents"),
+        ("Google", "Gemini 2.0 Flash", "gemini-2.0-flash-001", "Summarization, short Q&A"),
+        ("Meta", "Llama 4 Scout", "llama-4-scout-17b-16e-instruct", "Light assistant, ideas"),
+        ("Meta", "Llama 4 Maverick", "llama-4-maverick-17b-128e-instruct", "Heavy reasoning, long answers"),
+        ("Meta", "Llama 3.3 70B", "llama-3.3-70b-versatile", "Balanced all-round use"),
+        ("DeepSeek", "Deepseek R1 Distilled 70B", "deepseek-r1-distill-llama-70b", "Creative, brainstorming"),
+        ("Qwen", "Qwen QwQ 32B", "qwen-qwq-32b", "Multilingual, logic"),
+        ("Mistral", "Mistral Saba 24B", "mistral-saba-24b", "Summarization, code"),
+    ]
+    for provider, name, model_id, task in models:
+        print(f"{provider:<10} {name:<30} {model_id:<40} {task}")
+
+    print("\nSuggested Temperatures:")
+    print("- 0.2 – 0.4: Deterministic (facts, code)")
+    print("- 0.5 – 0.7: Balanced (Q&A, general content) [Default: 0.7]")
+    print("- 0.8 – 1.0: Creative (poems, storytelling)")
+
+    print("\nSuggested Max Tokens:")
+    print("- 100–300: Short answers / classification")
+    print("- 300–600: Summarization / Q&A")
+    print("- 800–2000: Long-form content")
+
+    print("\nUse:")
+    print("euriai --api_key <KEY> --prompt 'Hello AI' --model gpt-4.1-nano --temperature 0.7\n")
+
+def main():
+    parser = argparse.ArgumentParser(description="Run euriai client")
+    parser.add_argument("--api_key", help="Your EURI API Key")
+    parser.add_argument("--prompt", help="Prompt to send to the model")
+    parser.add_argument("--model", default="gpt-4.1-nano", help="Model ID to use")
+    parser.add_argument("--temperature", type=float, default=0.7, help="Sampling temperature")
+    parser.add_argument("--max_tokens", type=int, default=500, help="Max number of tokens")
+    parser.add_argument("--stream", action="store_true", help="Enable streaming output")
+    parser.add_argument("--models", action="store_true", help="Show all model IDs and suggestions")
+
+    args = parser.parse_args()
+
+    if args.models:
+        show_model_help()
+        return
+
+    if not args.api_key or not args.prompt:
+        parser.error("--api_key and --prompt are required unless using --models")
+
+    client = EuriaiClient(api_key=args.api_key, model=args.model)
+
+    if args.stream:
+        print("Streaming response:\n")
+        for chunk in client.stream_completion(args.prompt, temperature=args.temperature, max_tokens=args.max_tokens):
+            print(chunk, end='', flush=True)
+    else:
+        result = client.generate_completion(args.prompt, temperature=args.temperature, max_tokens=args.max_tokens)
+        print("Response:\n", result)
+
+if __name__ == "__main__":
+    main()
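As an editorial aside (not part of the wheel), the console script above can also be driven in-process, for example from a test. The sketch below exercises only the `--models` path, which prints the model table via `show_model_help()` and never contacts the API; the `euriai.cli:main` import path and the `euriai` program name are taken from the file shown above.

```python
# Minimal sketch: call the CLI's main() in-process instead of the installed
# `euriai` console script. --models only prints the model table, so no API
# key and no network access are needed.
import sys

from euriai.cli import main

if __name__ == "__main__":
    sys.argv = ["euriai", "--models"]  # equivalent to running: euriai --models
    main()
```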
euriai/client.py
ADDED
@@ -0,0 +1,115 @@
+import requests
+from typing import Optional, Dict, Any
+
+class EuriaiClient:
+    def __init__(
+        self,
+        api_key: str,
+        model: str = "gpt-4.1-nano",
+        endpoint: str = "https://api.euron.one/api/v1/euri/alpha/chat/completions"
+    ):
+        """
+        Initializes the EuriaiClient.
+
+        Args:
+            api_key (str): Your EURI API key.
+            model (str, optional): Model ID to use (e.g., 'gpt-4.1-nano', 'gemini-2.0-flash-001').
+            endpoint (str, optional): API endpoint URL.
+        """
+        self.api_key = api_key
+        self.model = model
+        self.endpoint = endpoint
+
+    def generate_completion(
+        self,
+        prompt: str,
+        temperature: float = 0.7,
+        max_tokens: int = 500,
+        top_p: Optional[float] = None,
+        frequency_penalty: Optional[float] = None,
+        presence_penalty: Optional[float] = None,
+        stop: Optional[list[str]] = None,
+    ) -> Dict[str, Any]:
+        """
+        Generates a non-streamed completion from the model.
+
+        Args:
+            prompt (str): The user's prompt to send to the model.
+            temperature (float, optional): Sampling temperature (0.2–1.0). Defaults to 0.7.
+            max_tokens (int, optional): Maximum number of output tokens. Defaults to 500.
+            top_p (float, optional): Nucleus sampling value.
+            frequency_penalty (float, optional): Penalizes repetition.
+            presence_penalty (float, optional): Encourages new topic generation.
+            stop (list of str, optional): Stop sequences to end generation.
+
+        Returns:
+            dict: JSON response from the API.
+        """
+        headers = {
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {self.api_key}"
+        }
+
+        payload: Dict[str, Any] = {
+            "model": self.model,
+            "messages": [{"role": "user", "content": prompt}],
+            "temperature": temperature,
+            "max_tokens": max_tokens,
+        }
+
+        if top_p is not None:
+            payload["top_p"] = top_p
+        if frequency_penalty is not None:
+            payload["frequency_penalty"] = frequency_penalty
+        if presence_penalty is not None:
+            payload["presence_penalty"] = presence_penalty
+        if stop is not None:
+            payload["stop"] = stop
+
+        response = requests.post(self.endpoint, headers=headers, json=payload)
+        response.raise_for_status()
+        return response.json()
+
+    def stream_completion(
+        self,
+        prompt: str,
+        temperature: float = 0.7,
+        max_tokens: int = 500,
+        top_p: Optional[float] = None,
+        frequency_penalty: Optional[float] = None,
+        presence_penalty: Optional[float] = None,
+        stop: Optional[list[str]] = None,
+    ):
+        """
+        Streams a response token-by-token (if the model supports streaming).
+
+        Yields:
+            str: Each chunk of the streamed output.
+        """
+        headers = {
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {self.api_key}"
+        }
+
+        payload: Dict[str, Any] = {
+            "model": self.model,
+            "messages": [{"role": "user", "content": prompt}],
+            "temperature": temperature,
+            "max_tokens": max_tokens,
+            "stream": True
+        }
+
+        if top_p is not None:
+            payload["top_p"] = top_p
+        if frequency_penalty is not None:
+            payload["frequency_penalty"] = frequency_penalty
+        if presence_penalty is not None:
+            payload["presence_penalty"] = presence_penalty
+        if stop is not None:
+            payload["stop"] = stop
+
+        with requests.post(self.endpoint, headers=headers, json=payload, stream=True) as response:
+            response.raise_for_status()
+            for line in response.iter_lines():
+                if line:
+                    yield line.decode("utf-8")
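For illustration (again, not part of the wheel), here is a minimal sketch of calling the client above. The response-parsing line assumes the endpoint returns an OpenAI-style `choices[0]["message"]["content"]` structure, which this diff does not show, so treat that shape as an assumption; note also that `stream_completion` yields each HTTP line verbatim, so any server-sent-event framing such as a leading `data: ` prefix is left for the caller to strip.

```python
# Sketch only: assumes a valid EURI API key and an OpenAI-compatible response schema.
from euriai import EuriaiClient

client = EuriaiClient(api_key="YOUR_EURI_API_KEY", model="gpt-4.1-nano")

# Non-streamed call: generate_completion() returns the parsed JSON body as a dict.
result = client.generate_completion("Write a haiku about wheels.", temperature=0.3, max_tokens=100)
# Assumed response shape; adjust if the API returns something different.
print(result["choices"][0]["message"]["content"])

# Streamed call: the generator yields raw decoded lines from the HTTP stream.
for line in client.stream_completion("Count to five.", max_tokens=50):
    # Lines are not post-processed by the client; "data: " framing, if present,
    # must be handled here.
    print(line)
```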
euriai-0.2.0.dist-info/METADATA
ADDED
@@ -0,0 +1,24 @@
+Metadata-Version: 2.1
+Name: euriai
+Version: 0.2.0
+Summary: Python client for EURI LLM API (euron.one) with CLI and interactive wizard
+Author: euron.one
+Author-email: sudhanshu@euron.one
+License: MIT
+Classifier: Programming Language :: Python :: 3
+Classifier: Operating System :: OS Independent
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Requires-Python: >=3.6
+Description-Content-Type: text/markdown
+Requires-Dist: requests
+
+# euriai
+
+EURI AI Python Client – Simple wrapper and CLI tool for the Euron LLM API.
+
+## Install
+
+```bash
+pip install euriai
euriai-0.2.0.dist-info/RECORD
ADDED
@@ -0,0 +1,8 @@
+euriai/__init__.py,sha256=jyfCq3Mc-PrsIK2SGU49o7ATy834G74oDfiKJsYa-A4,34
+euriai/cli.py,sha256=hF1wiiL2QQSfWf8WlLQyNVDBd4YkbiwmMSoPxVbyPTM,3290
+euriai/client.py,sha256=USiqdMULgAiky7nkrJKF3FyKcOS2DtDmUdbeBSnyLYk,4076
+euriai-0.2.0.dist-info/METADATA,sha256=FYbi-QQCfsjFujbtFdT1a_tKK2sroPAPBOJLQB0qGV4,681
+euriai-0.2.0.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+euriai-0.2.0.dist-info/entry_points.txt,sha256=9OkET8KIGcsjQn8UlnpPKRT75s2KW34jq1__1SXtpMA,43
+euriai-0.2.0.dist-info/top_level.txt,sha256=TG1htJ8cuD62MXn-NJ7DVF21QHY16w6M_QgfF_Er_EQ,7
+euriai-0.2.0.dist-info/RECORD,,
euriai-0.2.0.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
+euriai