euriai 0.3.12__tar.gz → 0.3.14__tar.gz

This diff shows the content of publicly available package versions released to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: euriai
-Version: 0.3.12
+Version: 0.3.14
 Summary: Python client for EURI LLM API (euron.one) with CLI, LangChain, and LlamaIndex integration
 Author: euron.one
 Author-email: sudhanshu@euron.one
@@ -0,0 +1,64 @@
+import requests
+from typing import List
+from dataclasses import dataclass, field
+from llama_index.core.llms import LLM
+from llama_index.core.base.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen
+
+
+@dataclass
+class EuriaiLlamaIndexLLM(LLM):
+    api_key: str
+    model: str = "gpt-4.1-nano"
+    temperature: float = 0.7
+    max_tokens: int = 1000
+    url: str = field(default="https://api.euron.one/api/v1/chat/completions", init=False)
+
+    @property
+    def metadata(self):
+        return {
+            "context_window": 8000,
+            "num_output": self.max_tokens,
+            "is_chat_model": True,
+            "model_name": self.model,
+        }
+
+    def chat(self, messages: List[ChatMessage], **kwargs) -> CompletionResponse:
+        headers = {
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {self.api_key}",
+        }
+        payload = {
+            "messages": [{"role": m.role, "content": m.content} for m in messages],
+            "model": self.model,
+            "temperature": self.temperature,
+            "max_tokens": self.max_tokens,
+        }
+
+        response = requests.post(self.url, headers=headers, json=payload)
+        response.raise_for_status()
+        result = response.json()
+        content = result["choices"][0]["message"]["content"]
+
+        return CompletionResponse(text=content)
+
+    def complete(self, prompt: str, **kwargs) -> CompletionResponse:
+        return self.chat([ChatMessage(role="user", content=prompt)])
+
+    # Async & streaming not implemented
+    async def achat(self, messages: List[ChatMessage], **kwargs) -> CompletionResponse:
+        raise NotImplementedError("Async chat not implemented for Euriai.")
+
+    async def acomplete(self, prompt: str, **kwargs) -> CompletionResponse:
+        raise NotImplementedError("Async complete not implemented for Euriai.")
+
+    def stream_chat(self, messages: List[ChatMessage], **kwargs) -> CompletionResponseGen:
+        raise NotImplementedError("Streaming not supported.")
+
+    def stream_complete(self, prompt: str, **kwargs) -> CompletionResponseGen:
+        raise NotImplementedError("Streaming not supported.")
+
+    async def astream_chat(self, messages: List[ChatMessage], **kwargs) -> CompletionResponseGen:
+        raise NotImplementedError("Async streaming not supported.")
+
+    async def astream_complete(self, prompt: str, **kwargs) -> CompletionResponseGen:
+        raise NotImplementedError("Async streaming not supported.")
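
The file added above rewrites the LlamaIndex integration as a dataclass-based LLM subclass that only supports synchronous calls; the async and streaming methods all raise NotImplementedError in 0.3.14. A minimal usage sketch follows. The import path `euriai.llamaindex` and the API key value are illustrative assumptions, not confirmed by this diff; check the installed package for the actual module name.

```python
# Hedged usage sketch for the new EuriaiLlamaIndexLLM class shown above.
from euriai.llamaindex import EuriaiLlamaIndexLLM  # hypothetical import path

# `url` is fixed via field(init=False), so only the API key and optional
# generation settings are passed at construction time.
llm = EuriaiLlamaIndexLLM(
    api_key="YOUR_EURI_API_KEY",  # placeholder, not a real key
    model="gpt-4.1-nano",
    temperature=0.7,
    max_tokens=1000,
)

# complete() wraps the prompt in a single user ChatMessage and posts it to
# the EURI chat-completions endpoint; only the synchronous path is usable.
print(llm.complete("Summarize what a vector index does.").text)
```
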
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: euriai
-Version: 0.3.12
+Version: 0.3.14
 Summary: Python client for EURI LLM API (euron.one) with CLI, LangChain, and LlamaIndex integration
 Author: euron.one
 Author-email: sudhanshu@euron.one
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 
 setup(
     name="euriai",
-    version="0.3.12",
+    version="0.3.14",
     description="Python client for EURI LLM API (euron.one) with CLI, LangChain, and LlamaIndex integration",
    long_description=open("README.md", encoding="utf-8").read(),
     long_description_content_type="text/markdown",
@@ -1,33 +0,0 @@
-import requests
-from llama_index.core.llms import LLM
-from llama_index.core.base.llms.types import ChatMessage, CompletionResponse
-from typing import List
-
-
-class EuriaiLlamaIndexLLM(LLM):
-    def __init__(self, api_key: str, model: str = "gpt-4.1-nano", temperature: float = 0.7, max_tokens: int = 1000):
-        self.api_key = api_key
-        self.model = model
-        self.temperature = temperature
-        self.max_tokens = max_tokens
-        self.url = "https://api.euron.one/api/v1/chat/completions"
-
-    def complete(self, prompt: str, **kwargs) -> CompletionResponse:
-        return self.chat([ChatMessage(role="user", content=prompt)])
-
-    def chat(self, messages: List[ChatMessage], **kwargs) -> CompletionResponse:
-        payload = {
-            "messages": [{"role": m.role, "content": m.content} for m in messages],
-            "model": self.model,
-            "temperature": self.temperature,
-            "max_tokens": self.max_tokens
-        }
-        headers = {
-            "Content-Type": "application/json",
-            "Authorization": f"Bearer {self.api_key}"
-        }
-        response = requests.post(self.url, headers=headers, json=payload)
-        response.raise_for_status()
-        result = response.json()
-        content = result["choices"][0]["message"]["content"]
-        return CompletionResponse(text=content)
7 files without changes