euriai 0.3.30__tar.gz → 0.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- euriai-0.4/PKG-INFO +282 -0
- euriai-0.4/README.md +236 -0
- euriai-0.4/euriai/__init__.py +27 -0
- {euriai-0.3.30 → euriai-0.4}/euriai/client.py +2 -2
- {euriai-0.3.30 → euriai-0.4}/euriai/embedding.py +1 -1
- euriai-0.4/euriai/euri_autogen.py +74 -0
- {euriai-0.3.30 → euriai-0.4}/euriai/euri_chat.py +1 -1
- euriai-0.4/euriai/euri_crewai.py +92 -0
- {euriai-0.3.30 → euriai-0.4}/euriai/euri_embed.py +1 -1
- euriai-0.4/euriai/euri_langgraph.py +64 -0
- euriai-0.4/euriai/euri_llamaindex.py +58 -0
- euriai-0.4/euriai/euri_n8n.py +30 -0
- euriai-0.4/euriai/euri_smolagents.py +44 -0
- {euriai-0.3.30 → euriai-0.4}/euriai/langchain_embed.py +4 -1
- {euriai-0.3.30 → euriai-0.4}/euriai/langchain_llm.py +4 -1
- euriai-0.4/euriai.egg-info/PKG-INFO +282 -0
- {euriai-0.3.30 → euriai-0.4}/euriai.egg-info/SOURCES.txt +10 -1
- euriai-0.4/euriai.egg-info/requires.txt +30 -0
- euriai-0.4/setup.cfg +11 -0
- euriai-0.4/setup.py +41 -0
- euriai-0.4/tests/test_euri_crewai.py +101 -0
- euriai-0.4/tests/test_langchain_llm.py +37 -0
- euriai-0.3.30/PKG-INFO +0 -135
- euriai-0.3.30/README.md +0 -108
- euriai-0.3.30/euriai/__init__.py +0 -15
- euriai-0.3.30/euriai.egg-info/PKG-INFO +0 -135
- euriai-0.3.30/euriai.egg-info/requires.txt +0 -4
- euriai-0.3.30/setup.cfg +0 -4
- euriai-0.3.30/setup.py +0 -32
- {euriai-0.3.30 → euriai-0.4}/euriai/cli.py +0 -0
- {euriai-0.3.30 → euriai-0.4}/euriai.egg-info/dependency_links.txt +0 -0
- {euriai-0.3.30 → euriai-0.4}/euriai.egg-info/entry_points.txt +0 -0
- {euriai-0.3.30 → euriai-0.4}/euriai.egg-info/top_level.txt +0 -0
euriai-0.4/PKG-INFO
ADDED
@@ -0,0 +1,282 @@
|
|
1
|
+
Metadata-Version: 2.4
|
2
|
+
Name: euriai
|
3
|
+
Version: 0.4
|
4
|
+
Summary: Python client for Euri API (euron.one) with CLI, LangChain, and LlamaIndex integration
|
5
|
+
Author: Euri
|
6
|
+
Author-email: tech@euron.one
|
7
|
+
License: MIT
|
8
|
+
Keywords: euriai,llm,langchain,llamaindex,langgraph,smolagents,n8n,agents,ai,sdk
|
9
|
+
Classifier: Programming Language :: Python :: 3
|
10
|
+
Classifier: Operating System :: OS Independent
|
11
|
+
Classifier: License :: OSI Approved :: MIT License
|
12
|
+
Classifier: Intended Audience :: Developers
|
13
|
+
Requires-Python: >=3.6
|
14
|
+
Description-Content-Type: text/markdown
|
15
|
+
Requires-Dist: requests
|
16
|
+
Requires-Dist: numpy
|
17
|
+
Requires-Dist: pyyaml
|
18
|
+
Provides-Extra: langchain-core
|
19
|
+
Requires-Dist: langchain-core; extra == "langchain-core"
|
20
|
+
Provides-Extra: langchain
|
21
|
+
Requires-Dist: langchain; extra == "langchain"
|
22
|
+
Provides-Extra: llama-index
|
23
|
+
Requires-Dist: llama-index>=0.10.0; extra == "llama-index"
|
24
|
+
Provides-Extra: langgraph
|
25
|
+
Requires-Dist: langgraph; extra == "langgraph"
|
26
|
+
Provides-Extra: smolagents
|
27
|
+
Requires-Dist: smolagents; extra == "smolagents"
|
28
|
+
Provides-Extra: n8n
|
29
|
+
Requires-Dist: requests; extra == "n8n"
|
30
|
+
Provides-Extra: crewai
|
31
|
+
Requires-Dist: crewai; extra == "crewai"
|
32
|
+
Provides-Extra: autogen
|
33
|
+
Requires-Dist: pyautogen; extra == "autogen"
|
34
|
+
Provides-Extra: test
|
35
|
+
Requires-Dist: pytest; extra == "test"
|
36
|
+
Dynamic: author
|
37
|
+
Dynamic: author-email
|
38
|
+
Dynamic: classifier
|
39
|
+
Dynamic: description
|
40
|
+
Dynamic: description-content-type
|
41
|
+
Dynamic: license
|
42
|
+
Dynamic: provides-extra
|
43
|
+
Dynamic: requires-dist
|
44
|
+
Dynamic: requires-python
|
45
|
+
Dynamic: summary
|
46
|
+
|
47
|
+
# euriai 🧠
|
48
|
+
|
49
|
+
**EURI AI Python Client** – A simple wrapper and CLI tool for the [Euri API](https://euron.one/euri). Supports completions, streaming responses, embeddings, CLI interaction, and an interactive guided wizard!
|
50
|
+
|
51
|
+
---
|
52
|
+
|
53
|
+
## 🔧 Installation
|
54
|
+
|
55
|
+
```bash
|
56
|
+
pip install euriai
|
57
|
+
```
|
58
|
+
|
59
|
+
## 🚀 Python Usage
|
60
|
+
|
61
|
+
### Text Generation
|
62
|
+
|
63
|
+
```python
|
64
|
+
from euriai import EuriaiClient
|
65
|
+
|
66
|
+
client = EuriaiClient(
|
67
|
+
api_key="your_api_key_here",
|
68
|
+
model="gpt-4.1-nano" # You can also try: "gemini-2.0-flash-001", "llama-4-maverick", etc.
|
69
|
+
)
|
70
|
+
|
71
|
+
response = client.generate_completion(
|
72
|
+
prompt="Write a short poem about artificial intelligence.",
|
73
|
+
temperature=0.7,
|
74
|
+
max_tokens=300
|
75
|
+
)
|
76
|
+
|
77
|
+
print(response)
|
78
|
+
```
|
79
|
+
|
80
|
+
### Embeddings
|
81
|
+
|
82
|
+
```python
|
83
|
+
from euriai.embedding import EuriaiEmbeddingClient
|
84
|
+
|
85
|
+
client = EuriaiEmbeddingClient(api_key="your_key")
|
86
|
+
embedding = client.embed("Hello world")
|
87
|
+
print(embedding[:5]) # Print first 5 dimensions of the embedding vector
|
88
|
+
```
|
89
|
+
|
90
|
+
## 💻 Command-Line Interface (CLI) Usage
|
91
|
+
|
92
|
+
Run prompts directly from the terminal:
|
93
|
+
|
94
|
+
```bash
|
95
|
+
euriai --api_key YOUR_API_KEY --prompt "Tell me a joke"
|
96
|
+
```
|
97
|
+
|
98
|
+
Enable streaming output (if supported by the model):
|
99
|
+
|
100
|
+
```bash
|
101
|
+
euriai --api_key YOUR_API_KEY --prompt "Stream a fun fact" --stream
|
102
|
+
```
|
103
|
+
|
104
|
+
List all supported model IDs with recommended use-cases and temperature/token advice:
|
105
|
+
|
106
|
+
```bash
|
107
|
+
euriai --models
|
108
|
+
```
|
109
|
+
|
110
|
+
## 🤖 LangChain Integration
|
111
|
+
|
112
|
+
### Text Generation
|
113
|
+
|
114
|
+
Use Euriai with LangChain directly:
|
115
|
+
|
116
|
+
```python
|
117
|
+
from euriai import EuriaiLangChainLLM
|
118
|
+
|
119
|
+
llm = EuriaiLangChainLLM(
|
120
|
+
api_key="your_api_key",
|
121
|
+
model="gpt-4.1-nano",
|
122
|
+
temperature=0.7,
|
123
|
+
max_tokens=300
|
124
|
+
)
|
125
|
+
|
126
|
+
print(llm.invoke("Write a poem about time travel."))
|
127
|
+
```
|
128
|
+
|
129
|
+
### Embeddings
|
130
|
+
|
131
|
+
Use Euriai embeddings with LangChain:
|
132
|
+
|
133
|
+
```python
|
134
|
+
from euriai.langchain_embed import EuriaiEmbeddings
|
135
|
+
|
136
|
+
embedding_model = EuriaiEmbeddings(api_key="your_key")
|
137
|
+
print(embedding_model.embed_query("What's AI?")[:5]) # Print first 5 dimensions
|
138
|
+
```
|
139
|
+
|
140
|
+
## Usage Examples
|
141
|
+
|
142
|
+
### CrewAI Integration
|
143
|
+
```python
|
144
|
+
from euriai import EuriaiCrewAI
|
145
|
+
|
146
|
+
# Example: Create a crew from YAML config files
|
147
|
+
crew = EuriaiCrewAI.from_yaml('agents.yaml', 'tasks.yaml')
|
148
|
+
result = crew.run(inputs={"topic": "AI in Healthcare"})
|
149
|
+
print(result)
|
150
|
+
|
151
|
+
# Or programmatically
|
152
|
+
crew = EuriaiCrewAI()
|
153
|
+
crew.add_agent("researcher", {
|
154
|
+
"role": "Researcher",
|
155
|
+
"goal": "Find information about {topic}",
|
156
|
+
"llm": "openai/gpt-4o"
|
157
|
+
})
|
158
|
+
crew.add_task("research_task", {
|
159
|
+
"description": "Research the topic {topic}",
|
160
|
+
"agent": "researcher"
|
161
|
+
})
|
162
|
+
crew.build_crew()
|
163
|
+
result = crew.run(inputs={"topic": "AI in Healthcare"})
|
164
|
+
print(result)
|
165
|
+
```
|
166
|
+
|
167
|
+
### AutoGen Integration
|
168
|
+
```python
|
169
|
+
from euriai import EuriaiAutoGen
|
170
|
+
|
171
|
+
autogen = EuriaiAutoGen()
|
172
|
+
# Add an agent (see AutoGen docs for agent config details)
|
173
|
+
agent = autogen.add_agent({
|
174
|
+
"name": "assistant",
|
175
|
+
"llm_config": {"api_key": "YOUR_OPENAI_KEY", "model": "gpt-4o"}
|
176
|
+
})
|
177
|
+
# Run a chat
|
178
|
+
response = autogen.run_chat("Hello, what is the weather today?")
|
179
|
+
print(response)
|
180
|
+
# Access chat history
|
181
|
+
print(autogen.get_history())
|
182
|
+
```
|
183
|
+
|
184
|
+
### LlamaIndex Integration
|
185
|
+
```python
|
186
|
+
from euriai import EuriaiLlamaIndex
|
187
|
+
|
188
|
+
llama = EuriaiLlamaIndex()
|
189
|
+
llama.add_documents([
|
190
|
+
"Abraham Lincoln was the 16th President of the United States.",
|
191
|
+
"He led the country during the American Civil War."
|
192
|
+
])
|
193
|
+
llama.build_index()
|
194
|
+
response = llama.query("Who was Abraham Lincoln?")
|
195
|
+
print(response)
|
196
|
+
```
|
197
|
+
|
198
|
+
### LangGraph Integration
|
199
|
+
```python
|
200
|
+
from euriai import EuriaiLangGraph
|
201
|
+
|
202
|
+
def greet_node(state):
|
203
|
+
print(f"Hello, {state['name']}!")
|
204
|
+
state['greeted'] = True
|
205
|
+
return state
|
206
|
+
|
207
|
+
def farewell_node(state):
|
208
|
+
if state.get('greeted'):
|
209
|
+
print(f"Goodbye, {state['name']}!")
|
210
|
+
return state
|
211
|
+
|
212
|
+
# Create the graph
|
213
|
+
graph = EuriaiLangGraph()
|
214
|
+
graph.add_node("greet", greet_node)
|
215
|
+
graph.add_node("farewell", farewell_node)
|
216
|
+
graph.add_edge("greet", "farewell")
|
217
|
+
graph.set_state({"name": "Alice"})
|
218
|
+
result = graph.run()
|
219
|
+
print(result)
|
220
|
+
```
|
221
|
+
|
222
|
+
---
|
223
|
+
|
224
|
+
### SmolAgents Integration
|
225
|
+
|
226
|
+
```python
|
227
|
+
from euriai import EuriaiSmolAgent
|
228
|
+
|
229
|
+
# Define a tool using the @tool decorator
|
230
|
+
try:
|
231
|
+
from smolagents import tool
|
232
|
+
except ImportError:
|
233
|
+
raise ImportError("Please install smolagents: pip install smolagents")
|
234
|
+
|
235
|
+
@tool
|
236
|
+
def add(a: int, b: int) -> int:
|
237
|
+
"""Add two numbers."""
|
238
|
+
return a + b
|
239
|
+
|
240
|
+
# Create the agent with the tool
|
241
|
+
agent = EuriaiSmolAgent(tools=[add])
|
242
|
+
response = agent.run("What is 2 + 3?")
|
243
|
+
print(response)
|
244
|
+
```
|
245
|
+
|
246
|
+
---
|
247
|
+
|
248
|
+
### n8n Integration
|
249
|
+
|
250
|
+
```python
|
251
|
+
from euriai import EuriaiN8N
|
252
|
+
|
253
|
+
# Initialize with your n8n instance URL and (optionally) API key
|
254
|
+
n8n = EuriaiN8N(base_url="http://localhost:5678", api_key="YOUR_N8N_API_KEY")
|
255
|
+
|
256
|
+
# Trigger a workflow by its webhook ID, passing data as needed
|
257
|
+
workflow_id = "your-workflow-webhook-id"
|
258
|
+
data = {"message": "Hello from EURI SDK!"}
|
259
|
+
result = n8n.trigger_workflow(workflow_id, data)
|
260
|
+
print(result)
|
261
|
+
```
|
262
|
+
|
263
|
+
---
|
264
|
+
|
265
|
+
These code examples can be copied directly into your own applications.
|
266
|
+
For more advanced examples (e.g., multi-tool SmolAgents, LangGraph with more nodes, or n8n with authentication), see the official documentation.
|
267
|
+
|
268
|
+
## 📘 Documentation
|
269
|
+
|
270
|
+
For full documentation, visit our [official docs site](https://euron.one/euri).
|
271
|
+
|
272
|
+
## 🔑 Getting an API Key
|
273
|
+
|
274
|
+
Sign up for an API key at [Euron AI Platform](https://euron.one/euri).
|
275
|
+
|
276
|
+
## 🤝 Contributing
|
277
|
+
|
278
|
+
Contributions are welcome! Please feel free to submit a Pull Request.
|
279
|
+
|
280
|
+
## 📄 License
|
281
|
+
|
282
|
+
This project is licensed under the MIT License - see the LICENSE file for details.
|
euriai-0.4/README.md
ADDED
@@ -0,0 +1,236 @@
|
|
1
|
+
# euriai 🧠
|
2
|
+
|
3
|
+
**EURI AI Python Client** – A simple wrapper and CLI tool for the [Euri API](https://euron.one/euri). Supports completions, streaming responses, embeddings, CLI interaction, and an interactive guided wizard!
|
4
|
+
|
5
|
+
---
|
6
|
+
|
7
|
+
## 🔧 Installation
|
8
|
+
|
9
|
+
```bash
|
10
|
+
pip install euriai
|
11
|
+
```
|
12
|
+
|
13
|
+
## 🚀 Python Usage
|
14
|
+
|
15
|
+
### Text Generation
|
16
|
+
|
17
|
+
```python
|
18
|
+
from euriai import EuriaiClient
|
19
|
+
|
20
|
+
client = EuriaiClient(
|
21
|
+
api_key="your_api_key_here",
|
22
|
+
model="gpt-4.1-nano" # You can also try: "gemini-2.0-flash-001", "llama-4-maverick", etc.
|
23
|
+
)
|
24
|
+
|
25
|
+
response = client.generate_completion(
|
26
|
+
prompt="Write a short poem about artificial intelligence.",
|
27
|
+
temperature=0.7,
|
28
|
+
max_tokens=300
|
29
|
+
)
|
30
|
+
|
31
|
+
print(response)
|
32
|
+
```
|
33
|
+
|
34
|
+
### Embeddings
|
35
|
+
|
36
|
+
```python
|
37
|
+
from euriai.embedding import EuriaiEmbeddingClient
|
38
|
+
|
39
|
+
client = EuriaiEmbeddingClient(api_key="your_key")
|
40
|
+
embedding = client.embed("Hello world")
|
41
|
+
print(embedding[:5]) # Print first 5 dimensions of the embedding vector
|
42
|
+
```
|
43
|
+
|
44
|
+
## 💻 Command-Line Interface (CLI) Usage
|
45
|
+
|
46
|
+
Run prompts directly from the terminal:
|
47
|
+
|
48
|
+
```bash
|
49
|
+
euriai --api_key YOUR_API_KEY --prompt "Tell me a joke"
|
50
|
+
```
|
51
|
+
|
52
|
+
Enable streaming output (if supported by the model):
|
53
|
+
|
54
|
+
```bash
|
55
|
+
euriai --api_key YOUR_API_KEY --prompt "Stream a fun fact" --stream
|
56
|
+
```
|
57
|
+
|
58
|
+
List all supported model IDs with recommended use-cases and temperature/token advice:
|
59
|
+
|
60
|
+
```bash
|
61
|
+
euriai --models
|
62
|
+
```
|
63
|
+
|
64
|
+
## 🤖 LangChain Integration
|
65
|
+
|
66
|
+
### Text Generation
|
67
|
+
|
68
|
+
Use Euriai with LangChain directly:
|
69
|
+
|
70
|
+
```python
|
71
|
+
from euriai import EuriaiLangChainLLM
|
72
|
+
|
73
|
+
llm = EuriaiLangChainLLM(
|
74
|
+
api_key="your_api_key",
|
75
|
+
model="gpt-4.1-nano",
|
76
|
+
temperature=0.7,
|
77
|
+
max_tokens=300
|
78
|
+
)
|
79
|
+
|
80
|
+
print(llm.invoke("Write a poem about time travel."))
|
81
|
+
```
|
82
|
+
|
83
|
+
### Embeddings
|
84
|
+
|
85
|
+
Use Euriai embeddings with LangChain:
|
86
|
+
|
87
|
+
```python
|
88
|
+
from euriai.langchain_embed import EuriaiEmbeddings
|
89
|
+
|
90
|
+
embedding_model = EuriaiEmbeddings(api_key="your_key")
|
91
|
+
print(embedding_model.embed_query("What's AI?")[:5]) # Print first 5 dimensions
|
92
|
+
```
|
93
|
+
|
94
|
+
## Usage Examples
|
95
|
+
|
96
|
+
### CrewAI Integration
|
97
|
+
```python
|
98
|
+
from euriai import EuriaiCrewAI
|
99
|
+
|
100
|
+
# Example: Create a crew from YAML config files
|
101
|
+
crew = EuriaiCrewAI.from_yaml('agents.yaml', 'tasks.yaml')
|
102
|
+
result = crew.run(inputs={"topic": "AI in Healthcare"})
|
103
|
+
print(result)
|
104
|
+
|
105
|
+
# Or programmatically
|
106
|
+
crew = EuriaiCrewAI()
|
107
|
+
crew.add_agent("researcher", {
|
108
|
+
"role": "Researcher",
|
109
|
+
"goal": "Find information about {topic}",
|
110
|
+
"llm": "openai/gpt-4o"
|
111
|
+
})
|
112
|
+
crew.add_task("research_task", {
|
113
|
+
"description": "Research the topic {topic}",
|
114
|
+
"agent": "researcher"
|
115
|
+
})
|
116
|
+
crew.build_crew()
|
117
|
+
result = crew.run(inputs={"topic": "AI in Healthcare"})
|
118
|
+
print(result)
|
119
|
+
```
|
120
|
+
|
121
|
+
### AutoGen Integration
|
122
|
+
```python
|
123
|
+
from euriai import EuriaiAutoGen
|
124
|
+
|
125
|
+
autogen = EuriaiAutoGen()
|
126
|
+
# Add an agent (see AutoGen docs for agent config details)
|
127
|
+
agent = autogen.add_agent({
|
128
|
+
"name": "assistant",
|
129
|
+
"llm_config": {"api_key": "YOUR_OPENAI_KEY", "model": "gpt-4o"}
|
130
|
+
})
|
131
|
+
# Run a chat
|
132
|
+
response = autogen.run_chat("Hello, what is the weather today?")
|
133
|
+
print(response)
|
134
|
+
# Access chat history
|
135
|
+
print(autogen.get_history())
|
136
|
+
```
|
137
|
+
|
138
|
+
### LlamaIndex Integration
|
139
|
+
```python
|
140
|
+
from euriai import EuriaiLlamaIndex
|
141
|
+
|
142
|
+
llama = EuriaiLlamaIndex()
|
143
|
+
llama.add_documents([
|
144
|
+
"Abraham Lincoln was the 16th President of the United States.",
|
145
|
+
"He led the country during the American Civil War."
|
146
|
+
])
|
147
|
+
llama.build_index()
|
148
|
+
response = llama.query("Who was Abraham Lincoln?")
|
149
|
+
print(response)
|
150
|
+
```
|
151
|
+
|
152
|
+
### LangGraph Integration
|
153
|
+
```python
|
154
|
+
from euriai import EuriaiLangGraph
|
155
|
+
|
156
|
+
def greet_node(state):
|
157
|
+
print(f"Hello, {state['name']}!")
|
158
|
+
state['greeted'] = True
|
159
|
+
return state
|
160
|
+
|
161
|
+
def farewell_node(state):
|
162
|
+
if state.get('greeted'):
|
163
|
+
print(f"Goodbye, {state['name']}!")
|
164
|
+
return state
|
165
|
+
|
166
|
+
# Create the graph
|
167
|
+
graph = EuriaiLangGraph()
|
168
|
+
graph.add_node("greet", greet_node)
|
169
|
+
graph.add_node("farewell", farewell_node)
|
170
|
+
graph.add_edge("greet", "farewell")
|
171
|
+
graph.set_state({"name": "Alice"})
|
172
|
+
result = graph.run()
|
173
|
+
print(result)
|
174
|
+
```
|
175
|
+
|
176
|
+
---
|
177
|
+
|
178
|
+
### SmolAgents Integration
|
179
|
+
|
180
|
+
```python
|
181
|
+
from euriai import EuriaiSmolAgent
|
182
|
+
|
183
|
+
# Define a tool using the @tool decorator
|
184
|
+
try:
|
185
|
+
from smolagents import tool
|
186
|
+
except ImportError:
|
187
|
+
raise ImportError("Please install smolagents: pip install smolagents")
|
188
|
+
|
189
|
+
@tool
|
190
|
+
def add(a: int, b: int) -> int:
|
191
|
+
"""Add two numbers."""
|
192
|
+
return a + b
|
193
|
+
|
194
|
+
# Create the agent with the tool
|
195
|
+
agent = EuriaiSmolAgent(tools=[add])
|
196
|
+
response = agent.run("What is 2 + 3?")
|
197
|
+
print(response)
|
198
|
+
```
|
199
|
+
|
200
|
+
---
|
201
|
+
|
202
|
+
### n8n Integration
|
203
|
+
|
204
|
+
```python
|
205
|
+
from euriai import EuriaiN8N
|
206
|
+
|
207
|
+
# Initialize with your n8n instance URL and (optionally) API key
|
208
|
+
n8n = EuriaiN8N(base_url="http://localhost:5678", api_key="YOUR_N8N_API_KEY")
|
209
|
+
|
210
|
+
# Trigger a workflow by its webhook ID, passing data as needed
|
211
|
+
workflow_id = "your-workflow-webhook-id"
|
212
|
+
data = {"message": "Hello from EURI SDK!"}
|
213
|
+
result = n8n.trigger_workflow(workflow_id, data)
|
214
|
+
print(result)
|
215
|
+
```
|
216
|
+
|
217
|
+
---
|
218
|
+
|
219
|
+
These code examples can be copied directly into your own applications.
|
220
|
+
For more advanced examples (e.g., multi-tool SmolAgents, LangGraph with more nodes, or n8n with authentication), see the official documentation.
|
221
|
+
|
222
|
+
## 📘 Documentation
|
223
|
+
|
224
|
+
For full documentation, visit our [official docs site](https://euron.one/euri).
|
225
|
+
|
226
|
+
## 🔑 Getting an API Key
|
227
|
+
|
228
|
+
Sign up for an API key at [Euron AI Platform](https://euron.one/euri).
|
229
|
+
|
230
|
+
## 🤝 Contributing
|
231
|
+
|
232
|
+
Contributions are welcome! Please feel free to submit a Pull Request.
|
233
|
+
|
234
|
+
## 📄 License
|
235
|
+
|
236
|
+
This project is licensed under the MIT License - see the LICENSE file for details.
|
@@ -0,0 +1,27 @@
|
|
1
|
+
from .client import EuriaiClient
|
2
|
+
from .langchain_llm import EuriaiLangChainLLM
|
3
|
+
from .embedding import EuriaiEmbeddingClient
|
4
|
+
from .langchain_embed import EuriaiEmbeddings
|
5
|
+
from .euri_chat import EuriaiLlamaIndexLLM
|
6
|
+
from .euri_embed import EuriaiLlamaIndexEmbedding
|
7
|
+
from .euri_crewai import EuriaiCrewAI
|
8
|
+
from .euri_autogen import EuriaiAutoGen
|
9
|
+
from .euri_llamaindex import EuriaiLlamaIndex
|
10
|
+
from .euri_langgraph import EuriaiLangGraph
|
11
|
+
from .euri_smolagents import EuriaiSmolAgent
|
12
|
+
from .euri_n8n import EuriaiN8N
|
13
|
+
|
14
|
+
__all__ = [
|
15
|
+
"EuriaiClient",
|
16
|
+
"EuriaiLangChainLLM",
|
17
|
+
"EuriaiEmbeddingClient",
|
18
|
+
"EuriaiEmbeddings",
|
19
|
+
"EuriaiLlamaIndexLLM",
|
20
|
+
"EuriaiLlamaIndexEmbedding",
|
21
|
+
"EuriaiCrewAI",
|
22
|
+
"EuriaiAutoGen",
|
23
|
+
"EuriaiLlamaIndex",
|
24
|
+
"EuriaiLangGraph",
|
25
|
+
"EuriaiSmolAgent",
|
26
|
+
"EuriaiN8N",
|
27
|
+
]
|
@@ -6,14 +6,14 @@ class EuriaiClient:
|
|
6
6
|
self,
|
7
7
|
api_key: str,
|
8
8
|
model: str = "gpt-4.1-nano",
|
9
|
-
endpoint: str = "https://api.euron.one/api/v1/euri/
|
9
|
+
endpoint: str = "https://api.euron.one/api/v1/euri/chat/completions"
|
10
10
|
):
|
11
11
|
"""
|
12
12
|
Initializes the EuriaiClient.
|
13
13
|
|
14
14
|
Args:
|
15
15
|
api_key (str): Your EURI API key.
|
16
|
-
model (str, optional): Model ID to use (e.g., 'gpt-4.1-nano', 'gemini-2.
|
16
|
+
model (str, optional): Model ID to use (e.g., 'gpt-4.1-nano', 'gemini-2.5-flash').
|
17
17
|
endpoint (str, optional): API endpoint URL.
|
18
18
|
"""
|
19
19
|
self.api_key = api_key
|
@@ -5,7 +5,7 @@ class EuriaiEmbeddingClient:
|
|
5
5
|
def __init__(self, api_key: str, model: str = "text-embedding-3-small"):
|
6
6
|
self.api_key = api_key
|
7
7
|
self.model = model
|
8
|
-
self.url = "https://api.euron.one/api/v1/euri/
|
8
|
+
self.url = "https://api.euron.one/api/v1/euri/embeddings"
|
9
9
|
|
10
10
|
def embed(self, text: str) -> np.ndarray:
|
11
11
|
headers = {
|
@@ -0,0 +1,74 @@
|
|
1
|
+
from typing import Optional, Dict, Any, List
|
2
|
+
|
3
|
+
try:
|
4
|
+
import autogen
|
5
|
+
except ImportError:
|
6
|
+
autogen = None
|
7
|
+
|
8
|
+
class EuriaiAutoGen:
|
9
|
+
"""
|
10
|
+
Full-featured wrapper for AutoGen integration in the EURI SDK.
|
11
|
+
Allows programmatic agent, tool, and workflow management, and chat execution.
|
12
|
+
"""
|
13
|
+
def __init__(self, config: Optional[Dict[str, Any]] = None):
|
14
|
+
"""
|
15
|
+
Initialize the AutoGen wrapper.
|
16
|
+
Args:
|
17
|
+
config: Dict of config options (API keys, model, etc.)
|
18
|
+
"""
|
19
|
+
if autogen is None:
|
20
|
+
raise ImportError("AutoGen is not installed. Please install with `pip install pyautogen`.")
|
21
|
+
self.config = config or {}
|
22
|
+
self.agents: List[Any] = []
|
23
|
+
self.tools: List[Any] = []
|
24
|
+
self.memory: Optional[Any] = None
|
25
|
+
self.workflow: Optional[Any] = None
|
26
|
+
self.history: List[Dict[str, Any]] = []
|
27
|
+
|
28
|
+
def add_agent(self, agent_config: Dict[str, Any]) -> Any:
|
29
|
+
"""Add an agent with config."""
|
30
|
+
agent = autogen.Agent(**agent_config)
|
31
|
+
self.agents.append(agent)
|
32
|
+
return agent
|
33
|
+
|
34
|
+
def add_tool(self, tool_config: Dict[str, Any]) -> Any:
|
35
|
+
"""Add a tool with config."""
|
36
|
+
tool = autogen.Tool(**tool_config)
|
37
|
+
self.tools.append(tool)
|
38
|
+
return tool
|
39
|
+
|
40
|
+
def set_memory(self, memory_config: Dict[str, Any]) -> None:
|
41
|
+
"""Set memory for the workflow."""
|
42
|
+
self.memory = autogen.Memory(**memory_config)
|
43
|
+
|
44
|
+
def run_chat(self, prompt: str, agent_idx: int = 0, **kwargs) -> str:
|
45
|
+
"""
|
46
|
+
Run a chat with the specified agent and prompt.
|
47
|
+
Returns the agent's response.
|
48
|
+
"""
|
49
|
+
if not self.agents:
|
50
|
+
raise ValueError("No agents defined. Use add_agent().")
|
51
|
+
agent = self.agents[agent_idx]
|
52
|
+
response = agent.chat(prompt, **kwargs)
|
53
|
+
self.history.append({"agent": agent, "prompt": prompt, "response": response})
|
54
|
+
return response
|
55
|
+
|
56
|
+
def run_workflow(self, workflow_config: Dict[str, Any], **kwargs) -> Any:
|
57
|
+
"""
|
58
|
+
Run a custom workflow (advanced usage).
|
59
|
+
"""
|
60
|
+
workflow = autogen.Workflow(**workflow_config)
|
61
|
+
self.workflow = workflow
|
62
|
+
result = workflow.run(**kwargs)
|
63
|
+
return result
|
64
|
+
|
65
|
+
def get_history(self) -> List[Dict[str, Any]]:
|
66
|
+
return self.history
|
67
|
+
|
68
|
+
def reset(self):
|
69
|
+
"""Reset agents, tools, memory, and history."""
|
70
|
+
self.agents = []
|
71
|
+
self.tools = []
|
72
|
+
self.memory = None
|
73
|
+
self.workflow = None
|
74
|
+
self.history = []
|
@@ -10,7 +10,7 @@ class EuriaiLlamaIndexLLM(LLM):
|
|
10
10
|
model: str = "gpt-4.1-nano"
|
11
11
|
temperature: float = 0.7
|
12
12
|
max_tokens: int = 1000
|
13
|
-
url: str = "https://api.euron.one/api/v1/euri/
|
13
|
+
url: str = "https://api.euron.one/api/v1/euri/chat/completions"
|
14
14
|
|
15
15
|
def __init__(
|
16
16
|
self,
|