mira-network 0.1.4__tar.gz → 0.1.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2024 Mira Network
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,349 @@
1
+ Metadata-Version: 2.1
2
+ Name: mira-network
3
+ Version: 0.1.6
4
+ Summary: Python SDK for Mira Network API
5
+ Author-Email: sarim2000 <sarimbleedblue@gmail.com>
6
+ License: MIT
7
+ Requires-Python: ==3.11.*
8
+ Requires-Dist: httpx>=0.28.1
9
+ Requires-Dist: pydantic>=2.10.4
10
+ Requires-Dist: typing-extensions>=4.8.0
11
+ Requires-Dist: requests>=2.32.3
12
+ Requires-Dist: pytest-cov>=6.0.0
13
+ Description-Content-Type: text/markdown
14
+
15
+ <div align="center">
16
+ <img src="https://your-domain.com/logo.png" alt="Mira Network SDK" width="200"/>
17
+ <h1>Mira Network Python SDK</h1>
18
+ <p><strong>Your Universal Gateway to AI Language Models</strong></p>
19
+ </div>
20
+
21
+ <p align="center">
22
+ <a href="https://badge.fury.io/py/mira-network"><img src="https://badge.fury.io/py/mira-network.svg" alt="PyPI version"></a>
23
+ <a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT"></a>
24
+ <a href="https://github.com/mira-network/python-sdk/actions"><img src="https://github.com/mira-network/python-sdk/workflows/tests/badge.svg" alt="Build Status"></a>
25
+ <a href="https://codecov.io/gh/mira-network/python-sdk"><img src="https://codecov.io/gh/mira-network/python-sdk/branch/main/graph/badge.svg" alt="Coverage Status"></a>
26
+ <a href="https://pypi.org/project/mira-network/"><img src="https://img.shields.io/pypi/dm/mira-network.svg" alt="Downloads"></a>
27
+ <a href="https://discord.gg/mira-network"><img src="https://img.shields.io/discord/1234567890?color=7289da&label=discord" alt="Discord"></a>
28
+ </p>
29
+
30
+ <p align="center">
31
+ <b>Mira Client enables seamless integration with multiple language models while providing advanced routing, load balancing, and flow management capabilities.</b>
32
+ </p>
33
+
34
+ ---
35
+
36
+ ## 🌟 What is Mira Network?
37
+
38
+ Mira Network is your unified interface to the world of AI language models. It provides:
39
+
40
+ - 🔄 **Smart Model Routing**: Route requests across different models
41
+ - ⚖️ **Load Balancing**: Distribute workload across nodes
42
+ - 🌊 **Flow Management**: Handle request patterns efficiently
43
+ - 🔌 **Universal Integration**: Single API for multiple models
44
+ - 📊 **Usage Tracking**: Monitor your model usage
45
+
46
+ ## Why Mira Network SDK?
47
+
48
+ | Feature | Mira SDK | Traditional Approach |
49
+ | ---------------------- | -------------------------- | ----------------------- |
50
+ | 🔄 Multi-model Support | Single unified API | Separate APIs per model |
51
+ | ⚖️ Load Balancing | Built-in | Custom implementation |
52
+ | 🌊 Flow Control | Automatic handling | Manual implementation |
53
+ | 📊 Usage Tracking | Integrated | Custom tracking needed |
54
+ | 🛡️ Error Handling | Standardized across models | Model-specific handling |
55
+
56
+ ## 🎯 Perfect For
57
+
58
+ - 🤖 AI Applications
59
+ - 📝 Text Generation
60
+ - 🔍 Search Enhancement
61
+ - 🎮 Interactive Systems
62
+
63
+ ## 🏃 Quick Start
64
+
65
+ ```bash
66
+ pip install mira-network
67
+ ```
68
+
69
+ ```python
70
+ from mira_network import MiraClient
71
+
72
+ async def get_ai_response(prompt):
73
+ async with MiraClient() as client:
74
+ return await client.chat_completions_create(
75
+ model="your-chosen-model",
76
+ messages=[{"role": "user", "content": prompt}]
77
+ )
78
+ ```
79
+
80
+ ## 🏗️ Architecture
81
+
82
+ ```mermaid
83
+ graph LR
84
+ A[Your App] --> B[Mira SDK]
85
+ B --> C[Load Balancer]
86
+ C --> D[Mira Node 1]
87
+ C --> E[Mira Node 2]
88
+ C --> F[Mira Node N]
89
+ ```
90
+
91
+ ## ✨ Key Features
92
+
93
+ - 🔌 Simple, intuitive API
94
+ - 🔄 Async-first design
95
+ - 🌊 Streaming support
96
+ - 🔐 Error handling
97
+ - 🛠️ Customizable nodes
98
+ - 📊 Usage tracking
99
+
100
+ ## 📑 Table of Contents
101
+
102
+ - [Installation](#-installation)
103
+ - [Quick Start](#-quick-start)
104
+ - [Basic Usage](#-basic-usage)
105
+ - [Advanced Usage](#-advanced-usage)
106
+ - [API Reference](#-reference)
107
+ - [Support](#-support)
108
+ - [Contributing](#-contributing)
109
+ - [License](#-license)
110
+
111
+ ## 🔧 Installation
112
+
113
+ Install the SDK using pip:
114
+
115
+ ```bash
116
+ pip install mira-network
117
+ ```
118
+
119
+ ## 🚀 Quick Start
120
+
121
+ Experience the power of Mira Network in just a few lines of code:
122
+
123
+ ```python
124
+ from mira_network import MiraClient
125
+
126
+ async def main():
127
+ # Initialize with your API key
128
+ client = MiraClient(api_key="your-api-key")
129
+
130
+ # Get a response from AI
131
+ response = await client.chat_completions_create(
132
+ model="your-chosen-model",
133
+ messages=[
134
+ {"role": "user", "content": "What is the capital of France?"}
135
+ ]
136
+ )
137
+
138
+ # Print the AI's response
139
+ print(response["choices"][0]["message"]["content"])
140
+
141
+ if __name__ == "__main__":
142
+ import asyncio
143
+ asyncio.run(main())
144
+ ```
145
+
146
+ ## 📝 Basic Usage
147
+
148
+ ### Having a Conversation
149
+
150
+ Engage in natural conversations with AI models. The SDK handles the complexities of managing conversation context and model interactions:
151
+
152
+ ```python
153
+ response = await client.chat_completions_create(
154
+ model="your-chosen-model",
155
+ messages=[
156
+ {"role": "system", "content": "You are a helpful assistant"},
157
+ {"role": "user", "content": "Hi! Can you help me?"},
158
+ ]
159
+ )
160
+ ```
161
+
162
+ ### Checking Available Models
163
+
164
+ Explore the diverse range of available AI models:
165
+
166
+ ```python
167
+ models = await client.list_models()
168
+ print(models)
169
+ ```
170
+
171
+ ### Checking Your Credits
172
+
173
+ Monitor your usage and available credits:
174
+
175
+ ```python
176
+ credits = await client.get_user_credits()
177
+ print(credits)
178
+ ```
179
+
180
+ ## 🔧 Advanced Usage
181
+
182
+ ### Streaming Responses
183
+
184
+ Perfect for real-time applications and interactive experiences:
185
+
186
+ ```python
187
+ stream = await client.chat_completions_create(
188
+ model="your-chosen-model",
189
+ messages=[
190
+ {"role": "user", "content": "Write a story"}
191
+ ],
192
+ stream=True
193
+ )
194
+
195
+ async for chunk in stream:
196
+ print(chunk["choices"][0]["delta"]["content"], end="")
197
+ ```
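If you also need the complete reply once streaming finishes, the chunks can be collected while they are printed. A minimal sketch, assuming the chunk shape shown above (`chunk["choices"][0]["delta"]["content"]`) and a placeholder API key:

```python
from mira_network import MiraClient


async def stream_and_collect(prompt: str) -> str:
    """Print tokens as they arrive and return the full response text."""
    parts = []
    async with MiraClient(api_key="your-api-key") as client:
        stream = await client.chat_completions_create(
            model="your-chosen-model",
            messages=[{"role": "user", "content": prompt}],
            stream=True,
        )
        async for chunk in stream:
            # Each chunk carries one incremental piece of the reply.
            piece = chunk["choices"][0]["delta"]["content"]
            print(piece, end="")
            parts.append(piece)
    return "".join(parts)
```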
198
+
199
+ ### Custom Mira Nodes
200
+
201
+ Integrate your preferred Mira nodes seamlessly:
202
+
203
+ ```python
204
+ response = await client.chat_completions_create(
205
+ model="your-model",
206
+ messages=[{"role": "user", "content": "Hello"}],
207
+ mira_node={
208
+ "base_url": "https://custom-node.com",
209
+ "api_key": "node-api-key"
210
+ }
211
+ )
212
+ ```
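Because `mira_node` is a per-request option, switching nodes is just a matter of passing a different dictionary. The sketch below retries a request against a second node when the first one fails; both node entries are illustrative placeholders:

```python
import httpx

# Illustrative node configurations -- replace with your own endpoints and keys.
PRIMARY_NODE = {"base_url": "https://custom-node.com", "api_key": "node-api-key"}
BACKUP_NODE = {"base_url": "https://backup-node.example.com", "api_key": "backup-api-key"}


async def ask_with_fallback(client, prompt: str):
    """Try the primary node first, then retry once against the backup node."""
    last_error = None
    for node in (PRIMARY_NODE, BACKUP_NODE):
        try:
            return await client.chat_completions_create(
                model="your-model",
                messages=[{"role": "user", "content": prompt}],
                mira_node=node,
            )
        except httpx.HTTPError as exc:
            # Network or HTTP failure on this node: move on to the next one.
            last_error = exc
    raise RuntimeError("All configured Mira nodes failed") from last_error
```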
213
+
214
+ ### API Token Management
215
+
216
+ Secure and flexible token management for your applications:
217
+
218
+ ```python
219
+ # Create new token
220
+ new_token = await client.create_api_token(
221
+ {"description": "Production API Key"}
222
+ )
223
+
224
+ # List tokens
225
+ tokens = await client.list_api_tokens()
226
+
227
+ # Delete token
228
+ await client.delete_api_token("token-id")
229
+ ```
230
+
231
+ ### Using as Context Manager
232
+
233
+ Efficient resource management with context managers:
234
+
235
+ ```python
236
+ async with MiraClient(api_key="your-api-key") as client:
237
+ response = await client.chat_completions_create(...)
238
+ ```
239
+
240
+ ## 📚 Reference
241
+
242
+ ### Message Structure
243
+
244
+ Understanding the core message components:
245
+
246
+ ```python
247
+ Message:
248
+ role: str # "system", "user", or "assistant"
249
+ content: str # The message content
250
+ ```
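A message is simply a dictionary with these two fields, so a multi-turn conversation is an ordered list of such dictionaries. A short sketch of building one and sending it:

```python
from mira_network import MiraClient


async def continue_conversation() -> str:
    # Ordered history: system prompt first, then alternating user/assistant turns.
    messages = [
        {"role": "system", "content": "You are a concise technical assistant."},
        {"role": "user", "content": "What does HTTP 429 mean?"},
        {"role": "assistant", "content": "It means the server is rate limiting you."},
        {"role": "user", "content": "How should my client react to it?"},
    ]
    async with MiraClient(api_key="your-api-key") as client:
        response = await client.chat_completions_create(
            model="your-chosen-model",
            messages=messages,
        )
    return response["choices"][0]["message"]["content"]
```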
251
+
252
+ ### Error Handling
253
+
254
+ Robust error handling for production applications:
255
+
256
+ #### Validation Errors
257
+
258
+ ```python
259
+ try:
260
+ response = await client.chat_completions_create(
261
+ model="your-chosen-model",
262
+ messages=[
263
+ {"role": "invalid", "content": "Hello"} # Invalid role
264
+ ]
265
+ )
266
+ except ValueError as e:
267
+ print(f"Validation error: {e}")
268
+ ```
269
+
270
+ #### Network Errors
271
+
272
+ ```python
273
+ try:
274
+ response = await client.chat_completions_create(...)
275
+ except httpx.HTTPError as e:
276
+ print(f"HTTP error: {e}")
277
+ ```
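Transient network failures are usually worth retrying. The sketch below wraps the `httpx.HTTPError` handling above in a simple exponential backoff loop; the attempt count and delays are illustrative, not prescriptive:

```python
import asyncio

import httpx


async def create_with_retries(client, request_kwargs: dict, max_attempts: int = 3):
    """Retry a chat completion on network/HTTP errors with exponential backoff."""
    delay = 1.0
    for attempt in range(1, max_attempts + 1):
        try:
            return await client.chat_completions_create(**request_kwargs)
        except httpx.HTTPError as exc:
            if attempt == max_attempts:
                raise  # Out of attempts: surface the original error.
            print(f"Attempt {attempt} failed ({exc}); retrying in {delay:.0f}s")
            await asyncio.sleep(delay)
            delay *= 2  # Double the wait before the next attempt.
```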
278
+
279
+ ### Environment Configuration
280
+
281
+ Flexible configuration options for different environments:
282
+
283
+ ```python
284
+ import os
285
+ from mira_network import MiraClient
286
+
287
+ client = MiraClient(
288
+ api_key=os.getenv("MIRA_API_KEY"),
289
+ base_url=os.getenv("MIRA_API_URL", "https://apis.mira.network")
290
+ )
291
+ ```
292
+
293
+ ## 💡 Real-world Examples
294
+
295
+ ### AI-powered Customer Service
296
+
297
+ ```python
298
+ async def handle_customer_query(query: str) -> str:
299
+ async with MiraClient() as client:
300
+ response = await client.chat_completions_create(
301
+ model="your-chosen-model",
302
+ messages=[
303
+ {"role": "system", "content": "You are a helpful customer service agent."},
304
+ {"role": "user", "content": query}
305
+ ],
306
+ temperature=0.7,
307
+ max_tokens=150
308
+ )
309
+ return response["choices"][0]["message"]["content"]
310
+ ```
311
+
312
+ ### Content Generation Pipeline
313
+
314
+ ```python
315
+ async def generate_blog_post(topic: str) -> dict:
316
+ async with MiraClient() as client:
317
+ # Generate outline
318
+ outline = await client.chat_completions_create(...)
319
+
320
+ # Generate content
321
+ content = await client.chat_completions_create(...)
322
+
323
+ # Generate meta description
324
+ meta = await client.chat_completions_create(...)
325
+
326
+ return {"outline": outline, "content": content, "meta": meta}
327
+ ```
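Each elided call above is an ordinary chat completion. One possible shape for the outline stage, with purely illustrative prompts:

```python
async def generate_outline(client, topic: str) -> str:
    """One way the elided outline step could be written."""
    response = await client.chat_completions_create(
        model="your-chosen-model",
        messages=[
            {"role": "system", "content": "You are an editor who drafts blog post outlines."},
            {"role": "user", "content": f"Draft a five-point outline for a post about {topic}."},
        ],
    )
    return response["choices"][0]["message"]["content"]
```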
328
+
329
+ ## 🤝 Support
330
+
331
+ For feature requests and bug reports, please visit our [Console Feedback](https://console-feedback.arohalabs.tech/).
332
+
333
+ ## 👥 Contributing
334
+
335
+ We welcome contributions! Here's how you can help:
336
+
337
+ 1. Fork the repository
338
+ 2. Create your feature branch (`git checkout -b feature/amazing-feature`)
339
+ 3. Commit your changes (`git commit -m 'Add amazing feature'`)
340
+ 4. Push to the branch (`git push origin feature/amazing-feature`)
341
+ 5. Open a Pull Request
342
+
343
+ ## 📄 License
344
+
345
+ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
346
+
347
+ ---
348
+
349
+ <p align="center">Built with ❤️ by the Mira Network team</p>
@@ -0,0 +1,335 @@
1
+ <div align="center">
2
+ <img src="https://your-domain.com/logo.png" alt="Mira Network SDK" width="200"/>
3
+ <h1>Mira Network Python SDK</h1>
4
+ <p><strong>Your Universal Gateway to AI Language Models</strong></p>
5
+ </div>
6
+
7
+ <p align="center">
8
+ <a href="https://badge.fury.io/py/mira-network"><img src="https://badge.fury.io/py/mira-network.svg" alt="PyPI version"></a>
9
+ <a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT"></a>
10
+ <a href="https://github.com/mira-network/python-sdk/actions"><img src="https://github.com/mira-network/python-sdk/workflows/tests/badge.svg" alt="Build Status"></a>
11
+ <a href="https://codecov.io/gh/mira-network/python-sdk"><img src="https://codecov.io/gh/mira-network/python-sdk/branch/main/graph/badge.svg" alt="Coverage Status"></a>
12
+ <a href="https://pypi.org/project/mira-network/"><img src="https://img.shields.io/pypi/dm/mira-network.svg" alt="Downloads"></a>
13
+ <a href="https://discord.gg/mira-network"><img src="https://img.shields.io/discord/1234567890?color=7289da&label=discord" alt="Discord"></a>
14
+ </p>
15
+
16
+ <p align="center">
17
+ <b>Mira Client enables seamless integration with multiple language models while providing advanced routing, load balancing, and flow management capabilities.</b>
18
+ </p>
19
+
20
+ ---
21
+
22
+ ## 🌟 What is Mira Network?
23
+
24
+ Mira Network is your unified interface to the world of AI language models. It provides:
25
+
26
+ - 🔄 **Smart Model Routing**: Route requests across different models
27
+ - ⚖️ **Load Balancing**: Distribute workload across nodes
28
+ - 🌊 **Flow Management**: Handle request patterns efficiently
29
+ - 🔌 **Universal Integration**: Single API for multiple models
30
+ - 📊 **Usage Tracking**: Monitor your model usage
31
+
32
+ ## Why Mira Network SDK?
33
+
34
+ | Feature | Mira SDK | Traditional Approach |
35
+ | ---------------------- | -------------------------- | ----------------------- |
36
+ | 🔄 Multi-model Support | Single unified API | Separate APIs per model |
37
+ | ⚖️ Load Balancing | Built-in | Custom implementation |
38
+ | 🌊 Flow Control | Automatic handling | Manual implementation |
39
+ | 📊 Usage Tracking | Integrated | Custom tracking needed |
40
+ | 🛡️ Error Handling | Standardized across models | Model-specific handling |
41
+
42
+ ## 🎯 Perfect For
43
+
44
+ - 🤖 AI Applications
45
+ - 📝 Text Generation
46
+ - 🔍 Search Enhancement
47
+ - 🎮 Interactive Systems
48
+
49
+ ## 🏃 Quick Start
50
+
51
+ ```bash
52
+ pip install mira-network
53
+ ```
54
+
55
+ ```python
56
+ from mira_network import MiraClient
57
+
58
+ async def get_ai_response(prompt):
59
+ async with MiraClient() as client:
60
+ return await client.chat_completions_create(
61
+ model="your-chosen-model",
62
+ messages=[{"role": "user", "content": prompt}]
63
+ )
64
+ ```
65
+
66
+ ## 🏗️ Architecture
67
+
68
+ ```mermaid
69
+ graph LR
70
+ A[Your App] --> B[Mira SDK]
71
+ B --> C[Load Balancer]
72
+ C --> D[Mira Node 1]
73
+ C --> E[Mira Node 2]
74
+ C --> F[Mira Node N]
75
+ ```
76
+
77
+ ## ✨ Key Features
78
+
79
+ - 🔌 Simple, intuitive API
80
+ - 🔄 Async-first design
81
+ - 🌊 Streaming support
82
+ - 🔐 Error handling
83
+ - 🛠️ Customizable nodes
84
+ - 📊 Usage tracking
85
+
86
+ ## 📑 Table of Contents
87
+
88
+ - [Installation](#-installation)
89
+ - [Quick Start](#-quick-start)
90
+ - [Basic Usage](#-basic-usage)
91
+ - [Advanced Usage](#-advanced-usage)
92
+ - [API Reference](#-reference)
93
+ - [Support](#-support)
94
+ - [Contributing](#-contributing)
95
+ - [License](#-license)
96
+
97
+ ## 🔧 Installation
98
+
99
+ Install the SDK using pip:
100
+
101
+ ```bash
102
+ pip install mira-network
103
+ ```
104
+
105
+ ## 🚀 Quick Start
106
+
107
+ Experience the power of Mira Network in just a few lines of code:
108
+
109
+ ```python
110
+ from mira_network import MiraClient
111
+
112
+ async def main():
113
+ # Initialize with your API key
114
+ client = MiraClient(api_key="your-api-key")
115
+
116
+ # Get a response from AI
117
+ response = await client.chat_completions_create(
118
+ model="your-chosen-model",
119
+ messages=[
120
+ {"role": "user", "content": "What is the capital of France?"}
121
+ ]
122
+ )
123
+
124
+ # Print the AI's response
125
+ print(response["choices"][0]["message"]["content"])
126
+
127
+ if __name__ == "__main__":
128
+ import asyncio
129
+ asyncio.run(main())
130
+ ```
131
+
132
+ ## 📝 Basic Usage
133
+
134
+ ### Having a Conversation
135
+
136
+ Engage in natural conversations with AI models. The SDK handles the complexities of managing conversation context and model interactions:
137
+
138
+ ```python
139
+ response = await client.chat_completions_create(
140
+ model="your-chosen-model",
141
+ messages=[
142
+ {"role": "system", "content": "You are a helpful assistant"},
143
+ {"role": "user", "content": "Hi! Can you help me?"},
144
+ ]
145
+ )
146
+ ```
147
+
148
+ ### Checking Available Models
149
+
150
+ Explore the diverse range of available AI models:
151
+
152
+ ```python
153
+ models = await client.list_models()
154
+ print(models)
155
+ ```
156
+
157
+ ### Checking Your Credits
158
+
159
+ Monitor your usage and available credits:
160
+
161
+ ```python
162
+ credits = await client.get_user_credits()
163
+ print(credits)
164
+ ```
165
+
166
+ ## 🔧 Advanced Usage
167
+
168
+ ### Streaming Responses
169
+
170
+ Perfect for real-time applications and interactive experiences:
171
+
172
+ ```python
173
+ stream = await client.chat_completions_create(
174
+ model="your-chosen-model",
175
+ messages=[
176
+ {"role": "user", "content": "Write a story"}
177
+ ],
178
+ stream=True
179
+ )
180
+
181
+ async for chunk in stream:
182
+ print(chunk["choices"][0]["delta"]["content"], end="")
183
+ ```
184
+
185
+ ### Custom Mira Nodes
186
+
187
+ Integrate your preferred Mira nodes seamlessly:
188
+
189
+ ```python
190
+ response = await client.chat_completions_create(
191
+ model="your-model",
192
+ messages=[{"role": "user", "content": "Hello"}],
193
+ mira_node={
194
+ "base_url": "https://custom-node.com",
195
+ "api_key": "node-api-key"
196
+ }
197
+ )
198
+ ```
199
+
200
+ ### API Token Management
201
+
202
+ Secure and flexible token management for your applications:
203
+
204
+ ```python
205
+ # Create new token
206
+ new_token = await client.create_api_token(
207
+ {"description": "Production API Key"}
208
+ )
209
+
210
+ # List tokens
211
+ tokens = await client.list_api_tokens()
212
+
213
+ # Delete token
214
+ await client.delete_api_token("token-id")
215
+ ```
216
+
217
+ ### Using as Context Manager
218
+
219
+ Efficient resource management with context managers:
220
+
221
+ ```python
222
+ async with MiraClient(api_key="your-api-key") as client:
223
+ response = await client.chat_completions_create(...)
224
+ ```
225
+
226
+ ## 📚 Reference
227
+
228
+ ### Message Structure
229
+
230
+ Understanding the core message components:
231
+
232
+ ```python
233
+ Message:
234
+ role: str # "system", "user", or "assistant"
235
+ content: str # The message content
236
+ ```
237
+
238
+ ### Error Handling
239
+
240
+ Robust error handling for production applications:
241
+
242
+ #### Validation Errors
243
+
244
+ ```python
245
+ try:
246
+ response = await client.chat_completions_create(
247
+ model="your-chosen-model",
248
+ messages=[
249
+ {"role": "invalid", "content": "Hello"} # Invalid role
250
+ ]
251
+ )
252
+ except ValueError as e:
253
+ print(f"Validation error: {e}")
254
+ ```
255
+
256
+ #### Network Errors
257
+
258
+ ```python
259
+ try:
260
+ response = await client.chat_completions_create(...)
261
+ except httpx.HTTPError as e:
262
+ print(f"HTTP error: {e}")
263
+ ```
264
+
265
+ ### Environment Configuration
266
+
267
+ Flexible configuration options for different environments:
268
+
269
+ ```python
270
+ import os
271
+ from mira_network import MiraClient
272
+
273
+ client = MiraClient(
274
+ api_key=os.getenv("MIRA_API_KEY"),
275
+ base_url=os.getenv("MIRA_API_URL", "https://apis.mira.network")
276
+ )
277
+ ```
278
+
279
+ ## 💡 Real-world Examples
280
+
281
+ ### AI-powered Customer Service
282
+
283
+ ```python
284
+ async def handle_customer_query(query: str) -> str:
285
+ async with MiraClient() as client:
286
+ response = await client.chat_completions_create(
287
+ model="your-chosen-model",
288
+ messages=[
289
+ {"role": "system", "content": "You are a helpful customer service agent."},
290
+ {"role": "user", "content": query}
291
+ ],
292
+ temperature=0.7,
293
+ max_tokens=150
294
+ )
295
+ return response["choices"][0]["message"]["content"]
296
+ ```
297
+
298
+ ### Content Generation Pipeline
299
+
300
+ ```python
301
+ async def generate_blog_post(topic: str) -> dict:
302
+ async with MiraClient() as client:
303
+ # Generate outline
304
+ outline = await client.chat_completions_create(...)
305
+
306
+ # Generate content
307
+ content = await client.chat_completions_create(...)
308
+
309
+ # Generate meta description
310
+ meta = await client.chat_completions_create(...)
311
+
312
+ return {"outline": outline, "content": content, "meta": meta}
313
+ ```
314
+
315
+ ## 🤝 Support
316
+
317
+ For feature requests and bug reports, please visit our [Console Feedback](https://console-feedback.arohalabs.tech/).
318
+
319
+ ## 👥 Contributing
320
+
321
+ We welcome contributions! Here's how you can help:
322
+
323
+ 1. Fork the repository
324
+ 2. Create your feature branch (`git checkout -b feature/amazing-feature`)
325
+ 3. Commit your changes (`git commit -m 'Add amazing feature'`)
326
+ 4. Push to the branch (`git push origin feature/amazing-feature`)
327
+ 5. Open a Pull Request
328
+
329
+ ## 📄 License
330
+
331
+ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
332
+
333
+ ---
334
+
335
+ <p align="center">Built with ❤️ by the Mira Network team</p>
@@ -12,8 +12,8 @@ dependencies = [
12
12
  description = "Python SDK for Mira Network API"
13
13
  name = "mira-network"
14
14
  readme = "README.md"
15
- requires-python = "==3.10.*"
16
- version = "0.1.4"
15
+ requires-python = "==3.11.*"
16
+ version = "0.1.6"
17
17
 
18
18
  [project.license]
19
19
  text = "MIT"
@@ -0,0 +1,146 @@
1
+ from typing import AsyncIterator, Optional, List, Dict, AsyncGenerator, Union, Any
2
+ import httpx
3
+ from .models import (
4
+ AiRequest,
5
+ ApiTokenRequest,
6
+ Message,
7
+ )
8
+
9
+
10
+ class MiraClient:
11
+ def __init__(
12
+ self,
13
+ api_key: Optional[str] = None,
14
+ base_url: str = "https://apis.mira.network",
15
+ ):
16
+ """Initialize Mira client.
17
+
18
+ Args:
19
+ api_key: API key for authentication
20
+ base_url: Base URL of the Mira API
21
+ """
22
+ self.base_url = base_url
23
+ self.api_key = api_key
24
+ self._client = httpx.AsyncClient()
25
+
26
+ async def __aenter__(self):
27
+ return self
28
+
29
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
30
+ await self._client.aclose()
31
+
32
+ def _get_headers(self) -> Dict[str, str]:
33
+ headers = {"Content-Type": "application/json"}
34
+ if self.api_key:
35
+ headers["Authorization"] = f"Bearer {self.api_key}"
36
+ return headers
37
+
38
+ async def chat_completions_create(
39
+ self,
40
+ model: str,
41
+ messages: list[Message],
42
+ stream: bool = False,
43
+ **kwargs: Any,
44
+ ) -> Union[Dict[str, Any], AsyncIterator[Dict[str, Any]]]:
45
+ """Create a chat completion.
46
+
47
+ Args:
48
+ model: The model to use for completion
49
+ messages: A list of messages in the conversation
50
+ stream: Whether to stream the response
51
+ **kwargs: Additional parameters to pass to the API
52
+ """
53
+ request = AiRequest(
54
+ model=model,
55
+ messages=messages,
56
+ stream=stream,
57
+ **kwargs,
58
+ )
59
+
60
+ print("\n\n\n======>", request.model_dump(), "\n\n\n")
61
+
62
+ response = await self._client.post(
63
+ f"{self.base_url}/v1/chat/completions",
64
+ headers=self._get_headers(),
65
+ json=request.model_dump(),
66
+ )
67
+ response.raise_for_status()
68
+
69
+ if stream:
70
+ return self._stream_response(response)
71
+ return response.json()
72
+
73
+ async def _stream_response(
74
+ self, response: httpx.Response
75
+ ) -> AsyncIterator[Dict[str, Any]]:
76
+ """Handle streaming response.
77
+
78
+ Args:
79
+ response: The HTTP response object
80
+ """
81
+ async for line in response.aiter_lines():
82
+ if line.strip():
83
+ yield self._format_stream_response(line)
84
+
85
+ def _format_stream_response(self, line: str) -> Dict[str, Any]:
86
+ """Format streaming response to match OpenAI's format.
87
+
88
+ Args:
89
+ line: The response line
90
+ """
91
+ # Add formatting logic here if needed
92
+ return {"choices": [{"delta": {"content": line}}]}
93
+
94
+ async def list_models(self) -> List[str]:
95
+ """List available models."""
96
+ response = await self._client.get(
97
+ f"{self.base_url}/v1/models",
98
+ headers=self._get_headers(),
99
+ )
100
+ response.raise_for_status()
101
+ return response.json()
102
+
103
+ async def create_api_token(self, request: ApiTokenRequest) -> Dict:
104
+ """Create a new API token."""
105
+ response = await self._client.post(
106
+ f"{self.base_url}/api-tokens",
107
+ headers=self._get_headers(),
108
+ json=request.model_dump(),
109
+ )
110
+ response.raise_for_status()
111
+ return response.json()
112
+
113
+ async def list_api_tokens(self) -> List[Dict]:
114
+ """List all API tokens."""
115
+ response = await self._client.get(
116
+ f"{self.base_url}/api-tokens",
117
+ headers=self._get_headers(),
118
+ )
119
+ response.raise_for_status()
120
+ return response.json()
121
+
122
+ async def delete_api_token(self, token: str) -> None:
123
+ """Delete an API token."""
124
+ response = await self._client.delete(
125
+ f"{self.base_url}/api-tokens/{token}",
126
+ headers=self._get_headers(),
127
+ )
128
+ response.raise_for_status()
129
+
130
+ async def get_user_credits(self) -> Dict:
131
+ """Get user credits information."""
132
+ response = await self._client.get(
133
+ f"{self.base_url}/user-credits",
134
+ headers=self._get_headers(),
135
+ )
136
+ response.raise_for_status()
137
+ return response.json()
138
+
139
+ async def get_credits_history(self) -> List[Dict]:
140
+ """Get user credits history."""
141
+ response = await self._client.get(
142
+ f"{self.base_url}/user-credits-history",
143
+ headers=self._get_headers(),
144
+ )
145
+ response.raise_for_status()
146
+ return response.json()
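A minimal usage sketch for the asynchronous client defined above, assuming a valid API key and the response shapes documented in the README:

```python
import asyncio

from mira_network import MiraClient


async def main() -> None:
    async with MiraClient(api_key="your-api-key") as client:
        # Discover which models this key can use.
        models = await client.list_models()
        print("models:", models)

        # Run a simple, non-streaming chat completion.
        reply = await client.chat_completions_create(
            model="your-chosen-model",
            messages=[{"role": "user", "content": "Say hello in one sentence."}],
        )
        print(reply["choices"][0]["message"]["content"])

        # Check remaining credits for the account.
        print("credits:", await client.get_user_credits())


if __name__ == "__main__":
    asyncio.run(main())
```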
@@ -9,7 +9,7 @@ from .models import (
9
9
  class MiraSyncClient:
10
10
  def __init__(
11
11
  self,
12
- base_url: str = "https://apis.mira.network/",
12
+ base_url: str = "https://apis.mira.network",
13
13
  api_token: Optional[str] = None,
14
14
  ):
15
15
  """Initialize Mira synchronous client.
@@ -1,178 +0,0 @@
1
- Metadata-Version: 2.1
2
- Name: mira-network
3
- Version: 0.1.4
4
- Summary: Python SDK for Mira Network API
5
- Author-Email: sarim2000 <sarimbleedblue@gmail.com>
6
- License: MIT
7
- Requires-Python: ==3.10.*
8
- Requires-Dist: httpx>=0.28.1
9
- Requires-Dist: pydantic>=2.10.4
10
- Requires-Dist: typing-extensions>=4.8.0
11
- Requires-Dist: requests>=2.32.3
12
- Requires-Dist: pytest-cov>=6.0.0
13
- Description-Content-Type: text/markdown
14
-
15
- # Mira Network SDK
16
-
17
- A Python SDK for interacting with the Mira Network API. This SDK provides both synchronous and asynchronous interfaces to access Mira API endpoints for model inference, API token management, and credit system operations.
18
-
19
- ## Installation
20
-
21
- ```bash
22
- pip install mira-network
23
- ```
24
-
25
- ## Quick Start
26
-
27
- ### Synchronous Usage
28
-
29
- ```python
30
- from mira_network.sync_client import MiraSyncClient
31
- from mira_network.models import AiRequest, Message
32
-
33
- # Using context manager (recommended)
34
- with MiraSyncClient(api_token="your-api-token") as client:
35
- # Example 1: Non-streaming response
36
- request = AiRequest(
37
- messages=[
38
- Message(role="system", content="You are a helpful assistant."),
39
- Message(role="user", content="Hello!")
40
- ],
41
- stream=False
42
- )
43
- response = client.generate(request)
44
- print(response)
45
-
46
- # Example 2: Streaming response
47
- stream_request = AiRequest(
48
- messages=[
49
- Message(role="system", content="You are a helpful assistant."),
50
- Message(role="user", content="Tell me a story!")
51
- ],
52
- stream=True
53
- )
54
- for chunk in client.generate(stream_request):
55
- print(chunk)
56
- ```
57
-
58
- ### Asynchronous Usage
59
-
60
- ```python
61
- import asyncio
62
- from mira_network.client import MiraClient
63
- from mira_network.models import AiRequest, Message
64
-
65
- async def main():
66
- # Using async context manager (recommended)
67
- async with MiraClient(api_token="your-api-token") as client:
68
- # Example 1: Non-streaming response
69
- request = AiRequest(
70
- messages=[
71
- Message(role="system", content="You are a helpful assistant."),
72
- Message(role="user", content="Hello!")
73
- ],
74
- stream=False
75
- )
76
- response = await client.generate(request)
77
- print(response)
78
-
79
- # Example 2: Streaming response
80
- stream_request = AiRequest(
81
- messages=[
82
- Message(role="system", content="You are a helpful assistant."),
83
- Message(role="user", content="Tell me a story!")
84
- ],
85
- stream=True
86
- )
87
- async for chunk in await client.generate(stream_request):
88
- print(chunk)
89
-
90
- if __name__ == "__main__":
91
- asyncio.run(main())
92
- ```
93
-
94
- ## API Reference
95
-
96
- ### Client Initialization
97
-
98
- The SDK provides two client classes:
99
- - `MiraSyncClient`: Synchronous client using `requests`
100
- - `MiraClient`: Asynchronous client using `httpx`
101
-
102
- Both clients support context managers for proper resource cleanup:
103
-
104
- ```python
105
- # Synchronous
106
- with MiraSyncClient(api_token="your-api-token") as client:
107
- # Your sync code here
108
-
109
- # Asynchronous
110
- async with MiraClient(api_token="your-api-token") as client:
111
- # Your async code here
112
- ```
113
-
114
- ### Models
115
-
116
- - `Message`: Represents a chat message
117
- - `role`: String ("system", "user", or "assistant")
118
- - `content`: String content of the message
119
-
120
- - `AiRequest`: Configuration for model inference
121
- - `model`: Model identifier (default: "mira/llama3.1")
122
- - `messages`: List of Message objects
123
- - `stream`: Boolean to enable streaming responses (default: False)
124
- - `model_provider`: Optional ModelProvider configuration
125
-
126
- - `ModelProvider`: Custom provider configuration
127
- - `base_url`: Provider's base URL
128
- - `api_key`: Provider's API key
129
-
130
- - `ApiTokenRequest`: Request for creating API tokens
131
- - `description`: Optional description for the token
132
-
133
- ### Available Methods
134
-
135
- Both sync and async clients provide the same methods with identical parameters. The only difference is that async methods must be awaited.
136
-
137
- #### Model Operations
138
- ```python
139
- # Sync
140
- models = client.list_models()
141
- response = client.generate(AiRequest(messages=[...], stream=False))
142
- for chunk in client.generate(AiRequest(messages=[...], stream=True)):
143
- print(chunk)
144
-
145
- # Async
146
- models = await client.list_models()
147
- response = await client.generate(AiRequest(messages=[...], stream=False))
148
- async for chunk in await client.generate(AiRequest(messages=[...], stream=True)):
149
- print(chunk)
150
- ```
151
-
152
- #### API Token Operations
153
- ```python
154
- # Sync
155
- token = client.create_api_token(ApiTokenRequest(description="My Token"))
156
- tokens = client.list_api_tokens()
157
- client.delete_api_token("token-to-delete")
158
-
159
- # Async
160
- token = await client.create_api_token(ApiTokenRequest(description="My Token"))
161
- tokens = await client.list_api_tokens()
162
- await client.delete_api_token("token-to-delete")
163
- ```
164
-
165
- #### Credit Operations
166
- ```python
167
- # Sync
168
- credits = client.get_user_credits()
169
- history = client.get_credits_history()
170
-
171
- # Async
172
- credits = await client.get_user_credits()
173
- history = await client.get_credits_history()
174
- ```
175
-
176
- ## License
177
-
178
- MIT License
@@ -1,164 +0,0 @@
1
- # Mira Network SDK
2
-
3
- A Python SDK for interacting with the Mira Network API. This SDK provides both synchronous and asynchronous interfaces to access Mira API endpoints for model inference, API token management, and credit system operations.
4
-
5
- ## Installation
6
-
7
- ```bash
8
- pip install mira-network
9
- ```
10
-
11
- ## Quick Start
12
-
13
- ### Synchronous Usage
14
-
15
- ```python
16
- from mira_network.sync_client import MiraSyncClient
17
- from mira_network.models import AiRequest, Message
18
-
19
- # Using context manager (recommended)
20
- with MiraSyncClient(api_token="your-api-token") as client:
21
- # Example 1: Non-streaming response
22
- request = AiRequest(
23
- messages=[
24
- Message(role="system", content="You are a helpful assistant."),
25
- Message(role="user", content="Hello!")
26
- ],
27
- stream=False
28
- )
29
- response = client.generate(request)
30
- print(response)
31
-
32
- # Example 2: Streaming response
33
- stream_request = AiRequest(
34
- messages=[
35
- Message(role="system", content="You are a helpful assistant."),
36
- Message(role="user", content="Tell me a story!")
37
- ],
38
- stream=True
39
- )
40
- for chunk in client.generate(stream_request):
41
- print(chunk)
42
- ```
43
-
44
- ### Asynchronous Usage
45
-
46
- ```python
47
- import asyncio
48
- from mira_network.client import MiraClient
49
- from mira_network.models import AiRequest, Message
50
-
51
- async def main():
52
- # Using async context manager (recommended)
53
- async with MiraClient(api_token="your-api-token") as client:
54
- # Example 1: Non-streaming response
55
- request = AiRequest(
56
- messages=[
57
- Message(role="system", content="You are a helpful assistant."),
58
- Message(role="user", content="Hello!")
59
- ],
60
- stream=False
61
- )
62
- response = await client.generate(request)
63
- print(response)
64
-
65
- # Example 2: Streaming response
66
- stream_request = AiRequest(
67
- messages=[
68
- Message(role="system", content="You are a helpful assistant."),
69
- Message(role="user", content="Tell me a story!")
70
- ],
71
- stream=True
72
- )
73
- async for chunk in await client.generate(stream_request):
74
- print(chunk)
75
-
76
- if __name__ == "__main__":
77
- asyncio.run(main())
78
- ```
79
-
80
- ## API Reference
81
-
82
- ### Client Initialization
83
-
84
- The SDK provides two client classes:
85
- - `MiraSyncClient`: Synchronous client using `requests`
86
- - `MiraClient`: Asynchronous client using `httpx`
87
-
88
- Both clients support context managers for proper resource cleanup:
89
-
90
- ```python
91
- # Synchronous
92
- with MiraSyncClient(api_token="your-api-token") as client:
93
- # Your sync code here
94
-
95
- # Asynchronous
96
- async with MiraClient(api_token="your-api-token") as client:
97
- # Your async code here
98
- ```
99
-
100
- ### Models
101
-
102
- - `Message`: Represents a chat message
103
- - `role`: String ("system", "user", or "assistant")
104
- - `content`: String content of the message
105
-
106
- - `AiRequest`: Configuration for model inference
107
- - `model`: Model identifier (default: "mira/llama3.1")
108
- - `messages`: List of Message objects
109
- - `stream`: Boolean to enable streaming responses (default: False)
110
- - `model_provider`: Optional ModelProvider configuration
111
-
112
- - `ModelProvider`: Custom provider configuration
113
- - `base_url`: Provider's base URL
114
- - `api_key`: Provider's API key
115
-
116
- - `ApiTokenRequest`: Request for creating API tokens
117
- - `description`: Optional description for the token
118
-
119
- ### Available Methods
120
-
121
- Both sync and async clients provide the same methods with identical parameters. The only difference is that async methods must be awaited.
122
-
123
- #### Model Operations
124
- ```python
125
- # Sync
126
- models = client.list_models()
127
- response = client.generate(AiRequest(messages=[...], stream=False))
128
- for chunk in client.generate(AiRequest(messages=[...], stream=True)):
129
- print(chunk)
130
-
131
- # Async
132
- models = await client.list_models()
133
- response = await client.generate(AiRequest(messages=[...], stream=False))
134
- async for chunk in await client.generate(AiRequest(messages=[...], stream=True)):
135
- print(chunk)
136
- ```
137
-
138
- #### API Token Operations
139
- ```python
140
- # Sync
141
- token = client.create_api_token(ApiTokenRequest(description="My Token"))
142
- tokens = client.list_api_tokens()
143
- client.delete_api_token("token-to-delete")
144
-
145
- # Async
146
- token = await client.create_api_token(ApiTokenRequest(description="My Token"))
147
- tokens = await client.list_api_tokens()
148
- await client.delete_api_token("token-to-delete")
149
- ```
150
-
151
- #### Credit Operations
152
- ```python
153
- # Sync
154
- credits = client.get_user_credits()
155
- history = client.get_credits_history()
156
-
157
- # Async
158
- credits = await client.get_user_credits()
159
- history = await client.get_credits_history()
160
- ```
161
-
162
- ## License
163
-
164
- MIT License
@@ -1,173 +0,0 @@
1
- from typing import AsyncIterator, Optional, List, Dict, AsyncGenerator, Union
2
- import httpx
3
- from .models import (
4
- AiRequest,
5
- ApiTokenRequest,
6
- )
7
-
8
-
9
- class MiraClient:
10
-
11
- def __init__(
12
- self,
13
- base_url: str = "https://apis.mira.network/",
14
- api_token: Optional[str] = None,
15
- ):
16
- """Initialize Mira client.
17
-
18
- Args:
19
- base_url: Base URL of the Mira API
20
- api_token: Optional API token for authentication
21
- """
22
- self.base_url = base_url
23
- self.api_token = api_token
24
- self._client = httpx.AsyncClient()
25
-
26
- async def __aenter__(self):
27
- return self
28
-
29
- async def __aexit__(self, exc_type, exc_val, exc_tb):
30
- await self._client.aclose()
31
-
32
- def _get_headers(self) -> Dict[str, str]:
33
- headers = {"Content-Type": "application/json"}
34
- if self.api_token:
35
- headers["Authorization"] = f"Bearer {self.api_token}"
36
- return headers
37
-
38
- async def list_models(self) -> List[str]:
39
- """List available models."""
40
- response = await self._client.get(
41
- f"{self.base_url}/v1/models",
42
- headers=self._get_headers(),
43
- )
44
- response.raise_for_status()
45
- return response.json()
46
-
47
- async def generate(self, request: AiRequest) -> Union[str, AsyncIterator[str]]:
48
- """Generate text using the specified model."""
49
- response = await self._client.post(
50
- f"{self.base_url}/v1/chat/completions",
51
- headers=self._get_headers(),
52
- json=request.model_dump(),
53
- )
54
-
55
- response.raise_for_status()
56
-
57
- if request.stream:
58
- return response.aiter_lines()
59
- else:
60
- return response.json()
61
-
62
- # async def generate_with_flow(
63
- # self, flow_id: str, request: FlowChatCompletion
64
- # ) -> Union[str, AsyncGenerator[str, None]]:
65
- # """Generate text using a specific flow."""
66
- # response = await self._client.post(
67
- # f"{self.base_url}/v1/flows/{flow_id}/chat/completions",
68
- # headers=self._get_headers(),
69
- # json=request.model_dump(),
70
- # )
71
- # response.raise_for_status()
72
- # return response.json()
73
-
74
- # async def list_flows(self) -> List[Dict]:
75
- # """List all flows."""
76
- # response = await self._client.get(
77
- # f"{self.base_url}/flows",
78
- # headers=self._get_headers(),
79
- # )
80
- # response.raise_for_status()
81
- # return response.json()
82
-
83
- # async def get_flow(self, flow_id: str) -> Dict:
84
- # """Get details of a specific flow."""
85
- # response = await self._client.get(
86
- # f"{self.base_url}/flows/{flow_id}",
87
- # headers=self._get_headers(),
88
- # )
89
- # response.raise_for_status()
90
- # return response.json()
91
-
92
- # async def create_flow(self, request: FlowRequest) -> Dict:
93
- # """Create a new flow."""
94
- # response = await self._client.post(
95
- # f"{self.base_url}/flows",
96
- # headers=self._get_headers(),
97
- # json=request.model_dump(),
98
- # )
99
- # response.raise_for_status()
100
- # return response.json()
101
-
102
- # async def update_flow(self, flow_id: str, request: FlowRequest) -> Dict:
103
- # """Update an existing flow."""
104
- # response = await self._client.put(
105
- # f"{self.base_url}/flows/{flow_id}",
106
- # headers=self._get_headers(),
107
- # json=request.model_dump(),
108
- # )
109
- # response.raise_for_status()
110
- # return response.json()
111
-
112
- # async def delete_flow(self, flow_id: str) -> None:
113
- # """Delete a flow."""
114
- # response = await self._client.delete(
115
- # f"{self.base_url}/flows/{flow_id}",
116
- # headers=self._get_headers(),
117
- # )
118
- # response.raise_for_status()
119
-
120
- async def create_api_token(self, request: ApiTokenRequest) -> Dict:
121
- """Create a new API token."""
122
- response = await self._client.post(
123
- f"{self.base_url}/api-tokens",
124
- headers=self._get_headers(),
125
- json=request.model_dump(),
126
- )
127
- response.raise_for_status()
128
- return response.json()
129
-
130
- async def list_api_tokens(self) -> List[Dict]:
131
- """List all API tokens."""
132
- response = await self._client.get(
133
- f"{self.base_url}/api-tokens",
134
- headers=self._get_headers(),
135
- )
136
- response.raise_for_status()
137
- return response.json()
138
-
139
- async def delete_api_token(self, token: str) -> None:
140
- """Delete an API token."""
141
- response = await self._client.delete(
142
- f"{self.base_url}/api-tokens/{token}",
143
- headers=self._get_headers(),
144
- )
145
- response.raise_for_status()
146
-
147
- async def get_user_credits(self) -> Dict:
148
- """Get user credits information."""
149
- response = await self._client.get(
150
- f"{self.base_url}/user-credits",
151
- headers=self._get_headers(),
152
- )
153
- response.raise_for_status()
154
- return response.json()
155
-
156
- # async def add_credit(self, request: AddCreditRequest) -> Dict:
157
- # """Add credits to a user account."""
158
- # response = await self._client.post(
159
- # f"{self.base_url}/credits",
160
- # headers=self._get_headers(),
161
- # json=request.model_dump(),
162
- # )
163
- # response.raise_for_status()
164
- # return response.json()
165
-
166
- async def get_credits_history(self) -> List[Dict]:
167
- """Get user credits history."""
168
- response = await self._client.get(
169
- f"{self.base_url}/user-credits-history",
170
- headers=self._get_headers(),
171
- )
172
- response.raise_for_status()
173
- return response.json()