mira-network 0.1.5__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mira_network/client.py CHANGED
@@ -1,26 +1,26 @@
1
- from typing import AsyncIterator, Optional, List, Dict, AsyncGenerator, Union
1
+ from typing import AsyncIterator, Optional, List, Dict, AsyncGenerator, Union, Any
2
2
  import httpx
3
3
  from .models import (
4
4
  AiRequest,
5
5
  ApiTokenRequest,
6
+ Message,
6
7
  )
7
8
 
8
9
 
9
10
  class MiraClient:
10
-
11
11
  def __init__(
12
12
  self,
13
+ api_key: Optional[str] = None,
13
14
  base_url: str = "https://apis.mira.network",
14
- api_token: Optional[str] = None,
15
15
  ):
16
16
  """Initialize Mira client.
17
17
 
18
18
  Args:
19
+ api_key: API key for authentication
19
20
  base_url: Base URL of the Mira API
20
- api_token: Optional API token for authentication
21
21
  """
22
22
  self.base_url = base_url
23
- self.api_token = api_token
23
+ self.api_key = api_key
24
24
  self._client = httpx.AsyncClient()
25
25
 
26
26
  async def __aenter__(self):
@@ -31,91 +31,74 @@ class MiraClient:
31
31
 
32
32
  def _get_headers(self) -> Dict[str, str]:
33
33
  headers = {"Content-Type": "application/json"}
34
- if self.api_token:
35
- headers["Authorization"] = f"Bearer {self.api_token}"
34
+ if self.api_key:
35
+ headers["Authorization"] = f"Bearer {self.api_key}"
36
36
  return headers
37
37
 
38
- async def list_models(self) -> List[str]:
39
- """List available models."""
40
- response = await self._client.get(
41
- f"{self.base_url}/v1/models",
42
- headers=self._get_headers(),
38
+ async def chat_completions_create(
39
+ self,
40
+ model: str,
41
+ messages: list[Message],
42
+ stream: bool = False,
43
+ **kwargs: Any,
44
+ ) -> Union[Dict[str, Any], AsyncIterator[Dict[str, Any]]]:
45
+ """Create a chat completion.
46
+
47
+ Args:
48
+ model: The model to use for completion
49
+ messages: A list of messages in the conversation
50
+ stream: Whether to stream the response
51
+ **kwargs: Additional parameters to pass to the API
52
+ """
53
+ request = AiRequest(
54
+ model=model,
55
+ messages=messages,
56
+ stream=stream,
57
+ **kwargs,
43
58
  )
44
- response.raise_for_status()
45
- return response.json()
46
59
 
47
- async def generate(self, request: AiRequest) -> Union[str, AsyncIterator[str]]:
48
- """Generate text using the specified model."""
60
+ print("\n\n\n======>", request.model_dump(), "\n\n\n")
61
+
49
62
  response = await self._client.post(
50
63
  f"{self.base_url}/v1/chat/completions",
51
64
  headers=self._get_headers(),
52
65
  json=request.model_dump(),
53
66
  )
54
-
55
67
  response.raise_for_status()
56
68
 
57
- if request.stream:
58
- return response.aiter_lines()
59
- else:
60
- return response.json()
61
-
62
- # async def generate_with_flow(
63
- # self, flow_id: str, request: FlowChatCompletion
64
- # ) -> Union[str, AsyncGenerator[str, None]]:
65
- # """Generate text using a specific flow."""
66
- # response = await self._client.post(
67
- # f"{self.base_url}/v1/flows/{flow_id}/chat/completions",
68
- # headers=self._get_headers(),
69
- # json=request.model_dump(),
70
- # )
71
- # response.raise_for_status()
72
- # return response.json()
73
-
74
- # async def list_flows(self) -> List[Dict]:
75
- # """List all flows."""
76
- # response = await self._client.get(
77
- # f"{self.base_url}/flows",
78
- # headers=self._get_headers(),
79
- # )
80
- # response.raise_for_status()
81
- # return response.json()
82
-
83
- # async def get_flow(self, flow_id: str) -> Dict:
84
- # """Get details of a specific flow."""
85
- # response = await self._client.get(
86
- # f"{self.base_url}/flows/{flow_id}",
87
- # headers=self._get_headers(),
88
- # )
89
- # response.raise_for_status()
90
- # return response.json()
91
-
92
- # async def create_flow(self, request: FlowRequest) -> Dict:
93
- # """Create a new flow."""
94
- # response = await self._client.post(
95
- # f"{self.base_url}/flows",
96
- # headers=self._get_headers(),
97
- # json=request.model_dump(),
98
- # )
99
- # response.raise_for_status()
100
- # return response.json()
101
-
102
- # async def update_flow(self, flow_id: str, request: FlowRequest) -> Dict:
103
- # """Update an existing flow."""
104
- # response = await self._client.put(
105
- # f"{self.base_url}/flows/{flow_id}",
106
- # headers=self._get_headers(),
107
- # json=request.model_dump(),
108
- # )
109
- # response.raise_for_status()
110
- # return response.json()
111
-
112
- # async def delete_flow(self, flow_id: str) -> None:
113
- # """Delete a flow."""
114
- # response = await self._client.delete(
115
- # f"{self.base_url}/flows/{flow_id}",
116
- # headers=self._get_headers(),
117
- # )
118
- # response.raise_for_status()
69
+ if stream:
70
+ return self._stream_response(response)
71
+ return response.json()
72
+
73
+ async def _stream_response(
74
+ self, response: httpx.Response
75
+ ) -> AsyncIterator[Dict[str, Any]]:
76
+ """Handle streaming response.
77
+
78
+ Args:
79
+ response: The HTTP response object
80
+ """
81
+ async for line in response.aiter_lines():
82
+ if line.strip():
83
+ yield self._format_stream_response(line)
84
+
85
+ def _format_stream_response(self, line: str) -> Dict[str, Any]:
86
+ """Format streaming response to match OpenAI's format.
87
+
88
+ Args:
89
+ line: The response line
90
+ """
91
+ # Add formatting logic here if needed
92
+ return {"choices": [{"delta": {"content": line}}]}
93
+
94
+ async def list_models(self) -> List[str]:
95
+ """List available models."""
96
+ response = await self._client.get(
97
+ f"{self.base_url}/v1/models",
98
+ headers=self._get_headers(),
99
+ )
100
+ response.raise_for_status()
101
+ return response.json()
119
102
 
120
103
  async def create_api_token(self, request: ApiTokenRequest) -> Dict:
121
104
  """Create a new API token."""
@@ -153,16 +136,6 @@ class MiraClient:
153
136
  response.raise_for_status()
154
137
  return response.json()
155
138
 
156
- # async def add_credit(self, request: AddCreditRequest) -> Dict:
157
- # """Add credits to a user account."""
158
- # response = await self._client.post(
159
- # f"{self.base_url}/credits",
160
- # headers=self._get_headers(),
161
- # json=request.model_dump(),
162
- # )
163
- # response.raise_for_status()
164
- # return response.json()
165
-
166
139
  async def get_credits_history(self) -> List[Dict]:
167
140
  """Get user credits history."""
168
141
  response = await self._client.get(
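The hunks above replace the 0.1.5 `generate(AiRequest)` entry point with an OpenAI-style `chat_completions_create(model, messages, stream, **kwargs)` method and rename the constructor's `api_token` argument to `api_key`. As a minimal sketch of the new 0.1.6 interface (the API key and model name are placeholders, and the non-streaming response shape is assumed to follow the OpenAI-style layout used in the package README):

```python
import asyncio

from mira_network import MiraClient


async def main() -> None:
    # "your-api-key" and "your-chosen-model" are placeholders.
    async with MiraClient(api_key="your-api-key") as client:
        # Non-streaming: the parsed JSON response is returned as a dict.
        response = await client.chat_completions_create(
            model="your-chosen-model",
            messages=[{"role": "user", "content": "Hello!"}],
        )
        print(response["choices"][0]["message"]["content"])

        # Streaming: an async iterator of delta-style chunks is returned.
        stream = await client.chat_completions_create(
            model="your-chosen-model",
            messages=[{"role": "user", "content": "Tell me a story"}],
            stream=True,
        )
        async for chunk in stream:
            print(chunk["choices"][0]["delta"]["content"], end="")


asyncio.run(main())
```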
mira_network-0.1.6.dist-info/METADATA ADDED
@@ -0,0 +1,349 @@
1
+ Metadata-Version: 2.1
2
+ Name: mira-network
3
+ Version: 0.1.6
4
+ Summary: Python SDK for Mira Network API
5
+ Author-Email: sarim2000 <sarimbleedblue@gmail.com>
6
+ License: MIT
7
+ Requires-Python: ==3.11.*
8
+ Requires-Dist: httpx>=0.28.1
9
+ Requires-Dist: pydantic>=2.10.4
10
+ Requires-Dist: typing-extensions>=4.8.0
11
+ Requires-Dist: requests>=2.32.3
12
+ Requires-Dist: pytest-cov>=6.0.0
13
+ Description-Content-Type: text/markdown
14
+
15
+ <div align="center">
16
+ <img src="https://your-domain.com/logo.png" alt="Mira Network SDK" width="200"/>
17
+ <h1>Mira Network Python SDK</h1>
18
+ <p><strong>Your Universal Gateway to AI Language Models</strong></p>
19
+ </div>
20
+
21
+ <p align="center">
22
+ <a href="https://badge.fury.io/py/mira-network"><img src="https://badge.fury.io/py/mira-network.svg" alt="PyPI version"></a>
23
+ <a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT"></a>
24
+ <a href="https://github.com/mira-network/python-sdk/actions"><img src="https://github.com/mira-network/python-sdk/workflows/tests/badge.svg" alt="Build Status"></a>
25
+ <a href="https://codecov.io/gh/mira-network/python-sdk"><img src="https://codecov.io/gh/mira-network/python-sdk/branch/main/graph/badge.svg" alt="Coverage Status"></a>
26
+ <a href="https://pypi.org/project/mira-network/"><img src="https://img.shields.io/pypi/dm/mira-network.svg" alt="Downloads"></a>
27
+ <a href="https://discord.gg/mira-network"><img src="https://img.shields.io/discord/1234567890?color=7289da&label=discord" alt="Discord"></a>
28
+ </p>
29
+
30
+ <p align="center">
31
+ <b>Mira Client enables seamless integration with multiple language models while providing advanced routing, load balancing, and flow management capabilities.</b>
32
+ </p>
33
+
34
+ ---
35
+
36
+ ## 🌟 What is Mira Network?
37
+
38
+ Mira Network is your unified interface to the world of AI language models. It provides:
39
+
40
+ - 🔄 **Smart Model Routing**: Route requests across different models
41
+ - ⚖️ **Load Balancing**: Distribute workload across nodes
42
+ - 🌊 **Flow Management**: Handle request patterns efficiently
43
+ - 🔌 **Universal Integration**: Single API for multiple models
44
+ - 📊 **Usage Tracking**: Monitor your model usage
45
+
46
+ ## Why Mira Network SDK?
47
+
48
+ | Feature | Mira SDK | Traditional Approach |
49
+ | ---------------------- | -------------------------- | ----------------------- |
50
+ | 🔄 Multi-model Support | Single unified API | Separate APIs per model |
51
+ | ⚖️ Load Balancing | Built-in | Custom implementation |
52
+ | 🌊 Flow Control | Automatic handling | Manual implementation |
53
+ | 📊 Usage Tracking | Integrated | Custom tracking needed |
54
+ | 🛡️ Error Handling | Standardized across models | Model-specific handling |
55
+
56
+ ## 🎯 Perfect For
57
+
58
+ - 🤖 AI Applications
59
+ - 📝 Text Generation
60
+ - 🔍 Search Enhancement
61
+ - 🎮 Interactive Systems
62
+
63
+ ## 🏃 Quick Start
64
+
65
+ ```bash
66
+ pip install mira-network
67
+ ```
68
+
69
+ ```python
70
+ from mira_network import MiraClient
71
+
72
+ async def get_ai_response(prompt):
73
+ async with MiraClient() as client:
74
+ return await client.chat_completions_create(
75
+ model="your-chosen-model",
76
+ messages=[{"role": "user", "content": prompt}]
77
+ )
78
+ ```
79
+
80
+ ## 🏗️ Architecture
81
+
82
+ ```mermaid
83
+ graph LR
84
+ A[Your App] --> B[Mira SDK]
85
+ B --> C[Load Balancer]
86
+ C --> D[Mira Node 1]
87
+ C --> E[Mira Node 2]
88
+ C --> F[Mira Node N]
89
+ ```
90
+
91
+ ## ✨ Key Features
92
+
93
+ - 🔌 Simple, intuitive API
94
+ - 🔄 Async-first design
95
+ - 🌊 Streaming support
96
+ - 🔐 Error handling
97
+ - 🛠️ Customizable nodes
98
+ - 📊 Usage tracking
99
+
100
+ ## 📑 Table of Contents
101
+
102
+ - [Installation](#installation)
103
+ - [Quick Start](#-quick-start)
104
+ - [Basic Usage](#-basic-usage)
105
+ - [Advanced Usage](#-advanced-usage)
106
+ - [API Reference](#-reference)
107
+ - [Support](#-support)
108
+ - [Contributing](#-contributing)
109
+ - [License](#-license)
110
+
111
+ ## 🔧 Installation
112
+
113
+ Install the SDK using pip:
114
+
115
+ ```bash
116
+ pip install mira-network
117
+ ```
118
+
119
+ ## 🚀 Quick Start
120
+
121
+ Experience the power of Mira Network in just a few lines of code:
122
+
123
+ ```python
124
+ from mira_network import MiraClient
125
+
126
+ async def main():
127
+ # Initialize with your API key
128
+ client = MiraClient(api_key="your-api-key")
129
+
130
+ # Get a response from AI
131
+ response = await client.chat_completions_create(
132
+ model="your-chosen-model",
133
+ messages=[
134
+ {"role": "user", "content": "What is the capital of France?"}
135
+ ]
136
+ )
137
+
138
+ # Print the AI's response
139
+ print(response["choices"][0]["message"]["content"])
140
+
141
+ if __name__ == "__main__":
142
+ import asyncio
143
+ asyncio.run(main())
144
+ ```
145
+
146
+ ## 📝 Basic Usage
147
+
148
+ ### Having a Conversation
149
+
150
+ Engage in natural conversations with AI models. The SDK handles the complexities of managing conversation context and model interactions:
151
+
152
+ ```python
153
+ response = await client.chat_completions_create(
154
+ model="your-chosen-model",
155
+ messages=[
156
+ {"role": "system", "content": "You are a helpful assistant"},
157
+ {"role": "user", "content": "Hi! Can you help me?"},
158
+ ]
159
+ )
160
+ ```
161
+
162
+ ### Checking Available Models
163
+
164
+ Explore the diverse range of available AI models:
165
+
166
+ ```python
167
+ models = await client.list_models()
168
+ print(models)
169
+ ```
170
+
171
+ ### Checking Your Credits
172
+
173
+ Monitor your usage and available credits:
174
+
175
+ ```python
176
+ credits = await client.get_user_credits()
177
+ print(credits)
178
+ ```
179
+
180
+ ## 🔧 Advanced Usage
181
+
182
+ ### Streaming Responses
183
+
184
+ Perfect for real-time applications and interactive experiences:
185
+
186
+ ```python
187
+ stream = await client.chat_completions_create(
188
+ model="your-chosen-model",
189
+ messages=[
190
+ {"role": "user", "content": "Write a story"}
191
+ ],
192
+ stream=True
193
+ )
194
+
195
+ async for chunk in stream:
196
+ print(chunk["choices"][0]["delta"]["content"], end="")
197
+ ```
198
+
199
+ ### Custom Mira Nodes
200
+
201
+ Integrate your preferred Mira nodes seamlessly:
202
+
203
+ ```python
204
+ response = await client.chat_completions_create(
205
+ model="your-model",
206
+ messages=[{"role": "user", "content": "Hello"}],
207
+ mira_node={
208
+ "base_url": "https://custom-node.com",
209
+ "api_key": "node-api-key"
210
+ }
211
+ )
212
+ ```
213
+
214
+ ### API Token Management
215
+
216
+ Secure and flexible token management for your applications:
217
+
218
+ ```python
219
+ # Create new token
220
+ new_token = await client.create_api_token(
221
+ {"description": "Production API Key"}
222
+ )
223
+
224
+ # List tokens
225
+ tokens = await client.list_api_tokens()
226
+
227
+ # Delete token
228
+ await client.delete_api_token("token-id")
229
+ ```
230
+
231
+ ### Using as Context Manager
232
+
233
+ Efficient resource management with context managers:
234
+
235
+ ```python
236
+ async with MiraClient(api_key="your-api-key") as client:
237
+ response = await client.chat_completions_create(...)
238
+ ```
239
+
240
+ ## 📚 Reference
241
+
242
+ ### Message Structure
243
+
244
+ Understanding the core message components:
245
+
246
+ ```python
247
+ Message:
248
+ role: str # "system", "user", or "assistant"
249
+ content: str # The message content
250
+ ```
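+ For illustration, a conversation can be supplied either as plain dicts with these two fields (as in the examples above) or, assuming `Message` is the Pydantic model exposed in `mira_network.models`, as model instances; a minimal sketch:
+
+ ```python
+ from mira_network.models import Message
+
+ # The same conversation expressed two ways; both should validate to Message objects.
+ messages_as_dicts = [
+     {"role": "system", "content": "You are a helpful assistant"},
+     {"role": "user", "content": "Hi! Can you help me?"},
+ ]
+ messages_as_models = [
+     Message(role="system", content="You are a helpful assistant"),
+     Message(role="user", content="Hi! Can you help me?"),
+ ]
+
+ response = await client.chat_completions_create(
+     model="your-chosen-model",
+     messages=messages_as_dicts,  # or messages_as_models
+ )
+ ```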
251
+
252
+ ### Error Handling
253
+
254
+ Robust error handling for production applications:
255
+
256
+ #### Validation Errors
257
+
258
+ ```python
259
+ try:
260
+ response = await client.chat_completions_create(
261
+ model="your-chosen-model",
262
+ messages=[
263
+ {"role": "invalid", "content": "Hello"} # Invalid role
264
+ ]
265
+ )
266
+ except ValueError as e:
267
+ print(f"Validation error: {e}")
268
+ ```
269
+
270
+ #### Network Errors
271
+
272
+ ```python
273
+ try:
274
+ response = await client.chat_completions_create(...)
275
+ except httpx.HTTPError as e:
276
+ print(f"HTTP error: {e}")
277
+ ```
278
+
279
+ ### Environment Configuration
280
+
281
+ Flexible configuration options for different environments:
282
+
283
+ ```python
284
+ import os
285
+ from mira_network import MiraClient
286
+
287
+ client = MiraClient(
288
+ api_key=os.getenv("MIRA_API_KEY"),
289
+ base_url=os.getenv("MIRA_API_URL", "https://apis.mira.network")
290
+ )
291
+ ```
292
+
293
+ ## 💡 Real-world Examples
294
+
295
+ ### AI-powered Customer Service
296
+
297
+ ```python
298
+ async def handle_customer_query(query: str) -> str:
299
+ async with MiraClient() as client:
300
+ response = await client.chat_completions_create(
301
+ model="your-chosen-model",
302
+ messages=[
303
+ {"role": "system", "content": "You are a helpful customer service agent."},
304
+ {"role": "user", "content": query}
305
+ ],
306
+ temperature=0.7,
307
+ max_tokens=150
308
+ )
309
+ return response["choices"][0]["message"]["content"]
310
+ ```
311
+
312
+ ### Content Generation Pipeline
313
+
314
+ ```python
315
+ async def generate_blog_post(topic: str) -> dict:
316
+ async with MiraClient() as client:
317
+ # Generate outline
318
+ outline = await client.chat_completions_create(...)
319
+
320
+ # Generate content
321
+ content = await client.chat_completions_create(...)
322
+
323
+ # Generate meta description
324
+ meta = await client.chat_completions_create(...)
325
+
326
+ return {"outline": outline, "content": content, "meta": meta}
327
+ ```
328
+
329
+ ## 🤝 Support
330
+
331
+ For feature requests and bug reports, please visit our [Console Feedback](https://console-feedback.arohalabs.tech/).
332
+
333
+ ## 👥 Contributing
334
+
335
+ We welcome contributions! Here's how you can help:
336
+
337
+ 1. Fork the repository
338
+ 2. Create your feature branch (`git checkout -b feature/amazing-feature`)
339
+ 3. Commit your changes (`git commit -m 'Add amazing feature'`)
340
+ 4. Push to the branch (`git push origin feature/amazing-feature`)
341
+ 5. Open a Pull Request
342
+
343
+ ## 📄 License
344
+
345
+ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
346
+
347
+ ---
348
+
349
+ <p align="center">Built with ❤️ by the Mira Network team</p>
mira_network-0.1.6.dist-info/RECORD ADDED
@@ -0,0 +1,9 @@
1
+ mira_network-0.1.6.dist-info/METADATA,sha256=bkV2UrnyIAIYM-cavAb_Cv1KfOlfIowH-F3muw61ZrM,9222
2
+ mira_network-0.1.6.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
3
+ mira_network-0.1.6.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
4
+ mira_network-0.1.6.dist-info/licenses/LICENSE,sha256=bOPoXuA3yH-uo70eTxPrRY4aiy8NkZDVkI8ZVagZIfc,1069
5
+ mira_network/__init__.py,sha256=82x6bhP9_ZhHaBkW-O10F4ADipp1crGQzTaxQ878buA,272
6
+ mira_network/client.py,sha256=rM1QJUPCmPdctC-Hu9migBXfP8jlDDOyLuCvdb6nQf4,4470
7
+ mira_network/models.py,sha256=aEJiDHEDFIAQMB-C8ZFY5ruRNGJ-U2Cz9vS5nF7GqVg,1805
8
+ mira_network/sync_client.py,sha256=vAiQG0SDHO9aROklnPqr0m-AsPIyL8wNfA0L7h5EhPg,3399
9
+ mira_network-0.1.6.dist-info/RECORD,,
mira_network-0.1.6.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2024 Mira Network
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
mira_network-0.1.5.dist-info/METADATA DELETED
@@ -1,186 +0,0 @@
1
- Metadata-Version: 2.1
2
- Name: mira-network
3
- Version: 0.1.5
4
- Summary: Python SDK for Mira Network API
5
- Author-Email: sarim2000 <sarimbleedblue@gmail.com>
6
- License: MIT
7
- Requires-Python: ==3.10.*
8
- Requires-Dist: httpx>=0.28.1
9
- Requires-Dist: pydantic>=2.10.4
10
- Requires-Dist: typing-extensions>=4.8.0
11
- Requires-Dist: requests>=2.32.3
12
- Requires-Dist: pytest-cov>=6.0.0
13
- Description-Content-Type: text/markdown
14
-
15
- # Mira Network SDK
16
-
17
- A Python SDK for interacting with the Mira Network API. This SDK provides both synchronous and asynchronous interfaces to access Mira API endpoints for model inference, API token management, and credit system operations.
18
-
19
- ## Installation
20
-
21
- ```bash
22
- pip install mira-network
23
- ```
24
-
25
- ## Quick Start
26
-
27
- ### Synchronous Usage
28
-
29
- ```python
30
- from mira_network.sync_client import MiraSyncClient
31
- from mira_network.models import AiRequest, Message
32
-
33
- # Using context manager (recommended)
34
- with MiraSyncClient(api_token="your-api-token") as client: # base_url defaults to https://apis.mira.network/
35
- # Example 1: Non-streaming response
36
- request = AiRequest(
37
- messages=[
38
- Message(role="system", content="You are a helpful assistant."),
39
- Message(role="user", content="Hello!")
40
- ],
41
- stream=False
42
- )
43
- response = client.generate(request)
44
- print(response)
45
-
46
- # Example 2: Streaming response
47
- stream_request = AiRequest(
48
- messages=[
49
- Message(role="system", content="You are a helpful assistant."),
50
- Message(role="user", content="Tell me a story!")
51
- ],
52
- stream=True
53
- )
54
- for chunk in client.generate(stream_request):
55
- print(chunk)
56
- ```
57
-
58
- ### Asynchronous Usage
59
-
60
- ```python
61
- import asyncio
62
- from mira_network.client import MiraClient
63
- from mira_network.models import AiRequest, Message
64
-
65
- async def main():
66
- # Using async context manager (recommended)
67
- async with MiraClient(api_token="your-api-token") as client: # base_url defaults to https://apis.mira.network/
68
- # Example 1: Non-streaming response
69
- request = AiRequest(
70
- messages=[
71
- Message(role="system", content="You are a helpful assistant."),
72
- Message(role="user", content="Hello!")
73
- ],
74
- model="gpt-4o",
75
- model_provider=None,
76
- stream=False
77
- )
78
- response = await client.generate(request)
79
- print(response)
80
-
81
- # Example 2: Streaming response
82
- stream_request = AiRequest(
83
- messages=[
84
- Message(role="system", content="You are a helpful assistant."),
85
- Message(role="user", content="Tell me a story!")
86
- ],
87
- stream=True
88
- )
89
- async for chunk in await client.generate(stream_request):
90
- print(chunk)
91
-
92
- if __name__ == "__main__":
93
- asyncio.run(main())
94
- ```
95
-
96
- ## API Reference
97
-
98
- ### Client Initialization
99
-
100
- The SDK provides two client classes:
101
- - `MiraSyncClient`: Synchronous client using `requests`
102
- - `MiraClient`: Asynchronous client using `httpx`
103
-
104
- Both clients support context managers for proper resource cleanup:
105
-
106
- ```python
107
- # Synchronous
108
- with MiraSyncClient(
109
- api_token="your-api-token",
110
- base_url="https://apis.mira.network/" # Optional, this is the default
111
- ) as client:
112
- # Your sync code here
113
-
114
- # Asynchronous
115
- async with MiraClient(
116
- api_token="your-api-token",
117
- base_url="https://apis.mira.network/" # Optional, this is the default
118
- ) as client:
119
- # Your async code here
120
- ```
121
-
122
- ### Models
123
-
124
- - `Message`: Represents a chat message
125
- - `role`: String ("system", "user", or "assistant")
126
- - `content`: String content of the message
127
-
128
- - `AiRequest`: Configuration for model inference
129
- - `model`: Model identifier (default: "mira/llama3.1")
130
- - `messages`: List of Message objects
131
- - `stream`: Boolean to enable streaming responses (default: False)
132
- - `model_provider`: Optional ModelProvider configuration
133
-
134
- - `ModelProvider`: Custom provider configuration
135
- - `base_url`: Provider's base URL
136
- - `api_key`: Provider's API key
137
-
138
- - `ApiTokenRequest`: Request for creating API tokens
139
- - `description`: Optional description for the token
140
-
141
- ### Available Methods
142
-
143
- Both sync and async clients provide the same methods with identical parameters. The only difference is that async methods must be awaited.
144
-
145
- #### Model Operations
146
- ```python
147
- # Sync
148
- models = client.list_models()
149
- response = client.generate(AiRequest(messages=[...], stream=False))
150
- for chunk in client.generate(AiRequest(messages=[...], stream=True)):
151
- print(chunk)
152
-
153
- # Async
154
- models = await client.list_models()
155
- response = await client.generate(AiRequest(messages=[...], stream=False))
156
- async for chunk in await client.generate(AiRequest(messages=[...], stream=True)):
157
- print(chunk)
158
- ```
159
-
160
- #### API Token Operations
161
- ```python
162
- # Sync
163
- token = client.create_api_token(ApiTokenRequest(description="My Token"))
164
- tokens = client.list_api_tokens()
165
- client.delete_api_token("token-to-delete")
166
-
167
- # Async
168
- token = await client.create_api_token(ApiTokenRequest(description="My Token"))
169
- tokens = await client.list_api_tokens()
170
- await client.delete_api_token("token-to-delete")
171
- ```
172
-
173
- #### Credit Operations
174
- ```python
175
- # Sync
176
- credits = client.get_user_credits()
177
- history = client.get_credits_history()
178
-
179
- # Async
180
- credits = await client.get_user_credits()
181
- history = await client.get_credits_history()
182
- ```
183
-
184
- ## License
185
-
186
- MIT License
mira_network-0.1.5.dist-info/RECORD DELETED
@@ -1,8 +0,0 @@
1
- mira_network-0.1.5.dist-info/METADATA,sha256=Q9jRYiP-Oo6ecT8iBgCIxi4pT-4nbHwuEP-8vgZ2OEc,5353
2
- mira_network-0.1.5.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
3
- mira_network-0.1.5.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
4
- mira_network/__init__.py,sha256=82x6bhP9_ZhHaBkW-O10F4ADipp1crGQzTaxQ878buA,272
5
- mira_network/client.py,sha256=HEqj1G1VfDZxZ3Kh2YPNBeUMlB5Pb56HBSvh80Wde2c,5738
6
- mira_network/models.py,sha256=aEJiDHEDFIAQMB-C8ZFY5ruRNGJ-U2Cz9vS5nF7GqVg,1805
7
- mira_network/sync_client.py,sha256=vAiQG0SDHO9aROklnPqr0m-AsPIyL8wNfA0L7h5EhPg,3399
8
- mira_network-0.1.5.dist-info/RECORD,,