mira-network 0.1.2__tar.gz → 0.1.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mira_network-0.1.4/PKG-INFO +178 -0
- mira_network-0.1.4/README.md +164 -0
- {mira_network-0.1.2 → mira_network-0.1.4}/pyproject.toml +6 -5
- {mira_network-0.1.2/src/mira_sdk → mira_network-0.1.4/src/mira_network}/__init__.py +1 -6
- mira_network-0.1.4/src/mira_network/client.py +173 -0
- {mira_network-0.1.2/src/mira_sdk → mira_network-0.1.4/src/mira_network}/models.py +26 -24
- mira_network-0.1.4/src/mira_network/sync_client.py +111 -0
- {mira_network-0.1.2 → mira_network-0.1.4}/tests/test_client.py +32 -31
- mira_network-0.1.4/tests/test_sync_client.py +100 -0
- mira_network-0.1.2/PKG-INFO +0 -107
- mira_network-0.1.2/README.md +0 -94
- mira_network-0.1.2/src/mira_sdk/client.py +0 -180
- mira_network-0.1.2/tests/test_integration.py +0 -48
- mira_network-0.1.2/tests/test_models.py +0 -109
- {mira_network-0.1.2 → mira_network-0.1.4}/tests/__init__.py +0 -0

--- /dev/null
+++ mira_network-0.1.4/PKG-INFO
@@ -0,0 +1,178 @@
+Metadata-Version: 2.1
+Name: mira-network
+Version: 0.1.4
+Summary: Python SDK for Mira Network API
+Author-Email: sarim2000 <sarimbleedblue@gmail.com>
+License: MIT
+Requires-Python: ==3.10.*
+Requires-Dist: httpx>=0.28.1
+Requires-Dist: pydantic>=2.10.4
+Requires-Dist: typing-extensions>=4.8.0
+Requires-Dist: requests>=2.32.3
+Requires-Dist: pytest-cov>=6.0.0
+Description-Content-Type: text/markdown
+
+# Mira Network SDK
+
+A Python SDK for interacting with the Mira Network API. This SDK provides both synchronous and asynchronous interfaces to access Mira API endpoints for model inference, API token management, and credit system operations.
+
+## Installation
+
+```bash
+pip install mira-network
+```
+
+## Quick Start
+
+### Synchronous Usage
+
+```python
+from mira_network.sync_client import MiraSyncClient
+from mira_network.models import AiRequest, Message
+
+# Using context manager (recommended)
+with MiraSyncClient(api_token="your-api-token") as client:
+    # Example 1: Non-streaming response
+    request = AiRequest(
+        messages=[
+            Message(role="system", content="You are a helpful assistant."),
+            Message(role="user", content="Hello!")
+        ],
+        stream=False
+    )
+    response = client.generate(request)
+    print(response)
+
+    # Example 2: Streaming response
+    stream_request = AiRequest(
+        messages=[
+            Message(role="system", content="You are a helpful assistant."),
+            Message(role="user", content="Tell me a story!")
+        ],
+        stream=True
+    )
+    for chunk in client.generate(stream_request):
+        print(chunk)
+```
+
+### Asynchronous Usage
+
+```python
+import asyncio
+from mira_network.client import MiraClient
+from mira_network.models import AiRequest, Message
+
+async def main():
+    # Using async context manager (recommended)
+    async with MiraClient(api_token="your-api-token") as client:
+        # Example 1: Non-streaming response
+        request = AiRequest(
+            messages=[
+                Message(role="system", content="You are a helpful assistant."),
+                Message(role="user", content="Hello!")
+            ],
+            stream=False
+        )
+        response = await client.generate(request)
+        print(response)
+
+        # Example 2: Streaming response
+        stream_request = AiRequest(
+            messages=[
+                Message(role="system", content="You are a helpful assistant."),
+                Message(role="user", content="Tell me a story!")
+            ],
+            stream=True
+        )
+        async for chunk in await client.generate(stream_request):
+            print(chunk)
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+## API Reference
+
+### Client Initialization
+
+The SDK provides two client classes:
+- `MiraSyncClient`: Synchronous client using `requests`
+- `MiraClient`: Asynchronous client using `httpx`
+
+Both clients support context managers for proper resource cleanup:
+
+```python
+# Synchronous
+with MiraSyncClient(api_token="your-api-token") as client:
+    # Your sync code here
+
+# Asynchronous
+async with MiraClient(api_token="your-api-token") as client:
+    # Your async code here
+```
+
+### Models
+
+- `Message`: Represents a chat message
+  - `role`: String ("system", "user", or "assistant")
+  - `content`: String content of the message
+
+- `AiRequest`: Configuration for model inference
+  - `model`: Model identifier (default: "mira/llama3.1")
+  - `messages`: List of Message objects
+  - `stream`: Boolean to enable streaming responses (default: False)
+  - `model_provider`: Optional ModelProvider configuration
+
+- `ModelProvider`: Custom provider configuration
+  - `base_url`: Provider's base URL
+  - `api_key`: Provider's API key
+
+- `ApiTokenRequest`: Request for creating API tokens
+  - `description`: Optional description for the token
+
+### Available Methods
+
+Both sync and async clients provide the same methods with identical parameters. The only difference is that async methods must be awaited.
+
+#### Model Operations
+```python
+# Sync
+models = client.list_models()
+response = client.generate(AiRequest(messages=[...], stream=False))
+for chunk in client.generate(AiRequest(messages=[...], stream=True)):
+    print(chunk)
+
+# Async
+models = await client.list_models()
+response = await client.generate(AiRequest(messages=[...], stream=False))
+async for chunk in await client.generate(AiRequest(messages=[...], stream=True)):
+    print(chunk)
+```
+
+#### API Token Operations
+```python
+# Sync
+token = client.create_api_token(ApiTokenRequest(description="My Token"))
+tokens = client.list_api_tokens()
+client.delete_api_token("token-to-delete")
+
+# Async
+token = await client.create_api_token(ApiTokenRequest(description="My Token"))
+tokens = await client.list_api_tokens()
+await client.delete_api_token("token-to-delete")
+```
+
+#### Credit Operations
+```python
+# Sync
+credits = client.get_user_credits()
+history = client.get_credits_history()
+
+# Async
+credits = await client.get_user_credits()
+history = await client.get_credits_history()
+```
+
+## License
+
+MIT License

--- /dev/null
+++ mira_network-0.1.4/README.md
@@ -0,0 +1,164 @@
+# Mira Network SDK
+
+A Python SDK for interacting with the Mira Network API. This SDK provides both synchronous and asynchronous interfaces to access Mira API endpoints for model inference, API token management, and credit system operations.
+
+## Installation
+
+```bash
+pip install mira-network
+```
+
+## Quick Start
+
+### Synchronous Usage
+
+```python
+from mira_network.sync_client import MiraSyncClient
+from mira_network.models import AiRequest, Message
+
+# Using context manager (recommended)
+with MiraSyncClient(api_token="your-api-token") as client:
+    # Example 1: Non-streaming response
+    request = AiRequest(
+        messages=[
+            Message(role="system", content="You are a helpful assistant."),
+            Message(role="user", content="Hello!")
+        ],
+        stream=False
+    )
+    response = client.generate(request)
+    print(response)
+
+    # Example 2: Streaming response
+    stream_request = AiRequest(
+        messages=[
+            Message(role="system", content="You are a helpful assistant."),
+            Message(role="user", content="Tell me a story!")
+        ],
+        stream=True
+    )
+    for chunk in client.generate(stream_request):
+        print(chunk)
+```
+
+### Asynchronous Usage
+
+```python
+import asyncio
+from mira_network.client import MiraClient
+from mira_network.models import AiRequest, Message
+
+async def main():
+    # Using async context manager (recommended)
+    async with MiraClient(api_token="your-api-token") as client:
+        # Example 1: Non-streaming response
+        request = AiRequest(
+            messages=[
+                Message(role="system", content="You are a helpful assistant."),
+                Message(role="user", content="Hello!")
+            ],
+            stream=False
+        )
+        response = await client.generate(request)
+        print(response)
+
+        # Example 2: Streaming response
+        stream_request = AiRequest(
+            messages=[
+                Message(role="system", content="You are a helpful assistant."),
+                Message(role="user", content="Tell me a story!")
+            ],
+            stream=True
+        )
+        async for chunk in await client.generate(stream_request):
+            print(chunk)
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+## API Reference
+
+### Client Initialization
+
+The SDK provides two client classes:
+- `MiraSyncClient`: Synchronous client using `requests`
+- `MiraClient`: Asynchronous client using `httpx`
+
+Both clients support context managers for proper resource cleanup:
+
+```python
+# Synchronous
+with MiraSyncClient(api_token="your-api-token") as client:
+    # Your sync code here
+
+# Asynchronous
+async with MiraClient(api_token="your-api-token") as client:
+    # Your async code here
+```
+
+### Models
+
+- `Message`: Represents a chat message
+  - `role`: String ("system", "user", or "assistant")
+  - `content`: String content of the message
+
+- `AiRequest`: Configuration for model inference
+  - `model`: Model identifier (default: "mira/llama3.1")
+  - `messages`: List of Message objects
+  - `stream`: Boolean to enable streaming responses (default: False)
+  - `model_provider`: Optional ModelProvider configuration
+
+- `ModelProvider`: Custom provider configuration
+  - `base_url`: Provider's base URL
+  - `api_key`: Provider's API key
+
+- `ApiTokenRequest`: Request for creating API tokens
+  - `description`: Optional description for the token
+
+### Available Methods
+
+Both sync and async clients provide the same methods with identical parameters. The only difference is that async methods must be awaited.
+
+#### Model Operations
+```python
+# Sync
+models = client.list_models()
+response = client.generate(AiRequest(messages=[...], stream=False))
+for chunk in client.generate(AiRequest(messages=[...], stream=True)):
+    print(chunk)
+
+# Async
+models = await client.list_models()
+response = await client.generate(AiRequest(messages=[...], stream=False))
+async for chunk in await client.generate(AiRequest(messages=[...], stream=True)):
+    print(chunk)
+```
+
+#### API Token Operations
+```python
+# Sync
+token = client.create_api_token(ApiTokenRequest(description="My Token"))
+tokens = client.list_api_tokens()
+client.delete_api_token("token-to-delete")
+
+# Async
+token = await client.create_api_token(ApiTokenRequest(description="My Token"))
+tokens = await client.list_api_tokens()
+await client.delete_api_token("token-to-delete")
+```
+
+#### Credit Operations
+```python
+# Sync
+credits = client.get_user_credits()
+history = client.get_credits_history()
+
+# Async
+credits = await client.get_user_credits()
+history = await client.get_credits_history()
+```
+
+## License
+
+MIT License

--- mira_network-0.1.2/pyproject.toml
+++ mira_network-0.1.4/pyproject.toml
@@ -1,7 +1,4 @@
 [project]
-name = "mira-network"
-version = "0.1.2"
-description = "Python SDK for Mira Network API"
 authors = [
     { name = "sarim2000", email = "sarimbleedblue@gmail.com" },
 ]
@@ -10,18 +7,22 @@ dependencies = [
     "pydantic>=2.10.4",
     "typing-extensions>=4.8.0",
     "requests>=2.32.3",
+    "pytest-cov>=6.0.0",
 ]
-
+description = "Python SDK for Mira Network API"
+name = "mira-network"
 readme = "README.md"
+requires-python = "==3.10.*"
+version = "0.1.4"
 
 [project.license]
 text = "MIT"
 
 [build-system]
+build-backend = "pdm.backend"
 requires = [
     "pdm-backend",
 ]
-build-backend = "pdm.backend"
 
 [tool.pdm]
 distribution = true

--- mira_network-0.1.2/src/mira_sdk/__init__.py
+++ mira_network-0.1.4/src/mira_network/__init__.py
@@ -1,12 +1,10 @@
 from .client import MiraClient
+from .sync_client import MiraSyncClient
 from .models import (
     Message,
     ModelProvider,
     AiRequest,
-    FlowChatCompletion,
-    FlowRequest,
     ApiTokenRequest,
-    AddCreditRequest,
 )
 
 __all__ = [
@@ -14,8 +12,5 @@ __all__ = [
     "Message",
     "ModelProvider",
     "AiRequest",
-    "FlowChatCompletion",
-    "FlowRequest",
     "ApiTokenRequest",
-    "AddCreditRequest",
 ]
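
The `__init__.py` hunks above re-export the new `MiraSyncClient` at the package root and drop the flow- and credit-related models from `__all__`. A minimal import sketch against 0.1.4, assuming the published package matches the `__all__` shown here (the token is a placeholder):

```python
# Hypothetical usage sketch based on the __init__.py diff above: both clients
# and the remaining models import straight from the package root in 0.1.4.
from mira_network import MiraSyncClient, Message, AiRequest

request = AiRequest(
    messages=[Message(role="user", content="Hello!")],
    stream=False,
)

with MiraSyncClient(api_token="your-api-token") as client:  # placeholder token
    print(client.generate(request))
```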

--- /dev/null
+++ mira_network-0.1.4/src/mira_network/client.py
@@ -0,0 +1,173 @@
+from typing import AsyncIterator, Optional, List, Dict, AsyncGenerator, Union
+import httpx
+from .models import (
+    AiRequest,
+    ApiTokenRequest,
+)
+
+
+class MiraClient:
+
+    def __init__(
+        self,
+        base_url: str = "https://apis.mira.network/",
+        api_token: Optional[str] = None,
+    ):
+        """Initialize Mira client.
+
+        Args:
+            base_url: Base URL of the Mira API
+            api_token: Optional API token for authentication
+        """
+        self.base_url = base_url
+        self.api_token = api_token
+        self._client = httpx.AsyncClient()
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        await self._client.aclose()
+
+    def _get_headers(self) -> Dict[str, str]:
+        headers = {"Content-Type": "application/json"}
+        if self.api_token:
+            headers["Authorization"] = f"Bearer {self.api_token}"
+        return headers
+
+    async def list_models(self) -> List[str]:
+        """List available models."""
+        response = await self._client.get(
+            f"{self.base_url}/v1/models",
+            headers=self._get_headers(),
+        )
+        response.raise_for_status()
+        return response.json()
+
+    async def generate(self, request: AiRequest) -> Union[str, AsyncIterator[str]]:
+        """Generate text using the specified model."""
+        response = await self._client.post(
+            f"{self.base_url}/v1/chat/completions",
+            headers=self._get_headers(),
+            json=request.model_dump(),
+        )
+
+        response.raise_for_status()
+
+        if request.stream:
+            return response.aiter_lines()
+        else:
+            return response.json()
+
+    # async def generate_with_flow(
+    #     self, flow_id: str, request: FlowChatCompletion
+    # ) -> Union[str, AsyncGenerator[str, None]]:
+    #     """Generate text using a specific flow."""
+    #     response = await self._client.post(
+    #         f"{self.base_url}/v1/flows/{flow_id}/chat/completions",
+    #         headers=self._get_headers(),
+    #         json=request.model_dump(),
+    #     )
+    #     response.raise_for_status()
+    #     return response.json()
+
+    # async def list_flows(self) -> List[Dict]:
+    #     """List all flows."""
+    #     response = await self._client.get(
+    #         f"{self.base_url}/flows",
+    #         headers=self._get_headers(),
+    #     )
+    #     response.raise_for_status()
+    #     return response.json()
+
+    # async def get_flow(self, flow_id: str) -> Dict:
+    #     """Get details of a specific flow."""
+    #     response = await self._client.get(
+    #         f"{self.base_url}/flows/{flow_id}",
+    #         headers=self._get_headers(),
+    #     )
+    #     response.raise_for_status()
+    #     return response.json()
+
+    # async def create_flow(self, request: FlowRequest) -> Dict:
+    #     """Create a new flow."""
+    #     response = await self._client.post(
+    #         f"{self.base_url}/flows",
+    #         headers=self._get_headers(),
+    #         json=request.model_dump(),
+    #     )
+    #     response.raise_for_status()
+    #     return response.json()
+
+    # async def update_flow(self, flow_id: str, request: FlowRequest) -> Dict:
+    #     """Update an existing flow."""
+    #     response = await self._client.put(
+    #         f"{self.base_url}/flows/{flow_id}",
+    #         headers=self._get_headers(),
+    #         json=request.model_dump(),
+    #     )
+    #     response.raise_for_status()
+    #     return response.json()
+
+    # async def delete_flow(self, flow_id: str) -> None:
+    #     """Delete a flow."""
+    #     response = await self._client.delete(
+    #         f"{self.base_url}/flows/{flow_id}",
+    #         headers=self._get_headers(),
+    #     )
+    #     response.raise_for_status()
+
+    async def create_api_token(self, request: ApiTokenRequest) -> Dict:
+        """Create a new API token."""
+        response = await self._client.post(
+            f"{self.base_url}/api-tokens",
+            headers=self._get_headers(),
+            json=request.model_dump(),
+        )
+        response.raise_for_status()
+        return response.json()
+
+    async def list_api_tokens(self) -> List[Dict]:
+        """List all API tokens."""
+        response = await self._client.get(
+            f"{self.base_url}/api-tokens",
+            headers=self._get_headers(),
+        )
+        response.raise_for_status()
+        return response.json()
+
+    async def delete_api_token(self, token: str) -> None:
+        """Delete an API token."""
+        response = await self._client.delete(
+            f"{self.base_url}/api-tokens/{token}",
+            headers=self._get_headers(),
+        )
+        response.raise_for_status()
+
+    async def get_user_credits(self) -> Dict:
+        """Get user credits information."""
+        response = await self._client.get(
+            f"{self.base_url}/user-credits",
+            headers=self._get_headers(),
+        )
+        response.raise_for_status()
+        return response.json()
+
+    # async def add_credit(self, request: AddCreditRequest) -> Dict:
+    #     """Add credits to a user account."""
+    #     response = await self._client.post(
+    #         f"{self.base_url}/credits",
+    #         headers=self._get_headers(),
+    #         json=request.model_dump(),
+    #     )
+    #     response.raise_for_status()
+    #     return response.json()
+
+    async def get_credits_history(self) -> List[Dict]:
+        """Get user credits history."""
+        response = await self._client.get(
+            f"{self.base_url}/user-credits-history",
+            headers=self._get_headers(),
+        )
+        response.raise_for_status()
+        return response.json()
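
Every request path in `client.py` above ends in `response.raise_for_status()`, so failed calls surface as `httpx.HTTPStatusError` rather than being silently returned. A short handling sketch (the token and error handling are illustrative, not part of the SDK):

```python
# Error-handling sketch for MiraClient: raise_for_status() converts non-2xx
# responses into httpx.HTTPStatusError, which callers can catch and inspect.
import asyncio
import httpx
from mira_network.client import MiraClient

async def main() -> None:
    async with MiraClient(api_token="your-api-token") as client:  # placeholder token
        try:
            models = await client.list_models()
            print(models)
        except httpx.HTTPStatusError as exc:
            # exc.response carries the status code and body returned by the API.
            print(f"Mira API error {exc.response.status_code}: {exc.response.text}")

asyncio.run(main())
```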

--- mira_network-0.1.2/src/mira_sdk/models.py
+++ mira_network-0.1.4/src/mira_network/models.py
@@ -22,7 +22,9 @@ class ModelProvider(BaseModel):
 
 class AiRequest(BaseModel):
     model: str = Field("mira/llama3.1", title="Model")
-    model_provider: Optional[ModelProvider] = Field(
+    model_provider: Optional[ModelProvider] = Field(
+        None, title="Model Provider (optional)"
+    )
     messages: List[Message] = Field([], title="Messages")
     stream: Optional[bool] = Field(False, title="Stream")
 
@@ -34,34 +36,34 @@ class AiRequest(BaseModel):
         return v
 
 
-class FlowChatCompletion(BaseModel):
-    variables: Optional[Dict] = Field(None, title="Variables")
+# class FlowChatCompletion(BaseModel):
+#     variables: Optional[Dict] = Field(None, title="Variables")
 
 
-class FlowRequest(BaseModel):
-    system_prompt: str
-    name: str
+# class FlowRequest(BaseModel):
+#     system_prompt: str
+#     name: str
 
 
 class ApiTokenRequest(BaseModel):
     description: Optional[str] = None
 
 
-class AddCreditRequest(BaseModel):
-    user_id: str
-    amount: float
-    description: Optional[str] = None
-
-    @field_validator("amount")
-    @classmethod
-    def validate_amount(cls, v: float) -> float:
-        if v <= 0:
-            raise ValueError("Amount must be greater than 0")
-        return v
-
-    @field_validator("user_id")
-    @classmethod
-    def validate_user_id(cls, v: str) -> str:
-        if not v.strip():
-            raise ValueError("User ID cannot be empty")
-        return v
+# class AddCreditRequest(BaseModel):
+#     user_id: str
+#     amount: float
+#     description: Optional[str] = None
+
+#     @field_validator("amount")
+#     @classmethod
+#     def validate_amount(cls, v: float) -> float:
+#         if v <= 0:
+#             raise ValueError("Amount must be greater than 0")
+#         return v
+
+#     @field_validator("user_id")
+#     @classmethod
+#     def validate_user_id(cls, v: str) -> str:
+#         if not v.strip():
+#             raise ValueError("User ID cannot be empty")
+#         return v
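
The `models.py` hunks reflow the optional `model_provider` field on `AiRequest` and comment out the flow and credit models. For completeness, a hedged sketch of routing a request through a custom provider, with field names taken from the README's `ModelProvider` description (the URL, keys, and token are placeholders):

```python
# Sketch only: assumes ModelProvider exposes the base_url/api_key fields
# documented in the README above; replace the placeholder endpoint and keys.
from mira_network import MiraSyncClient, Message, AiRequest, ModelProvider

request = AiRequest(
    model="mira/llama3.1",
    model_provider=ModelProvider(
        base_url="https://example-provider.invalid/v1",  # placeholder URL
        api_key="provider-api-key",                      # placeholder key
    ),
    messages=[Message(role="user", content="Hello!")],
    stream=False,
)

with MiraSyncClient(api_token="your-api-token") as client:  # placeholder token
    print(client.generate(request))
```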