plexe 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- plexe-0.1.0/.gitignore +31 -0
- plexe-0.1.0/LICENSE +21 -0
- plexe-0.1.0/Makefile +18 -0
- plexe-0.1.0/PKG-INFO +164 -0
- plexe-0.1.0/README.md +147 -0
- plexe-0.1.0/docs/img/plexe-logo.svg +10 -0
- plexe-0.1.0/plexe/__init__.py +78 -0
- plexe-0.1.0/plexe/client.py +222 -0
- plexe-0.1.0/plexe/tests/__init__.py +0 -0
- plexe-0.1.0/plexe/tests/test_integration.py +183 -0
- plexe-0.1.0/pyproject.toml +47 -0
plexe-0.1.0/.gitignore
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
__pycache__/
|
|
2
|
+
*.py[cod]
|
|
3
|
+
*$py.class
|
|
4
|
+
*.so
|
|
5
|
+
.Python
|
|
6
|
+
build/
|
|
7
|
+
develop-eggs/
|
|
8
|
+
dist/
|
|
9
|
+
downloads/
|
|
10
|
+
eggs/
|
|
11
|
+
.eggs/
|
|
12
|
+
lib/
|
|
13
|
+
lib64/
|
|
14
|
+
parts/
|
|
15
|
+
sdist/
|
|
16
|
+
var/
|
|
17
|
+
wheels/
|
|
18
|
+
*.egg-info/
|
|
19
|
+
.installed.cfg
|
|
20
|
+
*.egg
|
|
21
|
+
MANIFEST
|
|
22
|
+
.env
|
|
23
|
+
.venv
|
|
24
|
+
env/
|
|
25
|
+
venv/
|
|
26
|
+
ENV/
|
|
27
|
+
env.bak/
|
|
28
|
+
venv.bak/
|
|
29
|
+
.idea/
|
|
30
|
+
.vscode/
|
|
31
|
+
plexe/__pycache__
|
plexe-0.1.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2024 Plexe AI
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
plexe-0.1.0/Makefile
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
.PHONY: install clean build publish
|
|
2
|
+
|
|
3
|
+
install:
|
|
4
|
+
pip install -e .
|
|
5
|
+
|
|
6
|
+
clean:
|
|
7
|
+
rm -rf build/ dist/ *.egg-info __pycache__
|
|
8
|
+
find . -type d -name __pycache__ -exec rm -rf {} +
|
|
9
|
+
|
|
10
|
+
build: clean
|
|
11
|
+
python -m build
|
|
12
|
+
|
|
13
|
+
publish: build
|
|
14
|
+
python -m twine upload dist/*
|
|
15
|
+
|
|
16
|
+
# Update version like: make version VERSION=0.1.1
|
|
17
|
+
version:
|
|
18
|
+
sed -i 's/version="[0-9.]*"/version="$(VERSION)"/' setup.py
|
plexe-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
Metadata-Version: 2.3
|
|
2
|
+
Name: plexe
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Create ML models from natural language descriptions
|
|
5
|
+
Project-URL: Homepage, https://github.com/plexe-ai/plexe-python
|
|
6
|
+
Project-URL: Repository, https://github.com/plexe-ai/plexe-python
|
|
7
|
+
Author-email: Plexe AI <info@plexe.ai>
|
|
8
|
+
Requires-Python: >=3.7
|
|
9
|
+
Requires-Dist: aiofiles>=0.8.0
|
|
10
|
+
Requires-Dist: httpx>=0.24.0
|
|
11
|
+
Provides-Extra: dev
|
|
12
|
+
Requires-Dist: black>=22.0.0; extra == 'dev'
|
|
13
|
+
Requires-Dist: pytest-asyncio>=0.20.0; extra == 'dev'
|
|
14
|
+
Requires-Dist: pytest-cov>=4.0.0; extra == 'dev'
|
|
15
|
+
Requires-Dist: pytest>=7.0; extra == 'dev'
|
|
16
|
+
Description-Content-Type: text/markdown
|
|
17
|
+
|
|
18
|
+
<div align="center">
|
|
19
|
+
|
|
20
|
+
# PlexeAI
|
|
21
|
+
|
|
22
|
+
<img src="docs/img/plexe-logo.svg" alt="PlexeAI Logo" width="100" height="100"/>
|
|
23
|
+
|
|
24
|
+
### Create ML models from natural language descriptions
|
|
25
|
+
|
|
26
|
+
[](https://badge.fury.io/py/plexeai)
|
|
27
|
+
[](https://pypi.org/project/plexeai/)
|
|
28
|
+
[](https://opensource.org/licenses/MIT)
|
|
29
|
+
|
|
30
|
+
</div>
|
|
31
|
+
|
|
32
|
+
---
|
|
33
|
+
|
|
34
|
+
## 🚀 Features
|
|
35
|
+
|
|
36
|
+
- 🤖 **AI-Powered Model Creation** - Build ML models using natural language descriptions
|
|
37
|
+
- 📊 **Automated Training** - Upload your data and let PlexeAI handle the rest
|
|
38
|
+
- ⚡ **Async Support** - Built-in async interfaces for high-performance applications
|
|
39
|
+
- 🔄 **Batch Processing** - Efficient batch prediction capabilities
|
|
40
|
+
- 🛠️ **Simple API** - Intuitive interface for both beginners and experts
|
|
41
|
+
|
|
42
|
+
## 📦 Installation
|
|
43
|
+
|
|
44
|
+
```bash
|
|
45
|
+
pip install plexe
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
## 🏃♂️ Quickstart
|
|
49
|
+
|
|
50
|
+
```python
|
|
51
|
+
import plexe
|
|
52
|
+
|
|
53
|
+
# Create a model in seconds
|
|
54
|
+
model_version = plexe.build(
|
|
55
|
+
goal="predict customer churn based on usage patterns",
|
|
56
|
+
model_name="churn-predictor",
|
|
57
|
+
data_files="customer_data.csv"
|
|
58
|
+
)
|
|
59
|
+
|
|
60
|
+
# Make predictions
|
|
61
|
+
result = plexe.infer(
|
|
62
|
+
model_name="churn-predictor",
|
|
63
|
+
model_version=model_version,
|
|
64
|
+
input_data={
|
|
65
|
+
"usage": 100,
|
|
66
|
+
"tenure": 12,
|
|
67
|
+
"plan_type": "premium"
|
|
68
|
+
}
|
|
69
|
+
)
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
## 🎯 Example Use Cases
|
|
73
|
+
|
|
74
|
+
- 📈 **Churn Prediction**: Predict customer churn using historical data
|
|
75
|
+
- 🏷️ **Classification**: Categorize text, images, or any structured data
|
|
76
|
+
- 📊 **Regression**: Predict numerical values like sales or pricing
|
|
77
|
+
- 🔄 **Time Series**: Forecast trends and patterns in sequential data
|
|
78
|
+
|
|
79
|
+
## 🔥 Advanced Usage
|
|
80
|
+
|
|
81
|
+
### Batch Predictions
|
|
82
|
+
|
|
83
|
+
```python
|
|
84
|
+
results = plexe.batch_infer(
|
|
85
|
+
model_name="churn-predictor",
|
|
86
|
+
model_version=model_version,
|
|
87
|
+
inputs=[
|
|
88
|
+
{"usage": 100, "tenure": 12, "plan_type": "premium"},
|
|
89
|
+
{"usage": 50, "tenure": 6, "plan_type": "basic"}
|
|
90
|
+
]
|
|
91
|
+
)
|
|
92
|
+
```
|
|
93
|
+
|
|
94
|
+
### Async Support
|
|
95
|
+
|
|
96
|
+
```python
|
|
97
|
+
async def main():
|
|
98
|
+
model_version = await plexe.abuild(
|
|
99
|
+
goal="predict customer churn",
|
|
100
|
+
model_name="churn-predictor",
|
|
101
|
+
data_files="customer_data.csv"
|
|
102
|
+
)
|
|
103
|
+
|
|
104
|
+
result = await plexe.ainfer(
|
|
105
|
+
model_name="churn-predictor",
|
|
106
|
+
model_version=model_version,
|
|
107
|
+
input_data={"usage": 100, "tenure": 12}
|
|
108
|
+
)
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
### Direct Client Usage
|
|
112
|
+
|
|
113
|
+
```python
|
|
114
|
+
from plexe import PlexeAI
|
|
115
|
+
|
|
116
|
+
with PlexeAI(api_key="your_api_key_here") as client:
|
|
117
|
+
# Upload data
|
|
118
|
+
upload_id = client.upload_files("customer_data.csv")
|
|
119
|
+
|
|
120
|
+
# Create and use model
|
|
121
|
+
model_version = client.build(
|
|
122
|
+
goal="predict customer churn",
|
|
123
|
+
model_name="churn-predictor",
|
|
124
|
+
upload_id=upload_id
|
|
125
|
+
)
|
|
126
|
+
```
|
|
127
|
+
|
|
128
|
+
## 📚 Documentation
|
|
129
|
+
|
|
130
|
+
Check out our [comprehensive documentation](https://docs.plexe.ai) for:
|
|
131
|
+
- Detailed API reference
|
|
132
|
+
- Advanced usage examples
|
|
133
|
+
- Best practices
|
|
134
|
+
- Tutorials and guides
|
|
135
|
+
|
|
136
|
+
## 🛠️ Development
|
|
137
|
+
|
|
138
|
+
```bash
|
|
139
|
+
# Clone the repository
|
|
140
|
+
git clone https://github.com/plexe-ai/plexe
|
|
141
|
+
cd plexe
|
|
142
|
+
|
|
143
|
+
# Install development dependencies
|
|
144
|
+
pip install -e ".[dev]"
|
|
145
|
+
|
|
146
|
+
# Run tests
|
|
147
|
+
pytest
|
|
148
|
+
```
|
|
149
|
+
|
|
150
|
+
## 🤝 Contributing
|
|
151
|
+
|
|
152
|
+
We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details.
|
|
153
|
+
|
|
154
|
+
## 📄 License
|
|
155
|
+
|
|
156
|
+
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
|
|
157
|
+
|
|
158
|
+
---
|
|
159
|
+
|
|
160
|
+
<div align="center">
|
|
161
|
+
|
|
162
|
+
Made with ❤️ by [Plexe AI](https://plexe.ai)
|
|
163
|
+
|
|
164
|
+
</div>
|
plexe-0.1.0/README.md
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
<div align="center">
|
|
2
|
+
|
|
3
|
+
# PlexeAI
|
|
4
|
+
|
|
5
|
+
<img src="docs/img/plexe-logo.svg" alt="PlexeAI Logo" width="100" height="100"/>
|
|
6
|
+
|
|
7
|
+
### Create ML models from natural language descriptions
|
|
8
|
+
|
|
9
|
+
[](https://badge.fury.io/py/plexeai)
|
|
10
|
+
[](https://pypi.org/project/plexeai/)
|
|
11
|
+
[](https://opensource.org/licenses/MIT)
|
|
12
|
+
|
|
13
|
+
</div>
|
|
14
|
+
|
|
15
|
+
---
|
|
16
|
+
|
|
17
|
+
## 🚀 Features
|
|
18
|
+
|
|
19
|
+
- 🤖 **AI-Powered Model Creation** - Build ML models using natural language descriptions
|
|
20
|
+
- 📊 **Automated Training** - Upload your data and let PlexeAI handle the rest
|
|
21
|
+
- ⚡ **Async Support** - Built-in async interfaces for high-performance applications
|
|
22
|
+
- 🔄 **Batch Processing** - Efficient batch prediction capabilities
|
|
23
|
+
- 🛠️ **Simple API** - Intuitive interface for both beginners and experts
|
|
24
|
+
|
|
25
|
+
## 📦 Installation
|
|
26
|
+
|
|
27
|
+
```bash
|
|
28
|
+
pip install plexe
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
## 🏃♂️ Quickstart
|
|
32
|
+
|
|
33
|
+
```python
|
|
34
|
+
import plexe
|
|
35
|
+
|
|
36
|
+
# Create a model in seconds
|
|
37
|
+
model_version = plexe.build(
|
|
38
|
+
goal="predict customer churn based on usage patterns",
|
|
39
|
+
model_name="churn-predictor",
|
|
40
|
+
data_files="customer_data.csv"
|
|
41
|
+
)
|
|
42
|
+
|
|
43
|
+
# Make predictions
|
|
44
|
+
result = plexe.infer(
|
|
45
|
+
model_name="churn-predictor",
|
|
46
|
+
model_version=model_version,
|
|
47
|
+
input_data={
|
|
48
|
+
"usage": 100,
|
|
49
|
+
"tenure": 12,
|
|
50
|
+
"plan_type": "premium"
|
|
51
|
+
}
|
|
52
|
+
)
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
## 🎯 Example Use Cases
|
|
56
|
+
|
|
57
|
+
- 📈 **Churn Prediction**: Predict customer churn using historical data
|
|
58
|
+
- 🏷️ **Classification**: Categorize text, images, or any structured data
|
|
59
|
+
- 📊 **Regression**: Predict numerical values like sales or pricing
|
|
60
|
+
- 🔄 **Time Series**: Forecast trends and patterns in sequential data
|
|
61
|
+
|
|
62
|
+
## 🔥 Advanced Usage
|
|
63
|
+
|
|
64
|
+
### Batch Predictions
|
|
65
|
+
|
|
66
|
+
```python
|
|
67
|
+
results = plexe.batch_infer(
|
|
68
|
+
model_name="churn-predictor",
|
|
69
|
+
model_version=model_version,
|
|
70
|
+
inputs=[
|
|
71
|
+
{"usage": 100, "tenure": 12, "plan_type": "premium"},
|
|
72
|
+
{"usage": 50, "tenure": 6, "plan_type": "basic"}
|
|
73
|
+
]
|
|
74
|
+
)
|
|
75
|
+
```
|
|
76
|
+
|
|
77
|
+
### Async Support
|
|
78
|
+
|
|
79
|
+
```python
|
|
80
|
+
async def main():
|
|
81
|
+
model_version = await plexe.abuild(
|
|
82
|
+
goal="predict customer churn",
|
|
83
|
+
model_name="churn-predictor",
|
|
84
|
+
data_files="customer_data.csv"
|
|
85
|
+
)
|
|
86
|
+
|
|
87
|
+
result = await plexe.ainfer(
|
|
88
|
+
model_name="churn-predictor",
|
|
89
|
+
model_version=model_version,
|
|
90
|
+
input_data={"usage": 100, "tenure": 12}
|
|
91
|
+
)
|
|
92
|
+
```
|
|
93
|
+
|
|
94
|
+
### Direct Client Usage
|
|
95
|
+
|
|
96
|
+
```python
|
|
97
|
+
from plexe import PlexeAI
|
|
98
|
+
|
|
99
|
+
with PlexeAI(api_key="your_api_key_here") as client:
|
|
100
|
+
# Upload data
|
|
101
|
+
upload_id = client.upload_files("customer_data.csv")
|
|
102
|
+
|
|
103
|
+
# Create and use model
|
|
104
|
+
model_version = client.build(
|
|
105
|
+
goal="predict customer churn",
|
|
106
|
+
model_name="churn-predictor",
|
|
107
|
+
upload_id=upload_id
|
|
108
|
+
)
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
## 📚 Documentation
|
|
112
|
+
|
|
113
|
+
Check out our [comprehensive documentation](https://docs.plexe.ai) for:
|
|
114
|
+
- Detailed API reference
|
|
115
|
+
- Advanced usage examples
|
|
116
|
+
- Best practices
|
|
117
|
+
- Tutorials and guides
|
|
118
|
+
|
|
119
|
+
## 🛠️ Development
|
|
120
|
+
|
|
121
|
+
```bash
|
|
122
|
+
# Clone the repository
|
|
123
|
+
git clone https://github.com/plexe-ai/plexe
|
|
124
|
+
cd plexe
|
|
125
|
+
|
|
126
|
+
# Install development dependencies
|
|
127
|
+
pip install -e ".[dev]"
|
|
128
|
+
|
|
129
|
+
# Run tests
|
|
130
|
+
pytest
|
|
131
|
+
```
|
|
132
|
+
|
|
133
|
+
## 🤝 Contributing
|
|
134
|
+
|
|
135
|
+
We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details.
|
|
136
|
+
|
|
137
|
+
## 📄 License
|
|
138
|
+
|
|
139
|
+
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
|
|
140
|
+
|
|
141
|
+
---
|
|
142
|
+
|
|
143
|
+
<div align="center">
|
|
144
|
+
|
|
145
|
+
Made with ❤️ by [Plexe AI](https://plexe.ai)
|
|
146
|
+
|
|
147
|
+
</div>
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
<svg width="29" height="28" viewBox="0 0 29 28" fill="none" xmlns="http://www.w3.org/2000/svg">
|
|
2
|
+
<rect x="2.87109" y="8" width="23.2581" height="17" rx="8.5" stroke="black" stroke-width="2"/>
|
|
3
|
+
<rect x="26.1938" y="13.9355" width="1.87097" height="5.12903" rx="0.935484" stroke="black" stroke-width="1.87097"/>
|
|
4
|
+
<rect x="0.935484" y="13.9355" width="1.87097" height="5.12903" rx="0.935484" stroke="black" stroke-width="1.87097"/>
|
|
5
|
+
<rect x="9.35492" y="12.9355" width="1.87097" height="5.12903" rx="0.935484" fill="black" stroke="black" stroke-width="1.87097"/>
|
|
6
|
+
<rect x="17.7744" y="12.9355" width="1.87097" height="5.12903" rx="0.935484" fill="black" stroke="black" stroke-width="1.87097"/>
|
|
7
|
+
<rect x="23.3228" y="25" width="2" height="17.6452" rx="1" transform="rotate(90 23.3228 25)" stroke="black" stroke-width="2"/>
|
|
8
|
+
<line x1="14.4033" y1="7" x2="14.4033" y2="5" stroke="black" stroke-width="3"/>
|
|
9
|
+
<path d="M13.3409 4.25L14.4998 2.1043L15.6587 4.25H13.3409Z" stroke="black" stroke-width="2"/>
|
|
10
|
+
</svg>
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
from pathlib import Path
|
|
2
|
+
from typing import Optional, Union, List
|
|
3
|
+
from .client import PlexeAI
|
|
4
|
+
|
|
5
|
+
def build(goal: str,
          model_name: str,
          data_files: Optional[Union[str, Path, List[Union[str, Path]]]] = None,
          upload_id: Optional[str] = None,
          api_key: str = "",
          eval_criteria: Optional[str] = None) -> str:
    """Build a new ML model.

    Args:
        goal: Description of what the model should do
        model_name: Name for the model
        data_files: Optional path(s) to data file(s) to upload
        upload_id: Optional upload_id if files were already uploaded
        api_key: API key for authentication (falls back to PLEXE_API_KEY env var)
        eval_criteria: Optional evaluation criteria

    Returns:
        model_version: Version ID of the created model
    """
    # Context-manage the throwaway client so its HTTP connection pools are
    # released when this one-shot call returns (a bare client leaked them).
    with PlexeAI(api_key=api_key) as client:
        return client.build(goal=goal, model_name=model_name,
                            data_files=data_files, upload_id=upload_id,
                            eval_criteria=eval_criteria)
|
|
28
|
+
|
|
29
|
+
async def abuild(goal: str,
                 model_name: str,
                 data_files: Optional[Union[str, Path, List[Union[str, Path]]]] = None,
                 upload_id: Optional[str] = None,
                 api_key: str = "",
                 eval_criteria: Optional[str] = None) -> str:
    """Build a new ML model asynchronously.

    Same contract as build(); see that function for argument details.
    """
    # async-with so the AsyncClient is awaited closed inside the running
    # event loop; a sync close would need asyncio.run and fail here.
    async with PlexeAI(api_key=api_key) as client:
        return await client.abuild(goal=goal, model_name=model_name,
                                   data_files=data_files, upload_id=upload_id,
                                   eval_criteria=eval_criteria)
|
|
40
|
+
|
|
41
|
+
def infer(model_name: str, model_version: str, input_data: dict, api_key: str = "") -> dict:
    """Run inference using a built model.

    Args:
        model_name: Name of the model to query
        model_version: Version ID returned by build()
        input_data: Feature payload sent to the model
        api_key: API key for authentication (falls back to PLEXE_API_KEY env var)

    Returns:
        The prediction response as a dict.
    """
    # Close the temporary client's connection pools when done (leak fix).
    with PlexeAI(api_key=api_key) as client:
        return client.infer(model_name=model_name, model_version=model_version, input_data=input_data)
|
|
45
|
+
|
|
46
|
+
async def ainfer(model_name: str, model_version: str, input_data: dict, api_key: str = "") -> dict:
    """Run inference using a model asynchronously.

    Same contract as infer(); see that function for argument details.
    """
    # async-with releases the AsyncClient inside the running event loop.
    async with PlexeAI(api_key=api_key) as client:
        return await client.ainfer(model_name=model_name, model_version=model_version, input_data=input_data)
|
|
50
|
+
|
|
51
|
+
def batch_infer(model_name: str, model_version: str, inputs: List[dict], api_key: str = "") -> List[dict]:
    """Run batch predictions.

    Args:
        model_name: Name of the model to query
        model_version: Version ID returned by build()
        inputs: One feature payload per prediction; results keep this order
        api_key: API key for authentication (falls back to PLEXE_API_KEY env var)

    Returns:
        A list of prediction dicts, one per input.
    """
    # Close the temporary client's connection pools when done (leak fix).
    with PlexeAI(api_key=api_key) as client:
        return client.batch_infer(model_name=model_name, model_version=model_version, inputs=inputs)
|
|
55
|
+
|
|
56
|
+
def get_status(model_name: str, model_version: str, api_key: str = "") -> dict:
    """Get status of a model build.

    Args:
        model_name: Name of the model
        model_version: Version ID returned by build()
        api_key: API key for authentication (falls back to PLEXE_API_KEY env var)

    Returns:
        The status response as a dict.
    """
    # Close the temporary client's connection pools when done (leak fix).
    with PlexeAI(api_key=api_key) as client:
        return client.get_status(model_name=model_name, model_version=model_version)
|
|
60
|
+
|
|
61
|
+
async def aget_status(model_name: str, model_version: str, api_key: str = "") -> dict:
    """Get status of a model build asynchronously.

    Same contract as get_status(); see that function for argument details.
    """
    # async-with releases the AsyncClient inside the running event loop.
    async with PlexeAI(api_key=api_key) as client:
        return await client.aget_status(model_name=model_name, model_version=model_version)
|
|
65
|
+
|
|
66
|
+
def cleanup_upload(upload_id: str, api_key: str = "") -> dict:
    """Clean up uploaded files.

    Args:
        upload_id: Upload ID returned by a prior file upload
        api_key: API key for authentication (falls back to PLEXE_API_KEY env var)

    Returns:
        The API's deletion response as a dict.
    """
    # Close the temporary client's connection pools when done (leak fix).
    with PlexeAI(api_key=api_key) as client:
        return client.cleanup_upload(upload_id=upload_id)
|
|
70
|
+
|
|
71
|
+
async def acleanup_upload(upload_id: str, api_key: str = "") -> dict:
    """Clean up uploaded files asynchronously.

    Same contract as cleanup_upload(); see that function for details.
    """
    # async-with releases the AsyncClient inside the running event loop.
    async with PlexeAI(api_key=api_key) as client:
        return await client.acleanup_upload(upload_id=upload_id)
|
|
75
|
+
|
|
76
|
+
# Public API of the plexe package: the client class plus module-level
# convenience wrappers ('a'-prefixed names are the async variants).
__all__ = ['PlexeAI', 'build', 'abuild', 'infer', 'ainfer',
           'batch_infer', 'get_status', 'aget_status',
           'cleanup_upload', 'acleanup_upload']
|
|
@@ -0,0 +1,222 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import asyncio
|
|
3
|
+
import httpx
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Any, Dict, List, Optional, Union
|
|
6
|
+
|
|
7
|
+
class PlexeAI:
    """Client for the Plexe REST API with both sync and async entry points.

    Holds one ``httpx.Client`` and one ``httpx.AsyncClient`` for the
    lifetime of the instance; use it as a (sync or async) context manager
    so both are closed deterministically.
    """

    def __init__(self, api_key: Optional[str] = None, timeout: float = 120.0):
        """Create a client.

        Args:
            api_key: Plexe API key; falls back to the PLEXE_API_KEY env var.
            timeout: Request timeout in seconds for both HTTP clients.

        Raises:
            ValueError: If no API key is supplied or found in the environment.
        """
        self.api_key = api_key
        if not api_key:
            self.api_key = os.environ.get("PLEXE_API_KEY")
        if not self.api_key:
            raise ValueError("PLEXE_API_KEY must be provided or set as environment variable")

        self.base_url = "https://api.plexe.ai/v0"
        self.client = httpx.Client(timeout=timeout)
        self.async_client = httpx.AsyncClient(timeout=timeout)

    def _get_headers(self) -> Dict[str, str]:
        """Get basic headers with API key."""
        return {
            "x-api-key": self.api_key or "",
        }

    def _get_json_headers(self) -> Dict[str, str]:
        """Get headers for JSON content."""
        headers = self._get_headers()
        headers["Content-Type"] = "application/json"
        return headers

    def _ensure_list(self, data_files: Union[str, Path, List[Union[str, Path]]]) -> List[Path]:
        """Convert single file path to list and ensure all paths are Path objects."""
        if isinstance(data_files, (str, Path)):
            data_files = [data_files]
        return [Path(f) for f in data_files]

    def upload_files(self, data_files: Union[str, Path, List[Union[str, Path]]]) -> str:
        """Upload data files and return upload ID.

        Args:
            data_files: One path or a list of paths to upload.

        Raises:
            ValueError: If any path does not exist.
        """
        files = self._ensure_list(data_files)
        for f in files:
            if not f.exists():
                raise ValueError(f"File not found: {f}")

        # Open handles explicitly so they can be closed after the request;
        # the previous implementation leaked one descriptor per file.
        handles = [f.open('rb') for f in files]
        try:
            response = self.client.post(
                f"{self.base_url}/uploads",
                files=[('files', (f.name, h)) for f, h in zip(files, handles)],
                headers=self._get_headers()
            )
        finally:
            for h in handles:
                h.close()
        response.raise_for_status()
        return response.json()["upload_id"]

    async def aupload_files(self, data_files: Union[str, Path, List[Union[str, Path]]]) -> str:
        """Upload data files asynchronously. See upload_files()."""
        files = self._ensure_list(data_files)
        for f in files:
            if not f.exists():
                raise ValueError(f"File not found: {f}")

        # Same descriptor-leak fix as the sync path.
        handles = [f.open('rb') for f in files]
        try:
            response = await self.async_client.post(
                f"{self.base_url}/uploads",
                files=[('files', (f.name, h)) for f, h in zip(files, handles)],
                headers=self._get_headers()
            )
        finally:
            for h in handles:
                h.close()
        response.raise_for_status()
        return response.json()["upload_id"]

    def build(self,
              goal: str,
              model_name: str,
              data_files: Optional[Union[str, Path, List[Union[str, Path]]]] = None,
              upload_id: Optional[str] = None,
              eval_criteria: Optional[str] = None) -> str:
        """Build a new ML model.

        Args:
            goal: Description of what the model should do
            model_name: Name for the model
            data_files: Optional path(s) to data file(s) to upload
            upload_id: Optional upload_id if files were already uploaded
            eval_criteria: Optional evaluation criteria

        Returns:
            model_version: Version ID of the created model

        Raises:
            ValueError: If neither or both of data_files/upload_id are given.
        """
        if data_files is None and upload_id is None:
            raise ValueError("Either data_files or upload_id must be provided")

        if data_files is not None and upload_id is not None:
            raise ValueError("Cannot provide both data_files and upload_id")

        # Get upload ID - either from new upload or use provided
        if data_files is not None:
            upload_id = self.upload_files(data_files)

        # Create model
        response = self.client.post(
            f"{self.base_url}/models/{model_name}/create",
            json={
                "upload_id": upload_id,
                "goal": goal,
                "eval": eval_criteria
            },
            headers=self._get_json_headers()
        )
        response.raise_for_status()
        return response.json()["model_version"]

    async def abuild(self,
                     goal: str,
                     model_name: str,
                     data_files: Optional[Union[str, Path, List[Union[str, Path]]]] = None,
                     upload_id: Optional[str] = None,
                     eval_criteria: Optional[str] = None) -> str:
        """Async version of build()"""
        if data_files is None and upload_id is None:
            raise ValueError("Either data_files or upload_id must be provided")

        if data_files is not None and upload_id is not None:
            raise ValueError("Cannot provide both data_files and upload_id")

        # Get upload ID - either from new upload or use provided
        if data_files is not None:
            upload_id = await self.aupload_files(data_files)

        response = await self.async_client.post(
            f"{self.base_url}/models/{model_name}/create",
            json={
                "upload_id": upload_id,
                "goal": goal,
                "eval": eval_criteria
            },
            headers=self._get_json_headers()
        )
        response.raise_for_status()
        return response.json()["model_version"]

    def get_status(self, model_name: str, model_version: str) -> Dict[str, Any]:
        """Get status of a model build."""
        response = self.client.get(
            f"{self.base_url}/models/{model_name}/{model_version}/status",
            headers=self._get_headers()
        )
        response.raise_for_status()
        return response.json()

    async def aget_status(self, model_name: str, model_version: str) -> Dict[str, Any]:
        """Async version of get_status()"""
        response = await self.async_client.get(
            f"{self.base_url}/models/{model_name}/{model_version}/status",
            headers=self._get_headers()
        )
        response.raise_for_status()
        return response.json()

    def infer(self, model_name: str, model_version: str, input_data: dict) -> Dict[str, Any]:
        """Run inference using a model."""
        response = self.client.post(
            f"{self.base_url}/models/{model_name}/{model_version}/infer",
            json=input_data,
            headers=self._get_json_headers()
        )
        response.raise_for_status()
        return response.json()

    async def ainfer(self, model_name: str, model_version: str, input_data: dict) -> Dict[str, Any]:
        """Async version of infer()"""
        response = await self.async_client.post(
            f"{self.base_url}/models/{model_name}/{model_version}/infer",
            json=input_data,
            headers=self._get_json_headers()
        )
        response.raise_for_status()
        return response.json()

    def batch_infer(self, model_name: str, model_version: str, inputs: List[dict]) -> List[Dict[str, Any]]:
        """Run batch predictions.

        Fans the requests out concurrently over the async client; results
        come back in the same order as ``inputs``.

        NOTE(review): this uses asyncio.run, so it must not be called from
        within a running event loop — use ainfer + gather there instead.
        """
        async def run_batch():
            tasks = [
                self.ainfer(model_name=model_name, model_version=model_version, input_data=x)
                for x in inputs
            ]
            return await asyncio.gather(*tasks)

        return asyncio.run(run_batch())

    def cleanup_upload(self, upload_id: str) -> Dict[str, Any]:
        """Clean up uploaded files."""
        response = self.client.delete(
            f"{self.base_url}/uploads/{upload_id}",
            headers=self._get_headers()
        )
        response.raise_for_status()
        return response.json()

    async def acleanup_upload(self, upload_id: str) -> Dict[str, Any]:
        """Async version of cleanup_upload()"""
        response = await self.async_client.delete(
            f"{self.base_url}/uploads/{upload_id}",
            headers=self._get_headers()
        )
        response.raise_for_status()
        return response.json()

    def __enter__(self):
        """Support `with PlexeAI(...) as client:`."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.client.close()
        # asyncio.run would raise if an event loop is already running in this
        # thread; in that case leave the async client for __aexit__ / GC
        # rather than crashing on context-manager exit.
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            asyncio.run(self.async_client.aclose())

    async def __aenter__(self):
        """Support `async with PlexeAI(...) as client:`."""
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        self.client.close()
        await self.async_client.aclose()
|
|
File without changes
|
|
@@ -0,0 +1,183 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import time
|
|
3
|
+
import pytest
|
|
4
|
+
import asyncio
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from plexe import PlexeAI, build, abuild, infer, ainfer, batch_infer
|
|
7
|
+
|
|
8
|
+
# These integration tests hit the live Plexe API, so a real key is required;
# skip the whole module when it is absent rather than failing every test.
API_KEY = os.getenv("PLEXE_API_KEY") or ""
if not API_KEY:
    pytest.skip("PLEXE_API_KEY environment variable not set", allow_module_level=True)

# Shared model identity and training goal used by the tests below.
TEST_MODEL_NAME = "test_prediction_model"
TEST_GOAL = "Predict the outcomes of english premier league games based on prior results using the attached dataset"
|
|
14
|
+
|
|
15
|
+
@pytest.fixture
def client():
    """Provide a PlexeAI client wired to the test API key."""
    test_client = PlexeAI(api_key=API_KEY)
    return test_client
|
|
19
|
+
|
|
20
|
+
@pytest.fixture
def sample_data_file(tmp_path):
    """Write a small labelled-sentiment CSV into pytest's tmp dir."""
    rows = [
        "text,sentiment",
        "This product is amazing!,positive",
        "I love this service,positive",
        "Terrible experience,negative",
        "Not worth the money,negative",
        "Pretty good overall,positive",
    ]
    csv_path = tmp_path / "test_data.csv"
    csv_path.write_text("\n".join(rows))
    return csv_path
|
|
32
|
+
|
|
33
|
+
@pytest.fixture
def sample_input_data():
    """Provide a single inference payload shared across tests."""
    payload = {"text": "This is a great product!"}
    return payload
|
|
37
|
+
|
|
38
|
+
def wait_for_model(client, model_name: str, model_version: str, timeout: int = 300):
    """Poll the build status every 10s until it completes.

    Returns True on completion; raises on build failure or when the
    deadline passes.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        status = client.get_status(model_name, model_version)
        state = status["status"]
        if state == "completed":
            return True
        if state == "failed":
            raise Exception(f"Model failed: {status.get('error', 'Unknown error')}")
        time.sleep(10)
    raise TimeoutError(f"Model did not complete within {timeout} seconds")
|
|
49
|
+
|
|
50
|
+
async def async_wait_for_model(client, model_name: str, model_version: str, timeout: int = 300):
    """Async twin of wait_for_model(): poll every 10s until completion.

    Returns True on completion; raises on build failure or when the
    deadline passes.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        status = await client.aget_status(model_name, model_version)
        state = status["status"]
        if state == "completed":
            return True
        if state == "failed":
            raise Exception(f"Model failed: {status.get('error', 'Unknown error')}")
        await asyncio.sleep(10)
    raise TimeoutError(f"Model did not complete within {timeout} seconds")
|
|
61
|
+
|
|
62
|
+
class TestPlexeAIIntegration:
    """Integration tests for PlexeAI client.

    Exercises the synchronous and asynchronous build/infer flows end to end
    against the live API, plus file upload/cleanup and input validation.
    Relies on module-level fixtures/constants: ``client``, ``sample_data_file``,
    ``sample_input_data``, ``API_KEY``, ``TEST_GOAL``, ``TEST_MODEL_NAME``.
    """

    def test_client_initialization(self):
        """Test client initialization with API key."""
        client = PlexeAI(api_key=API_KEY)
        assert client.api_key == API_KEY
        assert client.base_url == "https://api.plexe.ai/v0"

    def test_build_and_inference_flow(self, client, sample_data_file, sample_input_data):
        """Test full flow: build model with direct data files to avoid timing issues."""
        # NOTE(review): upload_id is a hard-coded, pre-existing upload on the
        # test account — confirm it stays valid, or replace with a fresh upload.
        model_version = build(
            goal=TEST_GOAL,
            model_name=TEST_MODEL_NAME,
            upload_id="2d4da8f9-aaf1-4262-a36c-5e9167ca4d5b",
            api_key=API_KEY,
        )
        assert isinstance(model_version, str)

        # Block until the build finishes; raises on failure or timeout.
        wait_for_model(client, TEST_MODEL_NAME, model_version)

        # Single-record inference.
        result = infer(
            model_name=TEST_MODEL_NAME,
            model_version=model_version,
            input_data=sample_input_data,
            api_key=API_KEY,
        )
        assert isinstance(result, dict)
        assert "prediction" in result

        # Batch inference over several records at once.
        batch_inputs = [
            {"text": "Great service!"},
            {"text": "Not satisfied with the product"},
        ]
        results = batch_infer(
            model_name=TEST_MODEL_NAME,
            model_version=model_version,
            inputs=batch_inputs,
            api_key=API_KEY,
        )
        assert isinstance(results, list)
        assert len(results) == len(batch_inputs)

    @pytest.mark.asyncio
    async def test_async_build_and_inference_flow(self, client, sample_data_file, sample_input_data):
        """Test full async flow: build model with direct data files to avoid timing issues."""
        async_model_name = f"{TEST_MODEL_NAME}_async"

        # Build model asynchronously using a pre-uploaded dataset.
        model_version = await abuild(
            goal=TEST_GOAL,
            model_name=async_model_name,
            upload_id="2d4da8f9-aaf1-4262-a36c-5e9167ca4d5b",
            api_key=API_KEY,
        )
        assert isinstance(model_version, str)

        # Wait for the async model build to complete.
        await async_wait_for_model(client, async_model_name, model_version)

        # Run inference asynchronously.
        result = await ainfer(
            model_name=async_model_name,
            model_version=model_version,
            input_data=sample_input_data,
            api_key=API_KEY,
        )
        assert isinstance(result, dict)
        assert "prediction" in result

        # Batch inference (sync call is fine inside an async test).
        batch_inputs = [
            {"text": "Great service!"},
            {"text": "Not satisfied with the product"},
        ]
        # Fix: the original passed TEST_MODEL_NAME here, but model_version
        # belongs to the async model built above — the name/version pair
        # must match or the API resolves a different (or missing) model.
        results = batch_infer(
            model_name=async_model_name,
            model_version=model_version,
            inputs=batch_inputs,
            api_key=API_KEY,
        )
        assert isinstance(results, list)
        assert len(results) == len(batch_inputs)

    def test_file_upload_and_cleanup(self, client, sample_data_file):
        """Test file upload and cleanup."""
        upload_id = client.upload_files(sample_data_file)
        assert isinstance(upload_id, str)

        # Give the backend a moment to register the upload before deleting it.
        time.sleep(2)

        cleanup_result = client.cleanup_upload(upload_id)
        assert isinstance(cleanup_result, dict)

    def test_error_handling(self, client):
        """Test error handling for invalid requests."""
        # Neither data_files nor upload_id supplied -> must be rejected.
        with pytest.raises(ValueError):
            build(
                goal=TEST_GOAL,
                model_name=TEST_MODEL_NAME,
                data_files=None,
                upload_id=None,
                api_key=API_KEY,
            )

        # Nonexistent data file path -> must be rejected.
        with pytest.raises(ValueError):
            build(
                goal=TEST_GOAL,
                model_name=TEST_MODEL_NAME,
                data_files="nonexistent.csv",
                api_key=API_KEY,
            )
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "plexe"
version = "0.1.0"
description = "Create ML models from natural language descriptions"
readme = "README.md"
requires-python = ">=3.7"
authors = [
    { name = "Plexe AI", email = "info@plexe.ai" }
]
dependencies = [
    "httpx>=0.24.0",
    "aiofiles>=0.8.0",
]

[project.optional-dependencies]
dev = [
    "pytest>=7.0",
    "pytest-asyncio>=0.20.0",
    "pytest-cov>=4.0.0",
    "black>=22.0.0",
]

[tool.pytest.ini_options]
# Fix: tests live under plexe/tests/ (see sdist layout), not a top-level
# tests/ directory — the old value made pytest discover nothing.
testpaths = ["plexe/tests"]
python_files = ["test_*.py"]

[tool.coverage.run]
source = ["plexe"]
# Match the actual test package path so test modules are excluded from coverage.
omit = ["plexe/tests/*"]

[tool.coverage.report]
exclude_lines = [
    "pragma: no cover",
    "def __repr__",
    "raise NotImplementedError",
    "if __name__ == .__main__.:",
    "pass",
    "raise ImportError"
]

[project.urls]
Homepage = "https://github.com/plexe-ai/plexe-python"
Repository = "https://github.com/plexe-ai/plexe-python"