gmicloud 0.1.3.tar.gz → 0.1.5.tar.gz
This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between the two versions.
- {gmicloud-0.1.3 → gmicloud-0.1.5}/PKG-INFO +88 -56
- {gmicloud-0.1.3 → gmicloud-0.1.5}/README.md +87 -55
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/__init__.py +12 -1
- gmicloud-0.1.5/gmicloud/_internal/_client/_artifact_client.py +212 -0
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/_internal/_client/_http_client.py +5 -2
- gmicloud-0.1.5/gmicloud/_internal/_client/_iam_client.py +189 -0
- gmicloud-0.1.5/gmicloud/_internal/_client/_task_client.py +153 -0
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/_internal/_enums.py +8 -0
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/_internal/_manager/_artifact_manager.py +17 -5
- gmicloud-0.1.5/gmicloud/_internal/_manager/_iam_manager.py +36 -0
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/_internal/_manager/_task_manager.py +19 -12
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/_internal/_models.py +129 -11
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/client.py +26 -5
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/tests/test_artifacts.py +14 -15
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/tests/test_tasks.py +1 -1
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud.egg-info/PKG-INFO +88 -56
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud.egg-info/SOURCES.txt +1 -0
- {gmicloud-0.1.3 → gmicloud-0.1.5}/pyproject.toml +1 -1
- gmicloud-0.1.3/gmicloud/_internal/_client/_artifact_client.py +0 -142
- gmicloud-0.1.3/gmicloud/_internal/_client/_iam_client.py +0 -101
- gmicloud-0.1.3/gmicloud/_internal/_client/_task_client.py +0 -108
- {gmicloud-0.1.3 → gmicloud-0.1.5}/examples/__init__.py +0 -0
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/_internal/__init__.py +0 -0
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/_internal/_client/__init__.py +0 -0
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/_internal/_client/_decorator.py +0 -0
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/_internal/_client/_file_upload_client.py +0 -0
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/_internal/_config.py +0 -0
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/_internal/_constants.py +0 -0
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/_internal/_exceptions.py +0 -0
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/_internal/_manager/__init__.py +0 -0
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/tests/__init__.py +0 -0
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/utils/uninstall_packages.py +0 -0
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud.egg-info/dependency_links.txt +0 -0
- {gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud.egg-info/top_level.txt +0 -0
- {gmicloud-0.1.3 → gmicloud-0.1.5}/setup.cfg +0 -0

{gmicloud-0.1.3 → gmicloud-0.1.5}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: gmicloud
-Version: 0.1.3
+Version: 0.1.5
 Summary: GMI Cloud Python SDK
 Author-email: GMI <gmi@gmitec.net>
 License: MIT
@@ -13,16 +13,11 @@ Description-Content-Type: text/markdown
 # GMICloud SDK (Beta)
 
 ## Overview
-
 Before you start: Our service and GPU resource is currenly invite-only so please contact our team (getstarted@gmicloud.ai) to get invited if you don't have one yet.
 
-The GMI Inference Engine SDK provides a Python interface for deploying and managing machine learning models in
-production environments. It allows users to create model artifacts, schedule tasks for serving models, and call
-inference APIs easily.
+The GMI Inference Engine SDK provides a Python interface for deploying and managing machine learning models in production environments. It allows users to create model artifacts, schedule tasks for serving models, and call inference APIs easily.
 
-This SDK streamlines the process of utilizing GMI Cloud capabilities such as deploying models with Kubernetes-based Ray
-services, managing resources automatically, and accessing model inference endpoints. With minimal setup, developers can
-focus on building ML solutions instead of infrastructure.
+This SDK streamlines the process of utilizing GMI Cloud capabilities such as deploying models with Kubernetes-based Ray services, managing resources automatically, and accessing model inference endpoints. With minimal setup, developers can focus on building ML solutions instead of infrastructure.
 
 ## Features
 
@@ -50,6 +45,7 @@ Set the following environment variables:
 export GMI_CLOUD_CLIENT_ID=<YOUR_CLIENT_ID>
 export GMI_CLOUD_EMAIL=<YOUR_EMAIL>
 export GMI_CLOUD_PASSWORD=<YOUR_PASSWORD>
+export GMI_CLOUD_API_KEY=<YOUR_API_KEY>
 ```
 
 ### Option 2: Passing Credentials as Parameters
@@ -64,7 +60,18 @@ client = Client(client_id="<YOUR_CLIENT_ID>", email="<YOUR_EMAIL>", password="<Y
 
 ## Quick Start
 
-### 1.
+### 1. How to run the code in the example folder
+```bash
+cd path/to/gmicloud-sdk
+# Create a virtual environment
+python -m venv venv
+source venv/bin/activate
+
+pip install -r requirements.txt
+python -m examples.create_task_from_artifact_template.py
+```
+
+### 2. Create a Task from an Artifact Template
 
 This is the simplest example to deploy an existing artifact template:
 
@@ -93,24 +100,31 @@ response = call_chat_completion(client, task.task_id)
 print(response)
 ```
 
-###
+### 3. Step-by-Step Example: Create Artifact, Task, and Query the Endpoint
 
 #### (a) Create an Artifact from a Template
 
 First, you’ll retrieve all templates and create an artifact based on the desired template (e.g., "Llama3.1 8B"):
 
 ```python
-
+from gmicloud import *
+
+
+def create_artifact_from_template(client: Client) -> str:
     artifact_manager = client.artifact_manager
 
-    #
-    templates = artifact_manager.
+    # Get all artifact templates
+    templates = artifact_manager.get_public_templates()
     for template in templates:
         if template.artifact_template_id == "qwen_2.5_14b_instruct_template_001":
-
-
+            # Create an artifact from a template
+            artifact_id = artifact_manager.create_artifact_from_template(
+                artifact_template_id=template.artifact_template_id,
             )
-
+
+            return artifact_id
+
+    return ""
 ```
 
 #### (b) Create a Task from the Artifact
@@ -118,43 +132,55 @@ def create_artifact_from_template(client):
 Wait until the artifact becomes "ready" and then deploy it using task scheduling:
 
 ```python
-
-
+from gmicloud import *
+import time
+from datetime import datetime
 
-
+def create_task_and_start(client: Client, artifact_id: str) -> str:
+    artifact_manager = client.artifact_manager
+    # Wait for the artifact to be ready
     while True:
-
-
-
-
+        try:
+            artifact = artifact_manager.get_artifact(artifact_id)
+            print(f"Artifact status: {artifact.build_status}")
+            # Wait until the artifact is ready
+            if artifact.build_status == BuildStatus.SUCCESS:
+                break
+        except Exception as e:
+            raise e
+        # Wait for 2 seconds
        time.sleep(2)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    try:
+        task_manager = client.task_manager
+        # Create a task
+        task = task_manager.create_task(Task(
+            config=TaskConfig(
+                ray_task_config=RayTaskConfig(
+                    ray_version="2.40.0-py310-gpu",
+                    file_path="serve",
+                    artifact_id=artifact_id,
+                    deployment_name="app",
+                    replica_resource=ReplicaResource(
+                        cpu=10,
+                        ram_gb=100,
+                        gpu=1,
+                    ),
+                ),
+                task_scheduling=TaskScheduling(
+                    scheduling_oneoff=OneOffScheduling(
+                        trigger_timestamp=int(datetime.now().timestamp()) + 10,
+                        min_replicas=1,
+                        max_replicas=10,
+                    )
                 ),
             ),
-
-
-
-
-
-
-),
-),
-))
+        ))
+
+        # Start the task
+        task_manager.start_task(task.task_id)
+    except Exception as e:
+        raise e
 
-task_manager.start_task(task.task_id)
     return task.task_id
 ```
 
@@ -163,14 +189,20 @@ def create_task_and_start(client, artifact_id):
 Once the task is running, use the endpoint for inference:
 
 ```python
+from gmicloud import *
 from examples.completion import call_chat_completion
 
-
-
-task_id = create_task_and_start(client, artifact_id)
+# Initialize the Client
+cli = Client()
 
-
-
+# Create an artifact from a template
+artifact_id = create_artifact_from_template(cli)
+
+# Create a task and start it
+task_id = create_task_and_start(cli, artifact_id)
+
+# Call chat completion
+print(call_chat_completion(cli, task_id))
 ```
 
 ## API Reference
@@ -199,10 +231,10 @@ password: Optional[str] = ""
 
 ## Notes & Troubleshooting
 
-Ensure Credentials are Correct: Double-check your environment variables or parameters passed into the Client object.
-Artifact Status: It may take a few minutes for an artifact or task to transition to the "running" state.
-Inference Endpoint Readiness: Use the task endpoint only after the task status changes to "running".
-Default OpenAI Key: By default, the OpenAI API base URL is derived from the endpoint provided by GMI.
+* Ensure Credentials are Correct: Double-check your environment variables or parameters passed into the Client object.
+* Artifact Status: It may take a few minutes for an artifact or task to transition to the "running" state.
+* Inference Endpoint Readiness: Use the task endpoint only after the task status changes to "running".
+* Default OpenAI Key: By default, the OpenAI API base URL is derived from the endpoint provided by GMI.
 
 ## Contributing
 
{gmicloud-0.1.3 → gmicloud-0.1.5}/README.md

The README.md changes are identical to the README body of the PKG-INFO diff above, with every hunk shifted up by the 12 metadata header lines (e.g., `@@ -13,16 +13,11 @@` becomes `@@ -1,16 +1,11 @@`).
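Version 0.1.5 adds `GMI_CLOUD_API_KEY` to the environment-variable setup shown in the diff above. As a minimal sketch, assuming `Client()` falls back to these variables when constructed without arguments (the README demonstrates the no-argument `cli = Client()` form but only spells out the parameter-passing one), initialization could look like this:

```python
import os

# Assumption: Client() reads these variables when no credentials are passed,
# per the README's "Option 1" environment-variable setup.
os.environ["GMI_CLOUD_CLIENT_ID"] = "<YOUR_CLIENT_ID>"
os.environ["GMI_CLOUD_EMAIL"] = "<YOUR_EMAIL>"
os.environ["GMI_CLOUD_PASSWORD"] = "<YOUR_PASSWORD>"
os.environ["GMI_CLOUD_API_KEY"] = "<YOUR_API_KEY>"  # new in 0.1.5

from gmicloud import Client  # imported after the env setup on purpose

cli = Client()
```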
{gmicloud-0.1.3 → gmicloud-0.1.5}/gmicloud/__init__.py

@@ -1,3 +1,6 @@
+import logging
+import os
+
 from ._internal._models import (
     Artifact,
     ArtifactData,
@@ -16,7 +19,8 @@ from ._internal._models import (
 )
 from ._internal._enums import (
     BuildStatus,
-    TaskEndpointStatus
+    TaskEndpointStatus,
+    TaskStatus
 )
 from .client import Client
 
@@ -39,3 +43,10 @@ __all__ = [
     "BuildStatus",
     "TaskEndpointStatus",
 ]
+
+# Configure logging
+log_level = os.getenv("GMI_CLOUD_LOG_LEVEL", "INFO").upper()
+logging.basicConfig(
+    level=log_level,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
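The new module-level setup means the SDK configures the root logger at import time, with the level taken from `GMI_CLOUD_LOG_LEVEL`. A small sketch of raising the verbosity; the one assumption is that the variable must be set before `gmicloud` is first imported, since `basicConfig` runs in `__init__.py`:

```python
import os

# GMI_CLOUD_LOG_LEVEL is read once, in gmicloud/__init__.py, so it has to be
# set before the package is first imported.
os.environ["GMI_CLOUD_LOG_LEVEL"] = "DEBUG"

import gmicloud  # noqa: E402 -- import deliberately follows the env setup
import logging

# Loggers created inside the SDK (e.g., in _artifact_client.py below) now emit
# DEBUG records through the root handler that basicConfig installed.
logging.getLogger("gmicloud").debug("verbose SDK logging enabled")
```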
gmicloud-0.1.5/gmicloud/_internal/_client/_artifact_client.py (new file)

@@ -0,0 +1,212 @@
+from typing import List
+import logging
+from requests.exceptions import RequestException
+
+from ._http_client import HTTPClient
+from ._iam_client import IAMClient
+from ._decorator import handle_refresh_token
+from .._models import *
+from .._config import ARTIFACT_SERVICE_BASE_URL
+
+logger = logging.getLogger(__name__)
+
+
+class ArtifactClient:
+    """
+    Client for interacting with the Artifact Service API.
+
+    This client provides methods to perform CRUD operations on artifacts,
+    as well as generating signed URLs for uploading large files.
+    """
+
+    def __init__(self, iam_client: IAMClient):
+        """
+        Initializes the ArtifactClient with an HTTPClient configured
+        to communicate with the Artifact Service base URL.
+        """
+        self.client = HTTPClient(ARTIFACT_SERVICE_BASE_URL)
+        self.iam_client = iam_client
+
+    @handle_refresh_token
+    def get_artifact(self, artifact_id: str) -> Optional[Artifact]:
+        """
+        Fetches an artifact by its ID.
+
+        :param artifact_id: The ID of the artifact to fetch.
+        :return: The Artifact object or None if an error occurs.
+        """
+        try:
+            response = self.client.get(
+                "/get_artifact",
+                self.iam_client.get_custom_headers(),
+                {"artifact_id": artifact_id}
+            )
+            return Artifact.model_validate(response) if response else None
+        except (RequestException, ValueError) as e:
+            logger.error(f"Failed to fetch artifact {artifact_id}: {e}")
+            return None
+
+    @handle_refresh_token
+    def get_all_artifacts(self) -> List[Artifact]:
+        """
+        Fetches all artifacts.
+
+        :return: A list of Artifact objects. If an error occurs, returns an empty list.
+        """
+        try:
+            response = self.client.get("/get_all_artifacts", self.iam_client.get_custom_headers())
+            if not response:
+                logger.error("Empty response from /get_all_artifacts")
+                return []
+            return [Artifact.model_validate(item) for item in response]
+        except (RequestException, ValueError) as e:
+            logger.error(f"Failed to fetch all artifacts: {e}")
+            return []
+
+    @handle_refresh_token
+    def create_artifact(self, request: CreateArtifactRequest) -> Optional[CreateArtifactResponse]:
+        """
+        Creates a new artifact in the service.
+
+        :param request: The request object containing artifact details.
+        :return: The response object containing the created artifact details, or None on error.
+        """
+        try:
+            response = self.client.post(
+                "/create_artifact",
+                self.iam_client.get_custom_headers(),
+                request.model_dump()
+            )
+            return CreateArtifactResponse.model_validate(response) if response else None
+        except (RequestException, ValueError) as e:
+            logger.error(f"Failed to create artifact: {e}")
+            return None
+
+    @handle_refresh_token
+    def create_artifact_from_template(self, artifact_template_id: str) -> Optional[CreateArtifactFromTemplateResponse]:
+        """
+        Creates a new artifact in the service.
+
+        :param artifact_template_id: The ID of the artifact template to use.
+        :return: The response object containing the created artifact details or None if an error occurs.
+        """
+        try:
+            response = self.client.post(
+                "/create_artifact_from_template",
+                self.iam_client.get_custom_headers(),
+                {"artifact_template_id": artifact_template_id}
+            )
+            return CreateArtifactFromTemplateResponse.model_validate(response) if response else None
+        except (RequestException, ValueError) as e:
+            logger.error(f"Failed to create artifact from template {artifact_template_id}: {e}")
+            return None
+
+    @handle_refresh_token
+    def rebuild_artifact(self, artifact_id: str) -> Optional[RebuildArtifactResponse]:
+        """
+        Rebuilds an artifact in the service.
+
+        :param artifact_id: The ID of the artifact to rebuild.
+        :return: The response object containing the rebuilt artifact details or None if an error occurs.
+        """
+        try:
+            response = self.client.post(
+                "/rebuild_artifact",
+                self.iam_client.get_custom_headers(),
+                {"artifact_id": artifact_id}
+            )
+            return RebuildArtifactResponse.model_validate(response) if response else None
+        except (RequestException, ValueError) as e:
+            logger.error(f"Failed to rebuild artifact {artifact_id}: {e}")
+            return None
+
+    @handle_refresh_token
+    def delete_artifact(self, artifact_id: str) -> Optional[DeleteArtifactResponse]:
+        """
+        Deletes an artifact by its ID.
+
+        :param artifact_id: The ID of the artifact to delete.
+        :return: The response object containing the deleted artifact details or None if an error occurs.
+        """
+        try:
+            response = self.client.delete(
+                "/delete_artifact",
+                self.iam_client.get_custom_headers(),
+                {"artifact_id": artifact_id}
+            )
+            return DeleteArtifactResponse.model_validate(response) if response else None
+        except (RequestException, ValueError) as e:
+            logger.error(f"Failed to delete artifact {artifact_id}: {e}")
+            return None
+
+    @handle_refresh_token
+    def get_bigfile_upload_url(self, request: GetBigFileUploadUrlRequest) -> Optional[GetBigFileUploadUrlResponse]:
+        """
+        Generates a pre-signed URL for uploading a large file.
+
+        :param request: The request object containing the artifact ID, file name, and file type.
+        :return: The response object containing the pre-signed URL and upload details, or None if an error occurs.
+        """
+        try:
+            response = self.client.post("/get_bigfile_upload_url",
+                                        self.iam_client.get_custom_headers(),
+                                        request.model_dump())
+
+            if not response:
+                logger.error("Empty response from /get_bigfile_upload_url")
+                return None
+
+            return GetBigFileUploadUrlResponse.model_validate(response)
+
+        except (RequestException, ValueError) as e:
+            logger.error(f"Failed to generate upload URL: {e}")
+            return None
+
+    @handle_refresh_token
+    def delete_bigfile(self, request: DeleteBigfileRequest) -> Optional[DeleteBigfileResponse]:
+        """
+        Deletes a large file associated with an artifact.
+
+        :param request: The request object containing the artifact ID and file name.
+        :return: The response object containing the deletion status, or None if an error occurs.
+        """
+        try:
+            response = self.client.delete("/delete_bigfile",
+                                          self.iam_client.get_custom_headers(),
+                                          request.model_dump())
+
+            if not response:
+                logger.error("Empty response from /delete_bigfile")
+                return None
+
+            return DeleteBigfileResponse.model_validate(response)
+
+        except (RequestException, ValueError) as e:
+            logger.error(f"Failed to delete big file: {e}")
+            return None
+
+    @handle_refresh_token
+    def get_public_templates(self) -> List[ArtifactTemplate]:
+        """
+        Fetches all artifact templates.
+
+        :return: A list of ArtifactTemplate objects.
+        :rtype: List[ArtifactTemplate]
+        """
+        try:
+            response = self.client.get("/get_public_templates", self.iam_client.get_custom_headers())
+
+            if not response:
+                logger.error("Empty response received from /get_public_templates API")
+                return []
+
+            try:
+                result = GetPublicTemplatesResponse.model_validate(response)
+                return result.artifact_templates
+            except ValueError as ve:
+                logger.error(f"Failed to validate response data: {ve}")
+                return []
+
+        except RequestException as e:
+            logger.error(f"Request to /get_public_templates failed: {e}")
+            return []