gmicloud 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. gmicloud-0.1.0/PKG-INFO +208 -0
  2. gmicloud-0.1.0/README.md +191 -0
  3. gmicloud-0.1.0/examples/__init__.py +0 -0
  4. gmicloud-0.1.0/examples/example.py +145 -0
  5. gmicloud-0.1.0/gmicloud/__init__.py +41 -0
  6. gmicloud-0.1.0/gmicloud/_internal/__init__.py +0 -0
  7. gmicloud-0.1.0/gmicloud/_internal/_client/__init__.py +0 -0
  8. gmicloud-0.1.0/gmicloud/_internal/_client/_artifact_client.py +179 -0
  9. gmicloud-0.1.0/gmicloud/_internal/_client/_decorator.py +22 -0
  10. gmicloud-0.1.0/gmicloud/_internal/_client/_file_upload_client.py +115 -0
  11. gmicloud-0.1.0/gmicloud/_internal/_client/_http_client.py +150 -0
  12. gmicloud-0.1.0/gmicloud/_internal/_client/_iam_client.py +92 -0
  13. gmicloud-0.1.0/gmicloud/_internal/_client/_task_client.py +142 -0
  14. gmicloud-0.1.0/gmicloud/_internal/_config.py +3 -0
  15. gmicloud-0.1.0/gmicloud/_internal/_constants.py +13 -0
  16. gmicloud-0.1.0/gmicloud/_internal/_enums.py +23 -0
  17. gmicloud-0.1.0/gmicloud/_internal/_exceptions.py +19 -0
  18. gmicloud-0.1.0/gmicloud/_internal/_manager/__init__.py +0 -0
  19. gmicloud-0.1.0/gmicloud/_internal/_manager/_artifact_manager.py +303 -0
  20. gmicloud-0.1.0/gmicloud/_internal/_manager/_task_manager.py +221 -0
  21. gmicloud-0.1.0/gmicloud/_internal/_models.py +336 -0
  22. gmicloud-0.1.0/gmicloud/client.py +51 -0
  23. gmicloud-0.1.0/gmicloud/tests/__init__.py +0 -0
  24. gmicloud-0.1.0/gmicloud/tests/test_artifacts.py +274 -0
  25. gmicloud-0.1.0/gmicloud/tests/test_tasks.py +207 -0
  26. gmicloud-0.1.0/gmicloud.egg-info/PKG-INFO +208 -0
  27. gmicloud-0.1.0/gmicloud.egg-info/SOURCES.txt +30 -0
  28. gmicloud-0.1.0/gmicloud.egg-info/dependency_links.txt +1 -0
  29. gmicloud-0.1.0/gmicloud.egg-info/top_level.txt +2 -0
  30. gmicloud-0.1.0/pyproject.toml +23 -0
  31. gmicloud-0.1.0/setup.cfg +4 -0
  32. gmicloud-0.1.0/setup.py +28 -0
gmicloud-0.1.0/PKG-INFO
@@ -0,0 +1,208 @@
+ Metadata-Version: 2.2
+ Name: gmicloud
+ Version: 0.1.0
+ Summary: GMI Cloud Python SDK
+ Home-page: https://github.com/GMISWE/python-sdk
+ Author: GMI
+ Author-email: GMI <gmi@gmitec.net>
+ License: MIT
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.6
+ Description-Content-Type: text/markdown
+ Dynamic: author
+ Dynamic: home-page
+ Dynamic: requires-python
+
+ # GMICloud
+
+ ## Overview
+
+ This project is an open-source SDK for interacting with the GMI Cloud platform. It provides functionality to manage
+ artifacts, tasks, and usage data efficiently.
+
+ ## Features
+
+ - Create and manage artifacts
+ - Create and manage tasks
+ - Fetch usage data
+ - Integration with OpenAI for chat completions
+
+ ## Installation
+
+ To install the SDK, use pip (the distribution is named `gmicloud`, per the package metadata above):
+
+ ```bash
+ pip install gmicloud
+ ```
+
+ ## Usage
+
+ ### Initialize the Client
+
+ ```python
+ from gmicloud import Client
+
+ client = Client(username="your_username", password="your_password")
+ ```
+
+ ### Create an Artifact with a File
+
+ ```python
+ artifact_id = client.artifact_manager.create_artifact_with_file(
+     artifact_name="Llama3.1 8B",
+     artifact_file_path="./files/Llama-3.1-8B-Instruct.zip",
+     description="This is a test artifact",
+     tags=['example', 'test']
+ )
+ ```
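+
+ Uploaded artifacts are built asynchronously. The bundled `examples/example.py` polls the build status until it reaches `BuildStatus.SUCCESS`; a condensed version of that loop:
+
+ ```python
+ import time
+ from gmicloud import BuildStatus
+
+ # Poll until the artifact image finishes building.
+ while True:
+     artifact = client.artifact_manager.get_artifact(artifact_id)
+     if artifact.build_status == BuildStatus.SUCCESS:
+         break
+     time.sleep(2)
+ ```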
+
+ ### Create an Artifact from a Template
+
+ ```python
+ artifact_id = client.artifact_manager.create_artifact_from_template(
+     artifact_template_id="template_id"
+ )
+ ```
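+
+ Template IDs can be discovered at runtime; `examples/example.py` looks one up by display name:
+
+ ```python
+ # Find a template by name and create an artifact from it.
+ for template in client.artifact_manager.get_artifact_templates():
+     if template.artifact_name == "Llama3.1 8B":
+         artifact_id = client.artifact_manager.create_artifact_from_template(
+             artifact_template_id=template.artifact_template_id,
+         )
+ ```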
+
+ ### Create and Start a Task
+
+ ```python
+ from datetime import datetime
+
+ from gmicloud import (
+     Task, TaskConfig, RayTaskConfig, ReplicaResource,
+     TaskScheduling, OneOffScheduling,
+ )
+
+ task_id = client.task_manager.create_task(Task(
+     config=TaskConfig(
+         ray_task_config=RayTaskConfig(
+             ray_version="2.40.0-py310-gpu",
+             file_path="serve",
+             artifact_id=artifact_id,
+             deployment_name="app",
+             replica_resource=ReplicaResource(
+                 cpu=2,
+                 ram_gb=128,
+                 gpu=2,
+             ),
+         ),
+         task_scheduling=TaskScheduling(
+             scheduling_oneoff=OneOffScheduling(
+                 trigger_timestamp=int(datetime.now().timestamp()) + 60,
+                 min_replicas=1,
+                 max_replicas=10,
+             )
+         ),
+     ),
+ ))
+
+ client.task_manager.start_task(task_id)
+ ```
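+
+ Scheduled tasks come up asynchronously. The bundled `examples/example.py` polls the task until it reports ready before calling its endpoint; a condensed version of that loop:
+
+ ```python
+ import time
+
+ # Poll until the task's serving endpoint is ready.
+ while True:
+     task = client.task_manager.get_task(task_id)
+     if task.task_status == "ready":
+         break
+     time.sleep(2)
+ ```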
+
+ ### Call Chat Completion
+
+ ```python
+ from openai import OpenAI
+
+ task = client.task_manager.get_task(task_id)
+ open_ai = OpenAI(
+     api_key="YOUR_API_KEY",
+     base_url=task.info.endpoint
+ )
+
+ completion = open_ai.chat.completions.create(
+     model="meta-llama/Llama-3.1-8B-Instruct",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Translate the sentences to Chinese"},
+     ],
+     max_tokens=200,
+     temperature=0.7
+ )
+
+ print(completion.choices[0].message.content)
+ ```
+
+ ## Configuration
+
+ ### One-off Task Configuration
+
+ `examples/config/one-off_task.json`:
+
+ ```json
+ {
+   "config": {
+     "ray_task_config": {
+       "ray_version": "2.40.0",
+       "file_path": "serve",
+       "deployment_name": "string",
+       "replica_resource": {
+         "cpu": 2,
+         "ram_gb": 12,
+         "gpu": 1
+       }
+     },
+     "task_scheduling": {
+       "scheduling_oneoff": {
+         "min_replicas": 1,
+         "max_replicas": 1
+       }
+     }
+   }
+ }
+ ```
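+
+ A config file like this can drive task creation. A minimal sketch, assuming the exported models accept the parsed JSON as nested keyword arguments (an assumption; if they do not, build `TaskConfig` and its children explicitly as shown above):
+
+ ```python
+ import json
+ from gmicloud import Task
+
+ with open("examples/config/one-off_task.json") as f:
+     spec = json.load(f)
+
+ # Assumption: Task(**spec) maps the nested JSON onto the model fields.
+ task_id = client.task_manager.create_task(Task(**spec))
+ ```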
+
+ ### Daily Task Configuration
+
+ `examples/config/daily_task.json`:
+
+ ```json
+ {
+   "config": {
+     "ray_task_config": {
+       "ray_version": "2.40.0-py310-gpu",
+       "file_path": "serve",
+       "deployment_name": "string",
+       "replica_resource": {
+         "cpu": 6,
+         "ram_gb": 64,
+         "gpu": 2
+       }
+     },
+     "task_scheduling": {
+       "scheduling_daily": {
+         "triggers": [
+           {
+             "timezone": "UTC",
+             "hour": 0,
+             "minute": 10,
+             "second": 0,
+             "min_replicas": 1,
+             "max_replicas": 2
+           },
+           {
+             "timezone": "UTC",
+             "hour": 0,
+             "minute": 10,
+             "second": 30,
+             "min_replicas": 1,
+             "max_replicas": 4
+           }
+         ]
+       }
+     }
+   }
+ }
+ ```
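+
+ The same schedule can be expressed with the exported `DailyScheduling` and `DailyTrigger` models. A sketch, assuming their field names mirror the JSON keys above:
+
+ ```python
+ from gmicloud import TaskScheduling, DailyScheduling, DailyTrigger
+
+ # Assumption: DailyTrigger fields mirror the JSON keys above.
+ scheduling = TaskScheduling(
+     scheduling_daily=DailyScheduling(
+         triggers=[
+             DailyTrigger(timezone="UTC", hour=0, minute=10, second=0,
+                          min_replicas=1, max_replicas=2),
+             DailyTrigger(timezone="UTC", hour=0, minute=10, second=30,
+                          min_replicas=1, max_replicas=4),
+         ]
+     )
+ )
+ ```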
+
+ ## Running Tests
+
+ To run the unit tests, use the following command:
+
+ ```bash
+ pytest
+ ```
+
+ ## Contributing
+
+ Contributions are welcome! Please open an issue or submit a pull request.
+
+ ## License
+
+ This project is licensed under the MIT License. See the `LICENSE` file for details.
gmicloud-0.1.0/README.md
@@ -0,0 +1,191 @@
[191 lines: identical to the README body embedded in PKG-INFO above; duplicate omitted]
gmicloud-0.1.0/examples/example.py
@@ -0,0 +1,145 @@
+ import os
+ import time
+ from datetime import datetime
+
+ from openai import OpenAI
+
+ from gmicloud import *
+
+
+ def create_artifact_with_file(client: Client) -> str:
+     artifact_manager = client.artifact_manager
+
+     # Create an artifact with a file
+     artifact_id = artifact_manager.create_artifact_with_file(
+         artifact_name="Llama3.1 8B",
+         artifact_file_path="./files/Llama-3.1-8B-Instruct.zip",
+         description="This is a test artifact",
+         tags=['example', 'test']
+     )
+
+     return artifact_id
+
+
+ def create_artifact_from_template(client: Client) -> str:
+     artifact_manager = client.artifact_manager
+
+     # Get all artifact templates
+     templates = artifact_manager.get_artifact_templates()
+     print(templates)
+     for template in templates:
+         if template.artifact_name == "Llama3.1 8B":
+             # Create an artifact from a template
+             artifact_id = artifact_manager.create_artifact_from_template(
+                 artifact_template_id=template.artifact_template_id,
+             )
+
+             return artifact_id
+
+     return ""
+
+
+ def create_task_and_start(client: Client, artifact_id: str) -> str:
+     artifact_manager = client.artifact_manager
+     # Wait for the artifact to be ready
+     while True:
+         try:
+             artifact = artifact_manager.get_artifact(artifact_id)
+             print(f"Artifact status: {artifact.build_status}")
+             # Wait until the artifact is ready
+             if artifact.build_status == BuildStatus.SUCCESS:
+                 break
+         except Exception as e:
+             raise e
+         # Wait for 2 seconds
+         time.sleep(2)
+     try:
+         task_manager = client.task_manager
+         # Create a task
+         task = task_manager.create_task(Task(
+             config=TaskConfig(
+                 ray_task_config=RayTaskConfig(
+                     ray_version="latest-py311-gpu",
+                     file_path="serve",
+                     artifact_id=artifact_id,
+                     deployment_name="app",
+                     replica_resource=ReplicaResource(
+                         cpu=24,
+                         ram_gb=128,
+                         gpu=2,
+                     ),
+                 ),
+                 task_scheduling=TaskScheduling(
+                     scheduling_oneoff=OneOffScheduling(
+                         trigger_timestamp=int(datetime.now().timestamp()) + 60,
+                         min_replicas=1,
+                         max_replicas=10,
+                     )
+                 ),
+             ),
+         ))
+
+         # Start the task
+         task_manager.start_task(task.task_id)
+     except Exception as e:
+         raise e
+
+     return task.task_id
+
+
+ def call_chat_completion(client: Client, task_id: str):
+     task_manager = client.task_manager
+     # Wait for the task to be ready
+     while True:
+         try:
+             task = task_manager.get_task(task_id)
+             print(f"task status: {task.task_status}")
+             # Wait until the task is ready
+             if task.task_status == "ready":
+                 break
+         except Exception as e:
+             print(e)
+             return
+         # Wait for 2 seconds
+         time.sleep(2)
+
+     if not task.info.endpoint or not task.info.endpoint.strip():
+         raise Exception("Task endpoint is not ready yet")
+
+     open_ai = OpenAI(
+         api_key=os.getenv("OPENAI_API_KEY", "YOUR_DEFAULT_API_KEY"),
+         base_url=os.getenv("OPENAI_API_BASE", task.info.endpoint)
+     )
+     # Make a chat completion request using the new OpenAI client.
+     completion = open_ai.chat.completions.create(
+         model="meta-llama/Llama-3.1-8B-Instruct",
+         messages=[
+             {"role": "system", "content": "You are a helpful assistant."},
+             {"role": "user",
+              "content": "Translate the sentences to Chinese"},
+         ],
+         max_tokens=200,
+         temperature=0.7
+     )
+
+     print(completion.choices[0].message.content)
+
+
+ if __name__ == '__main__':
+     # Initialize the Client
+     cli = Client()
+
+     # print(cli.artifact_manager.get_all_artifacts())
+
+     # Create an artifact with a file
+     # artifact_id = create_artifact_with_file(cli)
+
+     # Create an artifact from a template
+     # artifact_id = create_artifact_from_template(cli)
+     artifact_id = "cba6db2f-315a-4765-9e94-1e692f7fdb39"
+
+     # Create a task and start it
+     task_id = create_task_and_start(cli, artifact_id)
+
+     # Call chat completion
+     call_chat_completion(cli, task_id)
gmicloud-0.1.0/gmicloud/__init__.py
@@ -0,0 +1,41 @@
+ from ._internal._models import (
+     Artifact,
+     ArtifactData,
+     ArtifactMetadata,
+     Task,
+     TaskOwner,
+     TaskConfig,
+     TaskInfo,
+     RayTaskConfig,
+     TaskScheduling,
+     ReplicaResource,
+     OneOffScheduling,
+     DailyScheduling,
+     DailyTrigger,
+     ArtifactTemplate
+ )
+ from ._internal._enums import (
+     BuildStatus,
+     TaskEndpointStatus
+ )
+ from .client import Client
+
+ __all__ = [
+     "Client",
+     "Artifact",
+     "ArtifactData",
+     "ArtifactMetadata",
+     "Task",
+     "TaskOwner",
+     "TaskConfig",
+     "TaskInfo",
+     "RayTaskConfig",
+     "TaskScheduling",
+     "ReplicaResource",
+     "OneOffScheduling",
+     "DailyScheduling",
+     "DailyTrigger",
+     "ArtifactTemplate",
+     "BuildStatus",
+     "TaskEndpointStatus"
+ ]
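
Everything listed in `__all__` is re-exported at the package root, so downstream code can import the client, models, and enums directly:

```python
# The package root re-exports the client, models, and enums.
from gmicloud import Client, Task, TaskConfig, BuildStatus
```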