aiauto-client 0.1.11__py3-none-any.whl → 0.1.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiauto/http_client.py +13 -4
- aiauto/serializer.py +1 -2
- {aiauto_client-0.1.11.dist-info → aiauto_client-0.1.13.dist-info}/METADATA +26 -4
- aiauto_client-0.1.13.dist-info/RECORD +10 -0
- aiauto_client-0.1.11.dist-info/RECORD +0 -10
- {aiauto_client-0.1.11.dist-info → aiauto_client-0.1.13.dist-info}/WHEEL +0 -0
- {aiauto_client-0.1.11.dist-info → aiauto_client-0.1.13.dist-info}/top_level.txt +0 -0
aiauto/http_client.py
CHANGED
@@ -35,10 +35,19 @@ class ConnectRPCClient:
         except requests.exceptions.HTTPError as e:
             # Connect RPC error format
             if e.response and e.response.headers.get('content-type', '').startswith('application/json'):
-
-
-
-
+                try:
+                    error_data = e.response.json()
+                    # Connect RPC returns error in 'message' field
+                    error_msg = error_data.get('message', '')
+                    if error_msg:
+                        raise RuntimeError(f"Server error: {error_msg}") from e
+                    # Fallback to full error data if no message
+                    raise RuntimeError(f"Server error: {error_data}") from e
+                except ValueError:
+                    # JSON decode failed
+                    pass
+            # Fallback to basic HTTP error
+            raise RuntimeError(f"HTTP {e.response.status_code} error: {e.response.text if e.response else str(e)}") from e
         except requests.exceptions.RequestException as e:
             raise RuntimeError(f"Request failed: {e}") from e
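For context, the hunk above makes the client read the JSON error body that the server returns and, per the added comment, prefer its 'message' field. The following is a minimal standalone sketch (not part of the package) of that extraction step; the error payload below is a hypothetical example, not taken from the package or its server.

```python
import json

# Hypothetical Connect RPC style error body; the 0.1.13 handler reads the
# 'message' field first and falls back to the whole payload if it is empty.
error_body = '{"code": "invalid_argument", "message": "study name already exists"}'

error_data = json.loads(error_body)
error_msg = error_data.get('message', '')
if error_msg:
    text = f"Server error: {error_msg}"
else:
    text = f"Server error: {error_data}"
print(text)  # Server error: study name already exists
```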
aiauto/serializer.py
CHANGED
{aiauto_client-0.1.11.dist-info → aiauto_client-0.1.13.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: aiauto-client
-Version: 0.1.11
+Version: 0.1.13
 Summary: AI Auto HPO (Hyperparameter Optimization) Client Library
 Author-email: AIAuto Team <ainode@zeroone.ai>
 Project-URL: Homepage, https://dashboard.common.aiauto.pangyo.ainode.ai
@@ -73,6 +73,7 @@ for image in aiauto.RUNTIME_IMAGES:
 - Initialize the AIAutoController singleton with the issued token, as in the code below, to activate the OptunaWorkspace
 ```python
 import aiauto
+import time

 ac = aiauto.AIAutoController('<token>')
 ```
@@ -83,6 +84,7 @@ study_wrapper = ac.create_study(
     study_name='test',
     direction='maximize', # or 'minimize'
 )
+time.sleep(5)
 ```
 - Write an objective function for the created study and pass it in, as in the code below; once optimize is called, progress can be checked at `https://dashboard.common.aiauto.pangyo.ainode.ai/trialbatch` and also via the optuna-dashboard link
 ```python
@@ -93,6 +95,7 @@ study_wrapper.optimize(
     use_gpu=False,
     runtime_image=aiauto.RUNTIME_IMAGES[0],
 )
+time.sleep(5)
 ```
 - How to check from code, rather than from optuna-dashboard, whether it has finished
 ```python
@@ -132,11 +135,14 @@ def objective(trial: optuna.trial.Trial):
 - Import and use the saved function
 ```python
 import aiauto
+import time
 from my_objective import objective

 ac = aiauto.AIAutoController('<token>')
 study = ac.create_study('test', 'minimize')
+time.sleep(5)
 study.optimize(objective, n_trials=10, parallelism=2)
+time.sleep(5)
 ```

 ## Quick Start
@@ -146,6 +152,7 @@ study.optimize(objective, n_trials=10, parallelism=2)
 ```python
 import optuna
 import aiauto
+import time


 # Visit `https://dashboard.common.aiauto.pangyo.ainode.ai`, log in to ainode, and issue an aiauto token
@@ -159,6 +166,7 @@ study_wrapper = ac.create_study(
     direction="minimize"
     # sampler=optuna.samplers.TPESampler(), # samplers provided by optuna can be used as-is, see https://optuna.readthedocs.io/en/stable/reference/samplers/index.html
 )
+time.sleep(5)
 # The created study can be checked at `https://dashboard.common.aiauto.pangyo.ainode.ai/study`

 # Define the objective function
@@ -174,6 +182,7 @@ study_wrapper.optimize(
     n_trials=100,
     parallelism=4 # number of Pods running concurrently
 )
+time.sleep(5)
 # Results can be checked via the optuna-dashboard link created at `https://dashboard.common.aiauto.pangyo.ainode.ai/workspace`
 ```

@@ -182,6 +191,7 @@ study_wrapper.optimize(
 ```python
 import optuna
 import aiauto
+import time


 # Visit `https://dashboard.common.aiauto.pangyo.ainode.ai`, log in to ainode, and issue an aiauto token
@@ -199,6 +209,7 @@ study_wrapper = ac.create_study(
         patience=4,
     ),
 )
+time.sleep(5)
 # The created study can be checked at `https://dashboard.common.aiauto.pangyo.ainode.ai/study`

 # Define the objective function
@@ -306,6 +317,7 @@ study_wrapper.optimize(
     use_gpu=True, # use GPU
     requirements_list=['torch', 'torchvision'] # installed automatically in the Pod
 )
+time.sleep(5)
 ```

 ### 3. Multi-Objective Optimization (Accuracy + FLOPS)
@@ -313,6 +325,7 @@ study_wrapper.optimize(
 ```python
 import optuna
 import aiauto
+import time


 # Visit `https://dashboard.common.aiauto.pangyo.ainode.ai`, log in to ainode, and issue an aiauto token
@@ -326,6 +339,7 @@ study_wrapper = ac.create_study(
     direction=["minimize", "minimize"], # loss minimize, FLOPS minimize
     # sampler=optuna.samplers.TPESampler(), # samplers provided by optuna can be used as-is, see https://optuna.readthedocs.io/en/stable/reference/samplers/index.html
 )
+time.sleep(5)
 # The created study can be checked at `https://dashboard.common.aiauto.pangyo.ainode.ai/study`

 # Define the objective function
@@ -434,6 +448,7 @@ study_wrapper.optimize(
     use_gpu=True, # use GPU
     requirements_list=['torch', 'torchvision', 'fvcore'] # installed automatically in the Pod
 )
+time.sleep(5)
 ```

 ### 4. Ask/Tell Pattern and Optuna's Own Study
@@ -441,6 +456,7 @@ study_wrapper.optimize(
 ```python
 import optuna
 import aiauto
+import time

 # Visit `https://dashboard.common.aiauto.pangyo.ainode.ai`, log in to ainode, and issue an aiauto token
 # Initialize the AIAutoController singleton to activate the OptunaWorkspace (the token is set only once)
@@ -457,6 +473,7 @@ study_wrapper = ac.create_study(
     # patience=4,
     # )
 )
+time.sleep(5)
 # The created study can be checked at `https://dashboard.common.aiauto.pangyo.ainode.ai/study`

 # Obtain the actual optuna.Study object (ask/tell is possible locally)
@@ -521,10 +538,11 @@ make build push

 #### (A) Distributed execution
 ```python
-import aiauto, optuna
+import aiauto, optuna, time

 controller = aiauto.AIAutoController('aiauto_xxx')
 study = controller.create_study('exp1', direction='minimize')
+time.sleep(5)

 def objective(trial):
     import numpy as np
@@ -537,14 +555,16 @@ n_trials=64,
     parallelism=8,
     requirements_list=['numpy'],
 )
+time.sleep(5)
 ```

 #### (B) ask/tell (uses an actual optuna.Study)
 ```python
-import aiauto, optuna
+import aiauto, optuna, time

 controller = aiauto.AIAutoController('aiauto_xxx')
 sw = controller.create_study('manual', direction='minimize')
+time.sleep(5)

 real = sw.get_study()  # load the actual optuna.Study (gRPC: h2c 13000)
 t = real.ask()
@@ -576,7 +596,7 @@ real.tell(t, (x - 2) ** 2)

 #### Example: using a Sampler/Pruner as-is
 ```python
-import optuna, aiauto
+import optuna, aiauto, time

 controller = aiauto.AIAutoController('aiauto_xxx')
 study = controller.create_study(
@@ -585,6 +605,7 @@ direction='minimize',
     sampler=optuna.samplers.TPESampler(seed=42),
     pruner=optuna.pruners.MedianPruner(n_startup_trials=5),
 )
+time.sleep(5)

 def objective(trial):
     import numpy as np
@@ -592,6 +613,7 @@ lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
     return (np.log10(lr) + 2) ** 2

 study.optimize(objective, n_trials=50, parallelism=4)
+time.sleep(5)
 ```

 #### Example: using the Optuna API as-is after get_study()
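Taken together, the README changes in 0.1.13 all introduce the same pattern: import `time` and pause briefly after `create_study()` and after `optimize()`. Below is a minimal end-to-end sketch of that pattern, assuming the same API shown in the README snippets above; the token, study name, and objective are placeholders.

```python
import time

import aiauto

ac = aiauto.AIAutoController('<token>')  # token issued from the dashboard

study = ac.create_study('example', direction='minimize')
time.sleep(5)  # brief pause after creating the study, as the updated README does


def objective(trial):
    # Toy objective: minimum at x = 2
    x = trial.suggest_float('x', -10, 10)
    return (x - 2) ** 2


study.optimize(objective, n_trials=10, parallelism=2)
time.sleep(5)  # brief pause after submitting trials, as the updated README does
```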
aiauto_client-0.1.13.dist-info/RECORD
ADDED
@@ -0,0 +1,10 @@
+aiauto/__init__.py,sha256=sF7sJaXg7-MqolSYLxsaXAir1dBzARhXLrHo7zLsupg,345
+aiauto/_config.py,sha256=hTFh2bH9m-HuX6QCpNtBC0j6rEB0S97hhPKjbEjv4Tg,89
+aiauto/constants.py,sha256=rBibGOQHHrdkwaai92-3I8-N0cu-B4CoCoQbG9-Cl8k,821
+aiauto/core.py,sha256=eEwit5oL8DIfglOVe2km_7MAtuZquEd5Xvkbq6EaW9o,9945
+aiauto/http_client.py,sha256=v_nPdb-2tIeH1XrOYqzMGvFfXLKEDbQoSaQYPsB0Hik,2587
+aiauto/serializer.py,sha256=nT2F-Jyrd_3uw1QhlrfMx8vprNsL7OF7MCJYBKhkwoY,1905
+aiauto_client-0.1.13.dist-info/METADATA,sha256=21_Kdp2DmLjhe5dR2K4F5UqbvJ229oRsvnk5fEKwcJA,25022
+aiauto_client-0.1.13.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+aiauto_client-0.1.13.dist-info/top_level.txt,sha256=Sk2ctO9_Bf_tAPwq1x6Vfl6OuL29XzwMTO4F_KG6oJE,7
+aiauto_client-0.1.13.dist-info/RECORD,,
aiauto_client-0.1.11.dist-info/RECORD
REMOVED
@@ -1,10 +0,0 @@
-aiauto/__init__.py,sha256=sF7sJaXg7-MqolSYLxsaXAir1dBzARhXLrHo7zLsupg,345
-aiauto/_config.py,sha256=hTFh2bH9m-HuX6QCpNtBC0j6rEB0S97hhPKjbEjv4Tg,89
-aiauto/constants.py,sha256=rBibGOQHHrdkwaai92-3I8-N0cu-B4CoCoQbG9-Cl8k,821
-aiauto/core.py,sha256=eEwit5oL8DIfglOVe2km_7MAtuZquEd5Xvkbq6EaW9o,9945
-aiauto/http_client.py,sha256=gVDlgnqjC6FcAbZ4rsjzZHfkusotixOShDCdWBdd-sk,2100
-aiauto/serializer.py,sha256=KqQeH0xp4LQuZE6r8kzXQsWY6QgC3hqn8MSuWTt4QmU,1938
-aiauto_client-0.1.11.dist-info/METADATA,sha256=QmBI6pLnq8uRzKbBp56g0ypWF4Y4Avn_oxhD3Zj9Lqk,24708
-aiauto_client-0.1.11.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
-aiauto_client-0.1.11.dist-info/top_level.txt,sha256=Sk2ctO9_Bf_tAPwq1x6Vfl6OuL29XzwMTO4F_KG6oJE,7
-aiauto_client-0.1.11.dist-info/RECORD,,
{aiauto_client-0.1.11.dist-info → aiauto_client-0.1.13.dist-info}/WHEEL
File without changes
{aiauto_client-0.1.11.dist-info → aiauto_client-0.1.13.dist-info}/top_level.txt
File without changes