aiauto-client 0.1.18__tar.gz → 0.1.20__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (21)
  1. {aiauto_client-0.1.18/src/aiauto_client.egg-info → aiauto_client-0.1.20}/PKG-INFO +8 -5
  2. {aiauto_client-0.1.18 → aiauto_client-0.1.20}/README.md +7 -4
  3. {aiauto_client-0.1.18 → aiauto_client-0.1.20}/examples/example_simple.py +4 -0
  4. {aiauto_client-0.1.18 → aiauto_client-0.1.20}/examples/example_torch_multiple_objective.py +14 -4
  5. {aiauto_client-0.1.18 → aiauto_client-0.1.20}/examples/example_torch_single_objective.py +8 -0
  6. {aiauto_client-0.1.18 → aiauto_client-0.1.20}/pyproject.toml +1 -1
  7. {aiauto_client-0.1.18 → aiauto_client-0.1.20/src/aiauto_client.egg-info}/PKG-INFO +8 -5
  8. {aiauto_client-0.1.18 → aiauto_client-0.1.20}/MANIFEST.in +0 -0
  9. {aiauto_client-0.1.18 → aiauto_client-0.1.20}/setup.cfg +0 -0
  10. {aiauto_client-0.1.18 → aiauto_client-0.1.20}/src/aiauto/__init__.py +0 -0
  11. {aiauto_client-0.1.18 → aiauto_client-0.1.20}/src/aiauto/_config.py +0 -0
  12. {aiauto_client-0.1.18 → aiauto_client-0.1.20}/src/aiauto/constants.py +0 -0
  13. {aiauto_client-0.1.18 → aiauto_client-0.1.20}/src/aiauto/core.py +0 -0
  14. {aiauto_client-0.1.18 → aiauto_client-0.1.20}/src/aiauto/http_client.py +0 -0
  15. {aiauto_client-0.1.18 → aiauto_client-0.1.20}/src/aiauto/serializer.py +0 -0
  16. {aiauto_client-0.1.18 → aiauto_client-0.1.20}/src/aiauto_client.egg-info/SOURCES.txt +0 -0
  17. {aiauto_client-0.1.18 → aiauto_client-0.1.20}/src/aiauto_client.egg-info/dependency_links.txt +0 -0
  18. {aiauto_client-0.1.18 → aiauto_client-0.1.20}/src/aiauto_client.egg-info/requires.txt +0 -0
  19. {aiauto_client-0.1.18 → aiauto_client-0.1.20}/src/aiauto_client.egg-info/top_level.txt +0 -0
  20. {aiauto_client-0.1.18 → aiauto_client-0.1.20}/tests/test_pruners.py +0 -0
  21. {aiauto_client-0.1.18 → aiauto_client-0.1.20}/tests/test_samplers.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: aiauto-client
3
- Version: 0.1.18
3
+ Version: 0.1.20
4
4
  Summary: AI Auto HPO (Hyperparameter Optimization) Client Library
5
5
  Author-email: AIAuto Team <ainode@zeroone.ai>
6
6
  Project-URL: Homepage, https://dashboard.common.aiauto.pangyo.ainode.ai
@@ -67,7 +67,7 @@ for image in aiauto.RUNTIME_IMAGES:
67
67
  ```
68
68
 
69
69
  ## 실행 흐름
70
- ### token 발급 # TODO
70
+ ### token 발급
71
71
  - `https://dashboard.common.aiauto.pangyo.ainode.ai` 에 접속하여 ainode 에 로그인 한 후
72
72
  - `https://dashboard.common.aiauto.pangyo.ainode.ai/token` 으로 이동하여 aiauto 의 token 을 발급
73
73
  - 아래 코드 처럼 발급한 token 을 넣어 AIAutoController singleton 객체를 초기화, OptunaWorkspace 를 활성화 시킨다
@@ -101,12 +101,15 @@ time.sleep(5)
101
101
  ```python
102
102
  study_wrapper.get_status()
103
103
  # {'study_name': 'test', 'count_active': 0, 'count_succeeded': 10, 'count_pruned': 0, 'count_failed': 0, 'count_total': 10, 'count_completed': 10, 'dashboard_url': 'https://optuna-dashboard-10f804bb-52be-48e8-aa06-9f5411ed4b0d.aiauto.pangyo.ainode.ai', 'last_error': '', 'updated_at': '2025-09-01T11:31:49.375Z'}
104
- while study_wrapper.get_status()['count_completed'] <= study_wrapper.get_status()['count_total']:
105
- sleep(10) # 10 마다 한 번 씩
104
+ while study_wrapper.get_status()['count_completed'] < study_wrapper.get_status()['count_total']:
105
+ time.sleep(10) # 10초마다 확인
106
106
  ```
107
107
  - best trial 을 가져오는 법
108
108
  ```python
109
- TODO
109
+ # 진짜 optuna study 를 받아옴
110
+ real_study = study_wrapper.get_study()
111
+ best_trial = real_study.best_trial
112
+ print(best_trial.params)
110
113
  ```
111
114
 
112
115
  ## Jupyter Notebook 사용 시 주의사항
@@ -40,7 +40,7 @@ for image in aiauto.RUNTIME_IMAGES:
40
40
  ```
41
41
 
42
42
  ## 실행 흐름
43
- ### token 발급 # TODO
43
+ ### token 발급
44
44
  - `https://dashboard.common.aiauto.pangyo.ainode.ai` 에 접속하여 ainode 에 로그인 한 후
45
45
  - `https://dashboard.common.aiauto.pangyo.ainode.ai/token` 으로 이동하여 aiauto 의 token 을 발급
46
46
  - 아래 코드 처럼 발급한 token 을 넣어 AIAutoController singleton 객체를 초기화, OptunaWorkspace 를 활성화 시킨다
@@ -74,12 +74,15 @@ time.sleep(5)
74
74
  ```python
75
75
  study_wrapper.get_status()
76
76
  # {'study_name': 'test', 'count_active': 0, 'count_succeeded': 10, 'count_pruned': 0, 'count_failed': 0, 'count_total': 10, 'count_completed': 10, 'dashboard_url': 'https://optuna-dashboard-10f804bb-52be-48e8-aa06-9f5411ed4b0d.aiauto.pangyo.ainode.ai', 'last_error': '', 'updated_at': '2025-09-01T11:31:49.375Z'}
77
- while study_wrapper.get_status()['count_completed'] <= study_wrapper.get_status()['count_total']:
78
- sleep(10) # 10 마다 한 번 씩
77
+ while study_wrapper.get_status()['count_completed'] < study_wrapper.get_status()['count_total']:
78
+ time.sleep(10) # 10초마다 확인
79
79
  ```
80
80
  - best trial 을 가져오는 법
81
81
  ```python
82
- TODO
82
+ # 진짜 optuna study 를 받아옴
83
+ real_study = study_wrapper.get_study()
84
+ best_trial = real_study.best_trial
85
+ print(best_trial.params)
83
86
  ```
84
87
 
85
88
  ## Jupyter Notebook 사용 시 주의사항
@@ -75,6 +75,10 @@ if __name__ == '__main__':
75
75
  )
76
76
  time.sleep(5)
77
77
 
78
+ # 최적화가 끝날 때까지 대기
79
+ while study_wrapper.get_status()['count_completed'] < study_wrapper.get_status()['count_total']:
80
+ time.sleep(10) # 10초마다 확인
81
+
78
82
  study = study_wrapper.get_study()
79
83
 
80
84
  print('\nBest trials:')
@@ -136,7 +136,7 @@ def objective_multi(trial):
136
136
 
137
137
  # 데이터 샘플링 옵션 - 튜닝 시에만 사용
138
138
  data_fraction_number = trial.suggest_categorical('data_fraction_number', [4, 8])
139
- data_subset_idx = trial.suggest_int('data_subset_idx', 0, data_fraction - 1)
139
+ data_subset_idx = trial.suggest_int('data_subset_idx', 0, data_fraction_number - 1)
140
140
 
141
141
  tc.log(f'data_fraction_number={data_fraction_number}, data_subset_idx={data_subset_idx}')
142
142
 
@@ -448,7 +448,7 @@ if __name__ == '__main__':
448
448
 
449
449
  # ========================= subset data optimize ===========================
450
450
  study_wrapper.optimize(
451
- objective_multi(),
451
+ objective_multi,
452
452
  n_trials=100,
453
453
  parallelism=4, # n_jobs 대신 parallelism 사용
454
454
  use_gpu=True, # GPU 사용
@@ -467,6 +467,10 @@ if __name__ == '__main__':
467
467
  )
468
468
  time.sleep(5)
469
469
 
470
+ # 최적화가 끝날 때까지 대기
471
+ while study_wrapper.get_status()['count_completed'] < study_wrapper.get_status()['count_total']:
472
+ time.sleep(10) # 10초마다 확인
473
+
470
474
  study = study_wrapper.get_study()
471
475
 
472
476
  for trial in study.best_trials[:5]: # 상위 5개만
@@ -477,8 +481,10 @@ if __name__ == '__main__':
477
481
  print()
478
482
 
479
483
  # ========================== full data optimize ============================
480
- # best_trial 파라미터를 사용해서 전체 데이터로 학습
481
- study.enqueue_trial(study.best_trial)
484
+ # best_trials 중에서 사용자가 원하는 trial 선택해서 전체 데이터로 학습
485
+ # directions가 두 개이므로 accuracy가 가장 높은 trial 또는 FLOPS가 가장 낮은 trial에서 선택
486
+ selected_trial = study.best_trials[0] # 첫 번째 Pareto optimal trial 선택
487
+ study.enqueue_trial(selected_trial)
482
488
  study.optimize(
483
489
  objective_detailed,
484
490
  n_trials=1, # enqueue 한 만큼만 실행
@@ -491,3 +497,7 @@ if __name__ == '__main__':
491
497
  "memory": "4Gi",
492
498
  },
493
499
  )
500
+
501
+ # 전체 데이터 학습이 끝날 때까지 대기
502
+ while study_wrapper.get_status()['count_completed'] < study_wrapper.get_status()['count_total']:
503
+ time.sleep(10) # 10초마다 확인
@@ -471,6 +471,10 @@ if __name__ == '__main__':
471
471
  )
472
472
  time.sleep(5)
473
473
 
474
+ # 최적화가 끝날 때까지 대기
475
+ while study_wrapper.get_status()['count_completed'] < study_wrapper.get_status()['count_total']:
476
+ time.sleep(10) # 10초마다 확인
477
+
474
478
  study = study_wrapper.get_study()
475
479
 
476
480
  print('\nBest trials:')
@@ -495,3 +499,7 @@ if __name__ == '__main__':
495
499
  "memory": "4Gi",
496
500
  },
497
501
  )
502
+
503
+ # 전체 데이터 학습이 끝날 때까지 대기
504
+ while study_wrapper.get_status()['count_completed'] < study_wrapper.get_status()['count_total']:
505
+ time.sleep(10) # 10초마다 확인
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "aiauto-client"
7
- version = "0.1.18"
7
+ version = "0.1.20"
8
8
  description = "AI Auto HPO (Hyperparameter Optimization) Client Library"
9
9
  readme = "README.md"
10
10
  requires-python = ">=3.8"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: aiauto-client
3
- Version: 0.1.18
3
+ Version: 0.1.20
4
4
  Summary: AI Auto HPO (Hyperparameter Optimization) Client Library
5
5
  Author-email: AIAuto Team <ainode@zeroone.ai>
6
6
  Project-URL: Homepage, https://dashboard.common.aiauto.pangyo.ainode.ai
@@ -67,7 +67,7 @@ for image in aiauto.RUNTIME_IMAGES:
67
67
  ```
68
68
 
69
69
  ## 실행 흐름
70
- ### token 발급 # TODO
70
+ ### token 발급
71
71
  - `https://dashboard.common.aiauto.pangyo.ainode.ai` 에 접속하여 ainode 에 로그인 한 후
72
72
  - `https://dashboard.common.aiauto.pangyo.ainode.ai/token` 으로 이동하여 aiauto 의 token 을 발급
73
73
  - 아래 코드 처럼 발급한 token 을 넣어 AIAutoController singleton 객체를 초기화, OptunaWorkspace 를 활성화 시킨다
@@ -101,12 +101,15 @@ time.sleep(5)
101
101
  ```python
102
102
  study_wrapper.get_status()
103
103
  # {'study_name': 'test', 'count_active': 0, 'count_succeeded': 10, 'count_pruned': 0, 'count_failed': 0, 'count_total': 10, 'count_completed': 10, 'dashboard_url': 'https://optuna-dashboard-10f804bb-52be-48e8-aa06-9f5411ed4b0d.aiauto.pangyo.ainode.ai', 'last_error': '', 'updated_at': '2025-09-01T11:31:49.375Z'}
104
- while study_wrapper.get_status()['count_completed'] <= study_wrapper.get_status()['count_total']:
105
- sleep(10) # 10 마다 한 번 씩
104
+ while study_wrapper.get_status()['count_completed'] < study_wrapper.get_status()['count_total']:
105
+ time.sleep(10) # 10초마다 확인
106
106
  ```
107
107
  - best trial 을 가져오는 법
108
108
  ```python
109
- TODO
109
+ # 진짜 optuna study 를 받아옴
110
+ real_study = study_wrapper.get_study()
111
+ best_trial = real_study.best_trial
112
+ print(best_trial.params)
110
113
  ```
111
114
 
112
115
  ## Jupyter Notebook 사용 시 주의사항
Files without changes