aiauto-client 0.1.0__tar.gz → 0.1.1__tar.gz

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: aiauto-client
- Version: 0.1.0
+ Version: 0.1.1
  Summary: AI Auto HPO (Hyperparameter Optimization) Client Library
  Author-email: AIAuto Team <ainode@zeroone.ai>
  Project-URL: Homepage, https://aiauto.cloude.ainode.ai
@@ -21,9 +21,7 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
  Requires-Python: >=3.8
  Description-Content-Type: text/markdown
  Requires-Dist: optuna>=3.0.0
- Requires-Dist: grpcio>=1.50.0
- Requires-Dist: grpcio-tools>=1.50.0
- Requires-Dist: protobuf>=4.0.0
+ Requires-Dist: requests>=2.25.0

  # AIAuto - Hyperparameter Optimization Client Library

@@ -31,44 +29,80 @@ AIAuto is a client library for a Kubernetes-based distributed HPO (Hyperparameter Optimization) system.
  Handles gRPC communication between the user's Python lib and the Next.js server

  ## lib build
- - install the PyPI build/upload dependencies: `pip install build twine`
- - build the lib: `python -m build --wheel --sdist`
- - produces `aiauto-0.1.0.whl`
- - produces `aiauto-0.1.0.tar.gz`
- - produces `aiauto.egg-info`
- - `twine upload --repository testpypi dist/*`
+ - install the PyPI build/upload dependencies: `uv add --dev twine`
+ - build the lib: `uv build`
+ - produces `aiauto_client-0.1.1-py3-none-any.whl`
+ - produces `aiauto_client-0.1.1.tar.gz`
+ - produces `aiauto_client.egg-info`
+ - check the settings in `~/.pypirc`
+ - `twine upload --repository aiauto-client dist/*`
  - `twine upload dist/*`
  - on upload you are prompted for a PyPI token; you can find it in your PyPI account settings

  ## Install
- - `pip install aiauto`
+ - `uv add aiauto-client`

  ## Quick start
+
+ ### Creating a study and the ask/tell pattern
  ```python
  import aiauto

- # initialize the controller
- ac = aiauto.AIAutoController()
+ # create a StudyWrapper (JWT token required)
+ studyWrapper = aiauto.create_study(
+     study_name='my_optimization',
+     token='your_jwt_token',
+     direction='maximize'
+ )
+
+ # get the actual optuna.Study object (ask/tell works locally)
+ study = studyWrapper.get_study()
+
+ # optimize with the ask/tell pattern
+ trial = study.ask()
+ params = trial.params
+
+ # train the user's model
+ accuracy = train_model(params)
+
+ # report the result
+ study.tell(trial, accuracy)
+ ```
+
+ ### Distributed optimization (Pod execution)
+ ```python
+ import aiauto

- # define the objective function
  def objective(trial):
      tc = aiauto.TrialController(trial)

      # sample hyperparameters
      lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
+     batch_size = trial.suggest_int('batch_size', 16, 128)

-     # model training and evaluation logic
-     # ...
-     tc.log(f'full dataset: train {len(dataset)}, test {len(dataset_test)}, batch_size {batch_size}')
+     # model training logic
+     accuracy = train_model(lr, batch_size)
+
+     tc.log(f'lr: {lr}, batch_size: {batch_size}, accuracy: {accuracy}')

      return accuracy

- # create the study and run the optimization
- study = optuna.create_study(
-     study_name='my_optimization',
-     storage=ac.get_storage(),
+ # create a StudyWrapper
+ studyWrapper = aiauto.create_study(
+     study_name='distributed_optimization',
+     token='your_jwt_token',
      direction='maximize'
  )

- study.optimize(objective, n_trials=100)
+ # run distributed optimization (executed in Kubernetes Pods)
+ studyWrapper.optimize(
+     objective=objective,
+     n_trials=100,
+     parallelism=4,
+     requirements_list=['torch==2.0.0', 'torchvision==0.15.0']
+ )
+
+ # real-time status monitoring
+ status = studyWrapper.get_status()
+ print(f"Active: {status['count_active']}, Completed: {status['count_completed']}")
  ```
@@ -0,0 +1,83 @@
+ # AIAuto - Hyperparameter Optimization Client Library
+
+ AIAuto is a client library for a Kubernetes-based distributed HPO (Hyperparameter Optimization) system.
+ Handles gRPC communication between the user's Python lib and the Next.js server
+
+ ## lib build
+ - install the PyPI build/upload dependencies: `uv add --dev twine`
+ - build the lib: `uv build`
+ - produces `aiauto_client-0.1.1-py3-none-any.whl`
+ - produces `aiauto_client-0.1.1.tar.gz`
+ - produces `aiauto_client.egg-info`
+ - check the settings in `~/.pypirc`
+ - `twine upload --repository aiauto-client dist/*`
+ - `twine upload dist/*`
+ - on upload you are prompted for a PyPI token; you can find it in your PyPI account settings
+
+ ## Install
+ - `uv add aiauto-client`
+
+ ## Quick start
+
+ ### Creating a study and the ask/tell pattern
+ ```python
+ import aiauto
+
+ # create a StudyWrapper (JWT token required)
+ studyWrapper = aiauto.create_study(
+     study_name='my_optimization',
+     token='your_jwt_token',
+     direction='maximize'
+ )
+
+ # get the actual optuna.Study object (ask/tell works locally)
+ study = studyWrapper.get_study()
+
+ # optimize with the ask/tell pattern
+ trial = study.ask()
+ params = trial.params
+
+ # train the user's model
+ accuracy = train_model(params)
+
+ # report the result
+ study.tell(trial, accuracy)
+ ```
+
+ ### Distributed optimization (Pod execution)
+ ```python
+ import aiauto
+
+ def objective(trial):
+     tc = aiauto.TrialController(trial)
+
+     # sample hyperparameters
+     lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
+     batch_size = trial.suggest_int('batch_size', 16, 128)
+
+     # model training logic
+     accuracy = train_model(lr, batch_size)
+
+     tc.log(f'lr: {lr}, batch_size: {batch_size}, accuracy: {accuracy}')
+
+     return accuracy
+
+ # create a StudyWrapper
+ studyWrapper = aiauto.create_study(
+     study_name='distributed_optimization',
+     token='your_jwt_token',
+     direction='maximize'
+ )
+
+ # run distributed optimization (executed in Kubernetes Pods)
+ studyWrapper.optimize(
+     objective=objective,
+     n_trials=100,
+     parallelism=4,
+     requirements_list=['torch==2.0.0', 'torchvision==0.15.0']
+ )
+
+ # real-time status monitoring
+ status = studyWrapper.get_status()
+ print(f"Active: {status['count_active']}, Completed: {status['count_completed']}")
+ ```
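
The README ends with a single `get_status()` call; in practice the call would be repeated until the batch finishes. A polling sketch using the snake_case keys `get_status()` returns (the 30-second interval and the completion condition are illustrative assumptions, not part of the library):

```python
import time

# poll until every requested trial has finished (completed, pruned, or failed)
status = studyWrapper.get_status()
while status['count_completed'] + status['count_pruned'] + status['count_failed'] < 100:
    time.sleep(30)  # arbitrary polling interval
    status = studyWrapper.get_status()

print(f"Done: {status['count_completed']} completed, dashboard at {status['dashboard_url']}")
```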
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "aiauto-client"
- version = "0.1.0"
+ version = "0.1.1"
  description = "AI Auto HPO (Hyperparameter Optimization) Client Library"
  readme = "README.md"
  requires-python = ">=3.8"
@@ -27,9 +27,7 @@ classifiers = [
  ]
  dependencies = [
      "optuna>=3.0.0",
-     "grpcio>=1.50.0",
-     "grpcio-tools>=1.50.0",
-     "protobuf>=4.0.0",
+     "requests>=2.25.0",
  ]

  [project.urls]
@@ -58,3 +56,9 @@ testpaths = ["tests"]
  python_files = ["test_*.py", "*_test.py"]
  python_classes = ["Test*"]
  python_functions = ["test_*"]
+
+ [dependency-groups]
+ dev = [
+     "pytest>=8.3.5",
+     "twine>=6.1.0",
+ ]
@@ -0,0 +1,16 @@
+ from .core import AIAutoController, TrialController, CallbackTopNArtifact, StudyWrapper
+ from .api import create_study
+ from ._config import AIAUTO_API_TARGET
+ from .constants import RUNTIME_IMAGES
+
+ __version__ = "0.1.1"
+
+ __all__ = [
+     'AIAutoController',
+     'TrialController',
+     'CallbackTopNArtifact',
+     'StudyWrapper',
+     'create_study',
+     'AIAUTO_API_TARGET',
+     'RUNTIME_IMAGES',
+ ]
@@ -0,0 +1,3 @@
+ # For Connect RPC over HTTP
+ # This will be converted to https://api.aiauto.cloud.ainode.ai in production
+ AIAUTO_API_TARGET = "api.aiauto.cloud.ainode.ai:443"
@@ -0,0 +1,46 @@
+ from typing import Optional, List, Union
+ from .serializer import object_to_json
+ from .core import StudyWrapper, AIAutoController
+
+
+ def create_study(
+     study_name: str,
+     token: str,
+     direction: Optional[str] = None,
+     directions: Optional[List[str]] = None,
+     sampler: Union[object, dict, None] = None,
+     pruner: Union[object, dict, None] = None
+ ) -> StudyWrapper:
+     if not direction and not directions:
+         raise ValueError("Either 'direction' or 'directions' must be specified")
+
+     if direction and directions:
+         raise ValueError("Cannot specify both 'direction' and 'directions'")
+
+     try:
+         # Initialize controller (which ensures workspace)
+         controller = AIAutoController(token)
+
+         # Prepare request data for CreateStudy
+         request_data = {
+             "spec": {
+                 "studyName": study_name,
+                 "direction": direction or "",
+                 "directions": directions or [],
+                 "samplerJson": object_to_json(sampler),
+                 "prunerJson": object_to_json(pruner)
+             }
+         }
+
+         # Call CreateStudy RPC
+         response = controller.client.call_rpc("CreateStudy", request_data)
+
+         # Return StudyWrapper
+         return StudyWrapper(
+             study_name=response.get("studyName", study_name),
+             storage=controller.storage,
+             controller=controller
+         )
+
+     except Exception as e:
+         raise RuntimeError(f"Failed to create study: {e}") from e
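
Since `sampler` and `pruner` accept an optuna object, a dict, or `None`, a call might look like the sketch below. The token is a placeholder, and the dict shape for `pruner` is an assumption about what the server accepts; `object_to_json` in serializer.py passes dicts through verbatim:

```python
import optuna
import aiauto

# sampler as an optuna object: encoded as module/class plus recoverable kwargs
# pruner as a plain dict: serialized verbatim by object_to_json
studyWrapper = aiauto.create_study(
    study_name='sketch_study',
    token='your_jwt_token',
    direction='minimize',
    sampler=optuna.samplers.RandomSampler(),
    pruner={'class': 'MedianPruner', 'kwargs': {'n_warmup_steps': 5}},
)
```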
@@ -0,0 +1,28 @@
+ # Runtime image constants for AIAuto
+ # These images are pre-tested and guaranteed to work with AIAuto
+
+ # Available runtime images
+ RUNTIME_IMAGES = [
+     # CPU Images
+     "ghcr.io/astral-sh/uv:python3.8-bookworm-slim",
+     "ghcr.io/astral-sh/uv:python3.9-bookworm-slim",
+     "ghcr.io/astral-sh/uv:python3.10-bookworm-slim",
+     "ghcr.io/astral-sh/uv:python3.11-bookworm-slim",
+     "ghcr.io/astral-sh/uv:python3.12-bookworm-slim",
+
+     # GPU Images (PyTorch)
+     "pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime",
+     "pytorch/pytorch:2.1.0-cuda12.1-cudnn8-runtime",
+     "pytorch/pytorch:2.4.0-cuda12.4-cudnn9-runtime",
+
+     # GPU Images (TensorFlow)
+     "tensorflow/tensorflow:2.15.0-gpu",
+     "tensorflow/tensorflow:2.15.0-gpu-jupyter",
+
+     # JAX Images
+     "nvcr.io/nvidia/jax:23.10-py3",
+     "nvcr.io/nvidia/jax:24.04-py3",
+
+     # Custom/Legacy images
+     "ghcr.io/01ai/zipline:latest",  # Custom zipline trading library
+ ]
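
The entries are plain image references, so choosing one is just a matter of passing it to `StudyWrapper.optimize` (defined in core.py below). A sketch reusing `objective` and `studyWrapper` from the README example:

```python
import aiauto

# pick one of the pre-tested GPU images
image = "pytorch/pytorch:2.4.0-cuda12.4-cudnn9-runtime"
assert image in aiauto.RUNTIME_IMAGES

studyWrapper.optimize(
    objective=objective,
    n_trials=50,
    parallelism=2,
    runtime_image=image,
    use_gpu=True,
)
```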
@@ -0,0 +1,197 @@
+ from os import makedirs
+ import tempfile
+ from typing import Union, Optional, List, Dict, Callable
+ import optuna
+ from .http_client import ConnectRPCClient
+ from .serializer import serialize, build_requirements
+ from ._config import AIAUTO_API_TARGET
+
+
+ class AIAutoController:
+     _instances = {}
+
+     def __new__(cls, token: str):
+         if token not in cls._instances:
+             cls._instances[token] = super().__new__(cls)
+         return cls._instances[token]
+
+     def __init__(self, token: str):
+         if hasattr(self, 'token') and self.token == token:
+             return
+
+         self.token = token
+         self.client = ConnectRPCClient(token)
+
+         # call EnsureWorkspace, get journal_grpc_storage_proxy_host_external, and initialize storage with it
+         try:
+             response = self.client.call_rpc("EnsureWorkspace", {})
+
+             # initialize storage with the returned journal_grpc_storage_proxy_host_external
+             host_external = response.get('journalGrpcStorageProxyHostExternal', '')
+             if not host_external:
+                 raise RuntimeError("No storage host returned from EnsureWorkspace")
+
+             host, port = host_external.split(':')
+             self.storage = optuna.storages.GrpcStorageProxy(host=host, port=int(port))
+
+             # Store the internal host for CRD usage (if needed later)
+             self.storage_host_internal = response.get('journalGrpcStorageProxyHostInternal', '')
+             self.dashboard_url = response.get('dashboardUrl', '')
+
+         except Exception as e:
+             raise RuntimeError(f"Failed to initialize workspace: {e}") from e
+
+         # artifact storage
+         makedirs('./artifacts', exist_ok=True)
+         self.artifact_store = optuna.artifacts.FileSystemArtifactStore('./artifacts')
+         self.tmp_dir = tempfile.mkdtemp(prefix='ai_auto_tmp_')
+
+     def get_storage(self):
+         return self.storage
+
+     def get_artifact_store(self) -> Union[
+         optuna.artifacts.FileSystemArtifactStore,
+         optuna.artifacts.Boto3ArtifactStore,
+         optuna.artifacts.GCSArtifactStore,
+     ]:
+         return self.artifact_store
+
+     def get_artifact_tmp_dir(self):
+         return self.tmp_dir
+
+
+ class TrialController:
+     def __init__(self, trial: optuna.trial.Trial):
+         self.trial = trial
+         self.logger = optuna.logging.get_logger("optuna")
+         self.logs = []
+
+     def get_trial(self) -> optuna.trial.Trial:
+         return self.trial
+
+     def log(self, value: str):
+         # the optuna dashboard has no log view, so logs are also stored in a user attribute where they can be inspected
+         self.logs.append(value)
+         self.trial.set_user_attr('logs', ' '.join([f"[{i+1:05d}] {log}" for i, log in enumerate(self.logs)]))
+         # also emit the actual log together with the trial number
+         self.logger.info(f'\ntrial_number: {self.trial.number}, {value}')
+
+
+ # keep only the top-N trial artifacts to bound storage usage
+ class CallbackTopNArtifact:
+     def __init__(
+         self,
+         artifact_store: Union[
+             optuna.artifacts.FileSystemArtifactStore,
+             optuna.artifacts.Boto3ArtifactStore,
+             optuna.artifacts.GCSArtifactStore,
+         ],
+         artifact_attr_name: str = 'artifact_id',
+         n_keep: int = 5,
+     ):
+         self.artifact_store = artifact_store
+         self.check_attr_name = artifact_attr_name
+         self.n_keep = n_keep
+
+     def __call__(self, study: optuna.study.Study, trial: optuna.trial.FrozenTrial):
+         # sort only COMPLETE trials that have an artifact
+         finished_with_artifacts = [
+             t for t in study.trials
+             if t.state == optuna.trial.TrialState.COMPLETE and self.check_attr_name in t.user_attrs
+         ]
+
+         # sort by direction (descending for maximize, ascending for minimize)
+         reverse_sort = study.direction == optuna.study.StudyDirection.MAXIMIZE
+         finished_with_artifacts.sort(key=lambda t: t.value, reverse=reverse_sort)
+
+         # delete the artifacts of trials beyond the top n_keep
+         for old_trial in finished_with_artifacts[self.n_keep:]:
+             artifact_id = old_trial.user_attrs.get(self.check_attr_name)
+             if artifact_id:
+                 try:
+                     self.artifact_store.remove(artifact_id)
+                     # also clear the user attribute
+                     study._storage.set_trial_user_attr(old_trial._trial_id, self.check_attr_name, None)
+                 except Exception as e:
+                     print(f"Warning: Failed to remove artifact {artifact_id}: {e}")
+
+
+ class StudyWrapper:
+     def __init__(self, study_name: str, storage, controller: AIAutoController):
+         self.study_name = study_name
+         self._storage = storage
+         self._controller = controller
+         self._study = None
+
+     def get_study(self) -> optuna.Study:
+         if self._study is None:
+             try:
+                 self._study = optuna.create_study(
+                     study_name=self.study_name,
+                     storage=self._storage,
+                     load_if_exists=True
+                 )
+             except Exception as e:
+                 raise RuntimeError("Study not ready. Call get_status() and wait for phase=Ready.") from e
+         return self._study
+
+     def optimize(
+         self,
+         objective: Callable,
+         n_trials: int,
+         parallelism: int,
+         requirements_file: Optional[str] = None,
+         requirements_list: Optional[List[str]] = None,
+         resources_requests: Optional[Dict[str, str]] = None,
+         resources_limits: Optional[Dict[str, str]] = None,
+         runtime_image: Optional[str] = None,
+         use_gpu: bool = False
+     ) -> None:
+         try:
+             request_data = {
+                 "objective": {
+                     "sourceCode": serialize(objective),
+                     "requirementsTxt": build_requirements(requirements_file, requirements_list)
+                 },
+                 "batch": {
+                     "studyName": self.study_name,
+                     "nTrials": n_trials,
+                     "parallelism": parallelism,
+                     "runtimeImage": runtime_image or "",
+                     "resourcesRequests": resources_requests or {},
+                     "resourcesLimits": resources_limits or {},
+                     "useGpu": use_gpu
+                 }
+             }
+
+             self._controller.client.call_rpc("Optimize", request_data)
+
+         except Exception as e:
+             raise RuntimeError(f"Failed to start optimization: {e}") from e
+
+     def get_status(self) -> dict:
+         try:
+             response = self._controller.client.call_rpc(
+                 "GetStatus",
+                 {"studyName": self.study_name}
+             )
+
+             # Convert camelCase to snake_case for backward compatibility
+             return {
+                 "study_name": response.get("studyName", ""),
+                 "count_active": response.get("countActive", 0),
+                 "count_succeeded": response.get("countSucceeded", 0),
+                 "count_pruned": response.get("countPruned", 0),
+                 "count_failed": response.get("countFailed", 0),
+                 "count_total": response.get("countTotal", 0),
+                 "count_completed": response.get("countCompleted", 0),
+                 "dashboard_url": response.get("dashboardUrl", ""),
+                 "last_error": response.get("lastError", ""),
+                 "updated_at": response.get("updatedAt", "")
+             }
+
+         except Exception as e:
+             raise RuntimeError(f"Failed to get status: {e}") from e
+
+     def __repr__(self) -> str:
+         return f"StudyWrapper(study_name='{self.study_name}', storage={self._storage})"
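
`CallbackTopNArtifact` assumes each completed trial recorded its artifact id under the `artifact_id` user attribute. A self-contained local sketch of that contract against plain optuna (the file naming and quadratic objective are illustrative; `upload_artifact` is optuna's artifact helper, not part of this package):

```python
import os
import optuna
from optuna.artifacts import FileSystemArtifactStore, upload_artifact
from aiauto import CallbackTopNArtifact

os.makedirs('./artifacts', exist_ok=True)
store = FileSystemArtifactStore('./artifacts')

def objective(trial):
    x = trial.suggest_float('x', -10, 10)
    value = (x - 2) ** 2

    # save a per-trial artifact and record its id where the callback looks
    path = f'model_{trial.number}.txt'
    with open(path, 'w') as f:
        f.write(str(value))
    trial.set_user_attr('artifact_id', upload_artifact(trial, path, store))
    return value

study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=20,
               callbacks=[CallbackTopNArtifact(artifact_store=store, n_keep=5)])
```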
@@ -0,0 +1,50 @@
+ """HTTP client for Connect RPC communication with Next.js server."""
+ import requests
+ from typing import Dict, Any, Optional
+ from ._config import AIAUTO_API_TARGET
+
+
+ class ConnectRPCClient:
+     """Client for calling Connect RPC endpoints via HTTP/JSON."""
+
+     def __init__(self, token: str, base_url: Optional[str] = None):
+         self.token = token
+         # Convert gRPC target to HTTP URL
+         if base_url:
+             self.base_url = base_url
+         else:
+             # AIAUTO_API_TARGET is like "api.aiauto.cloud.ainode.ai:443"
+             # Convert to "https://api.aiauto.cloud.ainode.ai"
+             host = AIAUTO_API_TARGET.split(':')[0]
+             self.base_url = f"https://{host}"
+
+         self.headers = {
+             "Authorization": f"Bearer {token}",
+             "Content-Type": "application/json",
+             "Connect-Protocol-Version": "1"
+         }
+
+     def call_rpc(self, method: str, request_data: Dict[str, Any]) -> Dict[str, Any]:
+         """Call a Connect RPC method and return the response."""
+         url = f"{self.base_url}/api/aiauto.v1.AIAutoService/{method}"
+
+         try:
+             response = requests.post(url, json=request_data, headers=self.headers)
+             response.raise_for_status()
+             return response.json()
+         except requests.exceptions.HTTPError as e:
+             # Connect RPC error format
+             # (compare against None: a 4xx/5xx Response is falsy, so a plain truthiness check would skip this branch)
+             if e.response is not None and e.response.headers.get('content-type', '').startswith('application/json'):
+                 error_data = e.response.json()
+                 error_msg = error_data.get('message', str(e))
+                 raise RuntimeError(f"RPC error: {error_msg}") from e
+             raise RuntimeError(f"HTTP error: {e}") from e
+         except requests.exceptions.RequestException as e:
+             raise RuntimeError(f"Request failed: {e}") from e
+
+
+ def map_http_error(exc: Exception) -> Exception:
+     """Convert HTTP/Connect RPC errors to standard exceptions."""
+     # For now, just pass through the exception
+     # In the future, we can add more sophisticated error mapping
+     return exc
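
A minimal sketch of using the client directly (the token is a placeholder; the response keys mirror the `EnsureWorkspace` handling in core.py above):

```python
from aiauto.http_client import ConnectRPCClient

client = ConnectRPCClient('your_jwt_token')
workspace = client.call_rpc("EnsureWorkspace", {})
print(workspace.get('dashboardUrl', ''))
print(workspace.get('journalGrpcStorageProxyHostExternal', ''))
```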
@@ -0,0 +1,55 @@
+ import inspect
+ import json
+ from typing import Callable, Union, List, Optional
+
+
+ def serialize(objective: Callable) -> str:
+     try:
+         return inspect.getsource(objective)
+     except Exception as e:
+         raise ValueError("objective must be a module-level def (decorators, locals, and lambdas are not allowed)") from e
+
+
+ def build_requirements(file_path: Optional[str] = None, reqs: Optional[List[str]] = None) -> str:
+     if file_path and reqs:
+         raise ValueError("requirements_file and requirements_list cannot be specified together")
+
+     if file_path:
+         with open(file_path, 'r') as f:
+             return f.read()
+     elif reqs:
+         return "\n".join(reqs)
+     else:
+         return ""
+
+
+ def object_to_json(obj: Union[object, dict, None]) -> str:
+     if obj is None:
+         return ""
+
+     if isinstance(obj, dict):
+         return json.dumps(obj)
+
+     cls = type(obj)
+     module_name = cls.__module__
+     class_name = cls.__name__
+
+     if not module_name.startswith('optuna.'):
+         raise ValueError(f"Only optuna core classes are supported: {class_name}")
+
+     sig = inspect.signature(cls)
+     kwargs = {}
+
+     for param_name, param in sig.parameters.items():
+         if param_name == 'self':
+             continue
+         if hasattr(obj, param_name):
+             value = getattr(obj, param_name)
+             if param.default != value:
+                 kwargs[param_name] = value
+
+     return json.dumps({
+         "module": module_name,
+         "class": class_name,
+         "kwargs": kwargs
+     })
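
A quick sketch of the three `object_to_json` cases. Note that kwargs recovery relies on the instance exposing constructor parameters as same-named attributes, so for many optuna classes (which store them with leading underscores) `kwargs` comes out empty:

```python
import optuna
from aiauto.serializer import object_to_json

print(object_to_json(None))          # -> ''
print(object_to_json({'seed': 42}))  # dicts pass through verbatim -> '{"seed": 42}'
print(object_to_json(optuna.pruners.MedianPruner()))
# -> e.g. '{"module": "optuna.pruners._median", "class": "MedianPruner", "kwargs": {}}'
```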
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: aiauto-client
- Version: 0.1.0
+ Version: 0.1.1
  Summary: AI Auto HPO (Hyperparameter Optimization) Client Library
  Author-email: AIAuto Team <ainode@zeroone.ai>
  Project-URL: Homepage, https://aiauto.cloude.ainode.ai
@@ -21,9 +21,7 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
  Requires-Python: >=3.8
  Description-Content-Type: text/markdown
  Requires-Dist: optuna>=3.0.0
- Requires-Dist: grpcio>=1.50.0
- Requires-Dist: grpcio-tools>=1.50.0
- Requires-Dist: protobuf>=4.0.0
+ Requires-Dist: requests>=2.25.0

  # AIAuto - Hyperparameter Optimization Client Library

@@ -31,44 +29,80 @@ AIAuto is a client library for a Kubernetes-based distributed HPO (Hyperparameter Optimization) system.
  Handles gRPC communication between the user's Python lib and the Next.js server

  ## lib build
- - install the PyPI build/upload dependencies: `pip install build twine`
- - build the lib: `python -m build --wheel --sdist`
- - produces `aiauto-0.1.0.whl`
- - produces `aiauto-0.1.0.tar.gz`
- - produces `aiauto.egg-info`
- - `twine upload --repository testpypi dist/*`
+ - install the PyPI build/upload dependencies: `uv add --dev twine`
+ - build the lib: `uv build`
+ - produces `aiauto_client-0.1.1-py3-none-any.whl`
+ - produces `aiauto_client-0.1.1.tar.gz`
+ - produces `aiauto_client.egg-info`
+ - check the settings in `~/.pypirc`
+ - `twine upload --repository aiauto-client dist/*`
  - `twine upload dist/*`
  - on upload you are prompted for a PyPI token; you can find it in your PyPI account settings

  ## Install
- - `pip install aiauto`
+ - `uv add aiauto-client`

  ## Quick start
+
+ ### Creating a study and the ask/tell pattern
  ```python
  import aiauto

- # initialize the controller
- ac = aiauto.AIAutoController()
+ # create a StudyWrapper (JWT token required)
+ studyWrapper = aiauto.create_study(
+     study_name='my_optimization',
+     token='your_jwt_token',
+     direction='maximize'
+ )
+
+ # get the actual optuna.Study object (ask/tell works locally)
+ study = studyWrapper.get_study()
+
+ # optimize with the ask/tell pattern
+ trial = study.ask()
+ params = trial.params
+
+ # train the user's model
+ accuracy = train_model(params)
+
+ # report the result
+ study.tell(trial, accuracy)
+ ```
+
+ ### Distributed optimization (Pod execution)
+ ```python
+ import aiauto

- # define the objective function
  def objective(trial):
      tc = aiauto.TrialController(trial)

      # sample hyperparameters
      lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
+     batch_size = trial.suggest_int('batch_size', 16, 128)

-     # model training and evaluation logic
-     # ...
-     tc.log(f'full dataset: train {len(dataset)}, test {len(dataset_test)}, batch_size {batch_size}')
+     # model training logic
+     accuracy = train_model(lr, batch_size)
+
+     tc.log(f'lr: {lr}, batch_size: {batch_size}, accuracy: {accuracy}')

      return accuracy

- # create the study and run the optimization
- study = optuna.create_study(
-     study_name='my_optimization',
-     storage=ac.get_storage(),
+ # create a StudyWrapper
+ studyWrapper = aiauto.create_study(
+     study_name='distributed_optimization',
+     token='your_jwt_token',
      direction='maximize'
  )

- study.optimize(objective, n_trials=100)
+ # run distributed optimization (executed in Kubernetes Pods)
+ studyWrapper.optimize(
+     objective=objective,
+     n_trials=100,
+     parallelism=4,
+     requirements_list=['torch==2.0.0', 'torchvision==0.15.0']
+ )
+
+ # real-time status monitoring
+ status = studyWrapper.get_status()
+ print(f"Active: {status['count_active']}, Completed: {status['count_completed']}")
  ```
@@ -1,10 +1,15 @@
  README.md
  pyproject.toml
  src/aiauto/__init__.py
+ src/aiauto/_config.py
+ src/aiauto/api.py
+ src/aiauto/constants.py
  src/aiauto/core.py
- src/aiauto/serialization.py
+ src/aiauto/http_client.py
+ src/aiauto/serializer.py
  src/aiauto_client.egg-info/PKG-INFO
  src/aiauto_client.egg-info/SOURCES.txt
  src/aiauto_client.egg-info/dependency_links.txt
  src/aiauto_client.egg-info/requires.txt
- src/aiauto_client.egg-info/top_level.txt
+ src/aiauto_client.egg-info/top_level.txt
+ tests/test_local_storage.py
@@ -0,0 +1,2 @@
+ optuna>=3.0.0
+ requests>=2.25.0
@@ -0,0 +1,59 @@
+ import aiauto
+ import optuna
+ from os import makedirs
+ from unittest.mock import patch
+
+
+ def objective(trial):
+     """Simple quadratic-function optimization example"""
+     # logging via TrialController
+     tc = aiauto.TrialController(trial)
+     tc.log("Starting simple optimization example")
+
+     # sample hyperparameters
+     x = trial.suggest_float('x', -10, 10)
+     y = trial.suggest_float('y', -10, 10)
+
+     # objective: minimize (x-2)² + (y-5)²
+     result = (x - 2) ** 2 + (y - 5) ** 2
+
+     tc.log(f"x={x:.3f}, y={y:.3f}, result={result:.3f}")
+
+     return result
+
+
+ def main():
+     print("🚀 AIAuto source-code serialization local test")
+
+     # patch AIAutoController.__init__ so no workspace RPC is made
+     with patch.object(aiauto.AIAutoController, '__init__', lambda self, token: None):
+         controller = aiauto.AIAutoController('dummy_token')
+         # InMemoryStorage for local testing
+         controller.storage = optuna.storages.InMemoryStorage()
+         makedirs('./artifacts', exist_ok=True)
+         controller.artifact_store = optuna.artifacts.FileSystemArtifactStore('./artifacts')
+
+         # source-code serialization test
+         print("\n=== source-code serialization test ===")
+         study_wrapper = aiauto.StudyWrapper(
+             study_name='local_test',
+             storage=controller.storage,
+             controller=controller
+         )
+
+         print("✅ StudyWrapper created!")
+
+         # run the optimization locally through the underlying optuna.Study
+         # (get_study() creates the study with optuna's default direction, 'minimize')
+         print("\n=== running optimization ===")
+         study = study_wrapper.get_study()
+         study.optimize(objective, n_trials=10)
+
+         # print the results
+         print("\n🎉 Optimization finished!")
+         print(f"📊 Best value: {study.best_value:.3f}")
+         print(f"🔧 Best params: {study.best_params}")
+
+         # theoretical optimum: x=2, y=5, result=0
+         print("💡 Theoretical optimum: x=2, y=5, result=0")
+         print(f"📈 Error: {study.best_value:.3f}")
+
+
+ if __name__ == "__main__":
+     main()
@@ -1,47 +0,0 @@
- # AIAuto - Hyperparameter Optimization Client Library
-
- AIAuto is a client library for a Kubernetes-based distributed HPO (Hyperparameter Optimization) system.
- Handles gRPC communication between the user's Python lib and the Next.js server
-
- ## lib build
- - install the PyPI build/upload dependencies: `pip install build twine`
- - build the lib: `python -m build --wheel --sdist`
- - produces `aiauto-0.1.0.whl`
- - produces `aiauto-0.1.0.tar.gz`
- - produces `aiauto.egg-info`
- - `twine upload --repository testpypi dist/*`
- - `twine upload dist/*`
- - on upload you are prompted for a PyPI token; you can find it in your PyPI account settings
-
- ## Install
- - `pip install aiauto`
-
- ## Quick start
- ```python
- import aiauto
-
- # initialize the controller
- ac = aiauto.AIAutoController()
-
- # define the objective function
- def objective(trial):
-     tc = aiauto.TrialController(trial)
-
-     # sample hyperparameters
-     lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
-
-     # model training and evaluation logic
-     # ...
-     tc.log(f'full dataset: train {len(dataset)}, test {len(dataset_test)}, batch_size {batch_size}')
-
-     return accuracy
-
- # create the study and run the optimization
- study = optuna.create_study(
-     study_name='my_optimization',
-     storage=ac.get_storage(),
-     direction='maximize'
- )
-
- study.optimize(objective, n_trials=100)
- ```
@@ -1,50 +0,0 @@
- from .core import AIAutoController, TrialController, CallbackTopNArtifact, StudyWrapper
- from .serialization import SourceCodeSerializer, create_study_with_source_serialization
-
- __version__ = "0.1.0"
-
- __all__ = [
-     'AIAutoController',
-     'TrialController',
-     'CallbackTopNArtifact',
-     'StudyWrapper',
-     'SourceCodeSerializer',
-     'create_study_with_source_serialization',
- ]
-
- # convenience function for Optuna compatibility
- def create_study(
-     objective=None,
-     study_name='aiauto_study',
-     direction='minimize',
-     **kwargs
- ):
-     """
-     Optuna-compatible create_study function
-
-     Usage:
-         study = aiauto.create_study(
-             objective=my_objective,
-             study_name='my_study',
-             direction='maximize'
-         )
-         study.optimize(n_trials=100)
-     """
-     controller = AIAutoController()
-
-     if objective is not None:
-         return controller.create_study_with_serialization(
-             objective=objective,
-             study_name=study_name,
-             direction=direction,
-             **kwargs
-         )
-     else:
-         # without an objective, return a plain optuna study (legacy behavior)
-         import optuna
-         return optuna.create_study(
-             study_name=study_name,
-             direction=direction,
-             storage=controller.get_storage(),
-             **kwargs
-         )
@@ -1,257 +0,0 @@
- from os import makedirs, environ
- import tempfile
- from typing import Union, Callable, Dict, Any, Optional
- import optuna
-
- from .serialization import create_study_with_source_serialization, SourceCodeSerializer
-
-
- class AIAutoController:
-     # singleton pattern
-     def __new__(cls, *args, **kwargs):
-         if not hasattr(cls, "_instance"):
-             cls._instance = super().__new__(cls)
-         return cls._instance
-
-     # singleton pattern
-     def __init__(self):
-         cls = type(self)
-         if not hasattr(cls, "_init"):
-             # singleton pattern
-             # ---------------------
-
-             # TODO token authentication
-             token = environ.get('AIAUTO_TOKEN')
-
-             # storage setup per mode
-             mode = environ.get('AIAUTO_MODE', 'single_gpu')
-             if mode == "distributed":
-                 # use RDBStorage to support DDP/FSDP pruning callbacks
-                 self.storage = optuna.storages.RDBStorage(
-                     url="sqlite:///optuna.db",
-                     engine_kwargs={"connect_args": {"timeout": 10}}
-                 )
-             else:
-                 # default GrpcStorageProxy (single GPU etc.)
-                 self.storage = optuna.storages.GrpcStorageProxy(host="localhost", port=13000)
-
-             # artifact storage
-             # TODO swap out later for s3 or another mounted path
-             makedirs('./artifacts', exist_ok=True)
-             self.artifact_store = optuna.artifacts.FileSystemArtifactStore('./artifacts')
-             # temporary directory for saving models
-             self.tmp_dir = tempfile.mkdtemp(prefix=f'ai_auto_tmp_')
-
-             # ---------------------
-             # singleton pattern end
-             cls._init = True
-
-     def get_storage(self):
-         return self.storage
-
-     def get_artifact_store(self) -> Union[
-         optuna.artifacts.FileSystemArtifactStore,
-         optuna.artifacts.Boto3ArtifactStore,
-         optuna.artifacts.GCSArtifactStore,
-     ]:
-         return self.artifact_store
-
-     def get_artifact_tmp_dir(self):
-         return self.tmp_dir
-
-     def create_study_with_serialization(
-         self,
-         objective: Callable,
-         study_name: str,
-         direction: str = 'minimize',
-         sampler: Optional[optuna.samplers.BaseSampler] = None,
-         pruner: Optional[optuna.pruners.BasePruner] = None,
-         **optuna_kwargs
-     ) -> 'StudyWrapper':
-         """
-         Create a study using source-code serialization
-
-         Args:
-             objective: objective function used for HPO
-             study_name: study name
-             direction: optimization direction ('minimize' or 'maximize')
-             sampler: Optuna sampler (default: TPESampler)
-             pruner: Optuna pruner
-             **optuna_kwargs: extra arguments passed to optuna.create_study
-
-         Returns:
-             StudyWrapper object (Optuna Study compatible)
-         """
-         study_config = {
-             'study_name': study_name,
-             'direction': direction,
-             'sampler': sampler.__class__.__name__ if sampler else 'TPESampler',
-             'pruner': pruner.__class__.__name__ if pruner else None,
-         }
-
-         # serialize the source code
-         serialized_objective, processed_config = create_study_with_source_serialization(
-             objective, study_config, **optuna_kwargs
-         )
-
-         # create the StudyWrapper (the actual gRPC transfer happens at optimize time)
-         return StudyWrapper(
-             serialized_objective=serialized_objective,
-             study_config=processed_config,
-             storage=self.storage,
-             artifact_store=self.artifact_store
-         )
-
-
- class TrialController:
-     def __init__(self, trial: optuna.trial.Trial):
-         self.trial = trial
-         self.logger = optuna.logging.get_logger("optuna")
-         self.logs = []
-
-     def get_trial(self) -> optuna.trial.Trial:
-         return self.trial
-
-     def log(self, value: str):
-         # the optuna dashboard has no log view, so logs are also stored in a user attribute where they can be inspected
-         self.logs.append(value)
-         self.trial.set_user_attr('logs', ' '.join([f"[{i+1:05d}] {log}" for i, log in enumerate(self.logs)]))
-         # also emit the actual log together with the trial number
-         self.logger.info(f'\ntrial_number: {self.trial.number}, {value}')
-
-
- # keep only the top-N trial artifacts to bound storage usage
- class CallbackTopNArtifact:
-     def __init__(
-         self,
-         artifact_store: Union[
-             optuna.artifacts.FileSystemArtifactStore,
-             optuna.artifacts.Boto3ArtifactStore,
-             optuna.artifacts.GCSArtifactStore,
-         ],
-         artifact_attr_name: str = 'artifact_id',
-         n_keep: int = 5,
-     ):
-         self.artifact_store = artifact_store
-         self.check_attr_name = artifact_attr_name
-         self.n_keep = n_keep
-
-     def __call__(self, study: optuna.study.Study, trial: optuna.trial.FrozenTrial):
-         # sort only COMPLETE trials that have an artifact
-         finished_with_artifacts = [
-             t for t in study.trials
-             if t.state == optuna.trial.TrialState.COMPLETE and self.check_attr_name in t.user_attrs
-         ]
-
-         # sort by direction (descending for maximize, ascending for minimize)
-         reverse_sort = study.direction == optuna.study.StudyDirection.MAXIMIZE
-         finished_with_artifacts.sort(key=lambda t: t.value, reverse=reverse_sort)
-
-         # delete the artifacts of trials beyond the top n_keep
-         for old_trial in finished_with_artifacts[self.n_keep:]:
-             artifact_id = old_trial.user_attrs.get(self.check_attr_name)
-             if artifact_id:
-                 try:
-                     self.artifact_store.remove(artifact_id)
-                     # also clear the user attribute
-                     study._storage.set_trial_user_attr(old_trial._trial_id, self.check_attr_name, None)
-                 except Exception as e:
-                     print(f"Warning: Failed to remove artifact {artifact_id}: {e}")
-
-
- class StudyWrapper:
-     """
-     Wrapper class providing Optuna Study compatibility
-
-     This class manages the source-code-serialized objective function and
-     talks to the gRPC backend to run the actual HPO.
-     """
-
-     def __init__(
-         self,
-         serialized_objective: Dict[str, Any],
-         study_config: Dict[str, Any],
-         storage,
-         artifact_store
-     ):
-         self.serialized_objective = serialized_objective
-         self.study_config = study_config
-         self.storage = storage
-         self.artifact_store = artifact_store
-         self._local_study = None  # for local testing
-
-     def optimize(
-         self,
-         n_trials: int = 100,
-         n_jobs: int = 1,
-         callbacks: Optional[list] = None,
-         **kwargs
-     ):
-         """
-         Run the HPO optimization
-
-         The real implementation sends this to the backend over gRPC;
-         for now it deserializes and runs locally for testing.
-         """
-         print("🚀 Starting HPO optimization with source code serialization...")
-         print(f"📊 Study: {self.study_config['study_name']}")
-         print(f"🎯 Direction: {self.study_config['direction']}")
-         print(f"🔢 Trials: {n_trials}")
-
-         try:
-             # restore the objective function by deserializing its source code
-             objective_func = SourceCodeSerializer.deserialize_objective(
-                 self.serialized_objective
-             )
-             print("✅ Objective function deserialized successfully")
-
-             # create a local study (in production this is gRPC communication)
-             self._local_study = optuna.create_study(
-                 study_name=self.study_config['study_name'],
-                 direction=self.study_config['direction'],
-                 storage=self.storage,
-                 load_if_exists=True
-             )
-
-             # run the optimization
-             self._local_study.optimize(
-                 objective_func,
-                 n_trials=n_trials,
-                 n_jobs=n_jobs,
-                 callbacks=callbacks or [],
-                 **kwargs
-             )
-
-             print(f"🎉 Optimization completed! Best value: {self.best_value}")
-
-         except Exception as e:
-             print(f"❌ Optimization failed: {e}")
-             raise
-
-     @property
-     def best_trial(self):
-         """Return the best-performing trial"""
-         if self._local_study:
-             return self._local_study.best_trial
-         return None
-
-     @property
-     def best_value(self):
-         """Return the best value"""
-         if self._local_study:
-             return self._local_study.best_value
-         return None
-
-     @property
-     def best_params(self):
-         """Return the best hyperparameters"""
-         if self._local_study:
-             return self._local_study.best_params
-         return None
-
-     @property
-     def trials(self):
-         """Return all trials"""
-         if self._local_study:
-             return self._local_study.trials
-         return []
@@ -1,138 +0,0 @@
- """
- Source Code Serialization Module
-
- This module provides source-code serialization via inspect.getsource,
- instead of CloudPickle, for compatibility across Python versions.
- """
-
- import inspect
- import types
- from typing import Callable, Dict, Any, Tuple
-
-
- class SourceCodeSerializer:
-     """Serializes objective functions as source code"""
-
-     @staticmethod
-     def serialize_objective(objective_func: Callable) -> Dict[str, Any]:
-         """
-         Serialize an objective function as source code
-
-         Args:
-             objective_func: objective function to serialize
-
-         Returns:
-             Serialized data dictionary
-             - source_code: the function's source code string
-             - func_name: function name
-             - dependencies: required import statements
-         """
-         try:
-             # extract the function source code
-             source_code = inspect.getsource(objective_func)
-             func_name = objective_func.__name__
-
-             # extract information from the module where the function is defined
-             module = inspect.getmodule(objective_func)
-             dependencies = []
-
-             if module and hasattr(module, '__file__'):
-                 # extract import statements from the module (simple approach)
-                 with open(module.__file__, 'r') as f:
-                     module_source = f.read()
-
-                 # extract import statements (use the ast module if better parsing is needed)
-                 lines = module_source.split('\n')
-                 for line in lines:
-                     line = line.strip()
-                     if line.startswith('import ') or line.startswith('from '):
-                         # only extract basic import statements
-                         if not any(skip in line for skip in ['client', '__', 'relative']):
-                             dependencies.append(line)
-
-             return {
-                 'source_code': source_code,
-                 'func_name': func_name,
-                 'dependencies': dependencies,
-                 'serialization_method': 'source_code'
-             }
-
-         except Exception as e:
-             raise RuntimeError(f"Failed to serialize objective function: {e}")
-
-     @staticmethod
-     def deserialize_objective(serialized_data: Dict[str, Any]) -> Callable:
-         """
-         Restore an objective function from serialized data
-
-         Args:
-             serialized_data: data produced by serialize_objective
-
-         Returns:
-             The restored objective function
-         """
-         try:
-             source_code = serialized_data['source_code']
-             func_name = serialized_data['func_name']
-             dependencies = serialized_data.get('dependencies', [])
-
-             # create the execution namespace
-             exec_namespace = {'__builtins__': __builtins__}
-
-             # run the dependency imports
-             for dep in dependencies:
-                 try:
-                     exec(dep, exec_namespace)
-                 except Exception as import_error:
-                     # import failures only warn and continue
-                     print(f"Warning: Failed to import dependency '{dep}': {import_error}")
-
-             # execute the source code
-             exec(source_code, exec_namespace)
-
-             # extract the function object
-             if func_name not in exec_namespace:
-                 raise NameError(f"Function '{func_name}' not found in executed namespace")
-
-             objective_func = exec_namespace[func_name]
-
-             if not callable(objective_func):
-                 raise TypeError(f"'{func_name}' is not callable")
-
-             return objective_func
-
-         except Exception as e:
-             raise RuntimeError(f"Failed to deserialize objective function: {e}")
-
-
- def create_study_with_source_serialization(
-     objective: Callable,
-     study_config: Dict[str, Any],
-     **optuna_kwargs
- ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
-     """
-     Prepare study creation using source-code serialization
-
-     Args:
-         objective: objective function used for HPO
-         study_config: study settings (name, direction, sampler, pruner, etc.)
-         **optuna_kwargs: extra arguments passed to optuna.create_study
-
-     Returns:
-         Tuple[serialized_objective, study_config]
-         - serialized_objective: serialized objective function data
-         - study_config: study configuration data
-     """
-     # serialize the objective function
-     serialized_objective = SourceCodeSerializer.serialize_objective(objective)
-
-     # tidy up the study settings
-     processed_config = {
-         'study_name': study_config.get('study_name', 'unnamed_study'),
-         'direction': study_config.get('direction', 'minimize'),
-         'sampler': study_config.get('sampler', 'TPESampler'),
-         'pruner': study_config.get('pruner', None),
-         'optuna_kwargs': optuna_kwargs
-     }
-
-     return serialized_objective, processed_config
@@ -1,4 +0,0 @@
- optuna>=3.0.0
- grpcio>=1.50.0
- grpcio-tools>=1.50.0
- protobuf>=4.0.0