aiauto-client 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
aiauto/__init__.py CHANGED
@@ -1,5 +1,7 @@
  from .core import AIAutoController, TrialController, CallbackTopNArtifact, StudyWrapper
- from .serialization import SourceCodeSerializer, create_study_with_source_serialization
+ from .api import create_study
+ from ._config import AIAUTO_API_TARGET
+ from .constants import RUNTIME_IMAGES
 
  __version__ = "0.1.0"
 
@@ -8,43 +10,7 @@ __all__ = [
      'TrialController',
      'CallbackTopNArtifact',
      'StudyWrapper',
-     'SourceCodeSerializer',
-     'create_study_with_source_serialization',
- ]
-
- # Convenience function for Optuna compatibility
- def create_study(
-     objective=None,
-     study_name='aiauto_study',
-     direction='minimize',
-     **kwargs
- ):
-     """
-     Optuna-compatible create_study function
-
-     Usage:
-         study = aiauto.create_study(
-             objective=my_objective,
-             study_name='my_study',
-             direction='maximize'
-         )
-         study.optimize(n_trials=100)
-     """
-     controller = AIAutoController()
-
-     if objective is not None:
-         return controller.create_study_with_serialization(
-             objective=objective,
-             study_name=study_name,
-             direction=direction,
-             **kwargs
-         )
-     else:
-         # Without an objective, return a plain optuna study (legacy behavior)
-         import optuna
-         return optuna.create_study(
-             study_name=study_name,
-             direction=direction,
-             storage=controller.get_storage(),
-             **kwargs
-         )
+     'create_study',
+     'AIAUTO_API_TARGET',
+     'RUNTIME_IMAGES',
+ ]
aiauto/_config.py ADDED
@@ -0,0 +1,3 @@
+ # For Connect RPC over HTTP
+ # This will be converted to https://api.aiauto.pangyo.ainode.ai in production
+ AIAUTO_API_TARGET = "api.aiauto.pangyo.ainode.ai:443"
aiauto/api.py ADDED
@@ -0,0 +1,46 @@
+ from typing import Optional, List, Union
+ from .serializer import object_to_json
+ from .core import StudyWrapper, AIAutoController
+
+
+ def create_study(
+     study_name: str,
+     token: str,
+     direction: Optional[str] = None,
+     directions: Optional[List[str]] = None,
+     sampler: Union[object, dict, None] = None,
+     pruner: Union[object, dict, None] = None
+ ) -> StudyWrapper:
+     if not direction and not directions:
+         raise ValueError("Either 'direction' or 'directions' must be specified")
+
+     if direction and directions:
+         raise ValueError("Cannot specify both 'direction' and 'directions'")
+
+     try:
+         # Initialize controller (which ensures workspace)
+         controller = AIAutoController(token)
+
+         # Prepare request data for CreateStudy
+         request_data = {
+             "spec": {
+                 "studyName": study_name,
+                 "direction": direction or "",
+                 "directions": directions or [],
+                 "samplerJson": object_to_json(sampler),
+                 "prunerJson": object_to_json(pruner)
+             }
+         }
+
+         # Call CreateStudy RPC
+         response = controller.client.call_rpc("CreateStudy", request_data)
+
+         # Return StudyWrapper
+         return StudyWrapper(
+             study_name=response.get("studyName", study_name),
+             storage=controller.storage,
+             controller=controller
+         )
+
+     except Exception as e:
+         raise RuntimeError(f"Failed to create study: {e}") from e
aiauto/constants.py ADDED
@@ -0,0 +1,28 @@
+ # Runtime image constants for AIAuto
+ # These images are pre-tested and guaranteed to work with AIAuto
+
+ # Available runtime images
+ RUNTIME_IMAGES = [
+     # CPU Images
+     "ghcr.io/astral-sh/uv:python3.8-bookworm-slim",
+     "ghcr.io/astral-sh/uv:python3.9-bookworm-slim",
+     "ghcr.io/astral-sh/uv:python3.10-bookworm-slim",
+     "ghcr.io/astral-sh/uv:python3.11-bookworm-slim",
+     "ghcr.io/astral-sh/uv:python3.12-bookworm-slim",
+
+     # GPU Images (PyTorch)
+     "pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime",
+     "pytorch/pytorch:2.1.0-cuda12.1-cudnn8-runtime",
+     "pytorch/pytorch:2.4.0-cuda12.4-cudnn9-runtime",
+
+     # GPU Images (TensorFlow)
+     "tensorflow/tensorflow:2.15.0-gpu",
+     "tensorflow/tensorflow:2.15.0-gpu-jupyter",
+
+     # JAX Images
+     "nvcr.io/nvidia/jax:23.10-py3",
+     "nvcr.io/nvidia/jax:24.04-py3",
+
+     # Custom/Legacy images
+     "ghcr.io/01ai/zipline:latest",  # Custom zipline trading library
+ ]
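`RUNTIME_IMAGES` is re-exported from the package root, so a caller can pick an entry and pass it as the `runtime_image` argument of `StudyWrapper.optimize` (shown later in `core.py`); a small illustrative sketch:

```python
import aiauto

# Choose a pre-tested GPU image from the published list.
gpu_images = [img for img in aiauto.RUNTIME_IMAGES if img.startswith('pytorch/')]
print(gpu_images[0])  # pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime
```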
aiauto/core.py CHANGED
@@ -1,50 +1,50 @@
- from os import makedirs, environ
+ from os import makedirs
  import tempfile
- from typing import Union, Callable, Dict, Any, Optional
+ from typing import Union, Optional, List, Dict, Callable
  import optuna
-
- from .serialization import create_study_with_source_serialization, SourceCodeSerializer
+ from .http_client import ConnectRPCClient
+ from .serializer import serialize, build_requirements
+ from ._config import AIAUTO_API_TARGET
 
 
  class AIAutoController:
-     # singleton pattern
-     def __new__(cls, *args, **kwargs):
-         if not hasattr(cls, "_instance"):
-             cls._instance = super().__new__(cls)
-         return cls._instance
+     _instances = {}
 
-     # singleton pattern
-     def __init__(self):
-         cls = type(self)
-         if not hasattr(cls, "_init"):
-             # singleton pattern
-             # ---------------------
+     def __new__(cls, token: str):
+         if token not in cls._instances:
+             cls._instances[token] = super().__new__(cls)
+         return cls._instances[token]
 
-             # TODO token authentication
-             token = environ.get('AIAUTO_TOKEN')
+     def __init__(self, token: str):
+         if hasattr(self, 'token') and self.token == token:
+             return
 
-             # Configure storage per mode
-             mode = environ.get('AIAUTO_MODE', 'single_gpu')
-             if mode == "distributed":
-                 # Use RDBStorage to support DDP/FSDP pruning callbacks
-                 self.storage = optuna.storages.RDBStorage(
-                     url="sqlite:///optuna.db",
-                     engine_kwargs={"connect_args": {"timeout": 10}}
-                 )
-             else:
-                 # Default GrpcStorageProxy (single GPU, etc.)
-                 self.storage = optuna.storages.GrpcStorageProxy(host="localhost", port=13000)
+         self.token = token
+         self.client = ConnectRPCClient(token)
+
+         # Call EnsureWorkspace to get journal_grpc_storage_proxy_host_external and initialize storage
+         try:
+             response = self.client.call_rpc("EnsureWorkspace", {})
+
+             # Initialize storage with the returned journal_grpc_storage_proxy_host_external
+             host_external = response.get('journalGrpcStorageProxyHostExternal', '')
+             if not host_external:
+                 raise RuntimeError("No storage host returned from EnsureWorkspace")
+
+             host, port = host_external.split(':')
+             self.storage = optuna.storages.GrpcStorageProxy(host=host, port=int(port))
+
+             # Store the internal host for CRD usage (if needed later)
+             self.storage_host_internal = response.get('journalGrpcStorageProxyHostInternal', '')
+             self.dashboard_url = response.get('dashboardUrl', '')
 
-             # artifact storage
-             # TODO switch this to S3 or another mounted path later
-             makedirs('./artifacts', exist_ok=True)
-             self.artifact_store = optuna.artifacts.FileSystemArtifactStore('./artifacts')
-             # temporary directory for saving models
-             self.tmp_dir = tempfile.mkdtemp(prefix=f'ai_auto_tmp_')
+         except Exception as e:
+             raise RuntimeError(f"Failed to initialize workspace: {e}") from e
 
-             # ---------------------
-             # singleton pattern end
-             cls._init = True
+         # artifact storage
+         makedirs('./artifacts', exist_ok=True)
+         self.artifact_store = optuna.artifacts.FileSystemArtifactStore('./artifacts')
+         self.tmp_dir = tempfile.mkdtemp(prefix=f'ai_auto_tmp_')
 
      def get_storage(self):
          return self.storage
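Worth noting in the hunk above: the controller moved from a process-wide singleton to one cached instance per token, so repeated construction with the same token reuses the workspace while distinct tokens get distinct clients. A hedged sketch of the resulting semantics (tokens are placeholders; the first construction per token triggers the EnsureWorkspace RPC):

```python
from aiauto import AIAutoController

a1 = AIAutoController('token-A')  # first call: EnsureWorkspace RPC, storage set up
a2 = AIAutoController('token-A')  # cached: same object, __init__ returns early
b = AIAutoController('token-B')   # different token: separate instance and client

assert a1 is a2 and a1 is not b
```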
@@ -59,49 +59,6 @@ class AIAutoController:
      def get_artifact_tmp_dir(self):
          return self.tmp_dir
 
-     def create_study_with_serialization(
-         self,
-         objective: Callable,
-         study_name: str,
-         direction: str = 'minimize',
-         sampler: Optional[optuna.samplers.BaseSampler] = None,
-         pruner: Optional[optuna.pruners.BasePruner] = None,
-         **optuna_kwargs
-     ) -> 'StudyWrapper':
-         """
-         Create a study using source-code serialization
-
-         Args:
-             objective: objective function used for HPO
-             study_name: study name
-             direction: optimization direction ('minimize' or 'maximize')
-             sampler: Optuna sampler (default: TPESampler)
-             pruner: Optuna pruner
-             **optuna_kwargs: extra arguments passed to optuna.create_study
-
-         Returns:
-             StudyWrapper object (Optuna Study compatible)
-         """
-         study_config = {
-             'study_name': study_name,
-             'direction': direction,
-             'sampler': sampler.__class__.__name__ if sampler else 'TPESampler',
-             'pruner': pruner.__class__.__name__ if pruner else None,
-         }
-
-         # Serialize the source code
-         serialized_objective, processed_config = create_study_with_source_serialization(
-             objective, study_config, **optuna_kwargs
-         )
-
-         # Create a StudyWrapper (the actual gRPC transfer happens at optimize time)
-         return StudyWrapper(
-             serialized_objective=serialized_objective,
-             study_config=processed_config,
-             storage=self.storage,
-             artifact_store=self.artifact_store
-         )
-
 
  class TrialController:
      def __init__(self, trial: optuna.trial.Trial):
@@ -160,98 +117,81 @@ class CallbackTopNArtifact:
160
117
 
161
118
 
162
119
  class StudyWrapper:
163
- """
164
- Optuna Study 호환성을 제공하는 래퍼 클래스
165
-
166
- 클래스는 소스코드 직렬화된 objective 함수를 관리하고
167
- 실제 HPO 실행을 위해 gRPC 백엔드와 통신합니다.
168
- """
169
-
170
- def __init__(
171
- self,
172
- serialized_objective: Dict[str, Any],
173
- study_config: Dict[str, Any],
174
- storage,
175
- artifact_store
176
- ):
177
- self.serialized_objective = serialized_objective
178
- self.study_config = study_config
179
- self.storage = storage
180
- self.artifact_store = artifact_store
181
- self._local_study = None # 로컬 테스트용
182
-
120
+ def __init__(self, study_name: str, storage, controller: AIAutoController):
121
+ self.study_name = study_name
122
+ self._storage = storage
123
+ self._controller = controller
124
+ self._study = None
125
+
126
+ def get_study(self) -> optuna.Study:
127
+ if self._study is None:
128
+ try:
129
+ self._study = optuna.create_study(
130
+ study_name=self.study_name,
131
+ storage=self._storage,
132
+ load_if_exists=True
133
+ )
134
+ except Exception as e:
135
+ raise RuntimeError("Study not ready. Call get_status() and wait for phase=Ready.") from e
136
+ return self._study
137
+
183
138
  def optimize(
184
139
  self,
185
- n_trials: int = 100,
186
- n_jobs: int = 1,
187
- callbacks: Optional[list] = None,
188
- **kwargs
189
- ):
190
- """
191
- HPO 최적화 실행
192
-
193
- 실제 구현에서는 gRPC를 통해 백엔드로 전송하지만,
194
- 현재는 로컬에서 역직렬화하여 테스트합니다.
195
- """
196
- print("🚀 Starting HPO optimization with source code serialization...")
197
- print(f"📊 Study: {self.study_config['study_name']}")
198
- print(f"🎯 Direction: {self.study_config['direction']}")
199
- print(f"🔢 Trials: {n_trials}")
200
-
140
+ objective: Callable,
141
+ n_trials: int,
142
+ parallelism: int,
143
+ requirements_file: Optional[str] = None,
144
+ requirements_list: Optional[List[str]] = None,
145
+ resources_requests: Optional[Dict[str, str]] = None,
146
+ resources_limits: Optional[Dict[str, str]] = None,
147
+ runtime_image: Optional[str] = None,
148
+ use_gpu: bool = False
149
+ ) -> None:
201
150
  try:
202
- # 소스코드 역직렬화로 objective 함수 복원
203
- objective_func = SourceCodeSerializer.deserialize_objective(
204
- self.serialized_objective
205
- )
206
- print("✅ Objective function deserialized successfully")
207
-
208
- # 로컬 Study 생성 (실제로는 gRPC 통신)
209
- self._local_study = optuna.create_study(
210
- study_name=self.study_config['study_name'],
211
- direction=self.study_config['direction'],
212
- storage=self.storage,
213
- load_if_exists=True
214
- )
151
+ request_data = {
152
+ "objective": {
153
+ "sourceCode": serialize(objective),
154
+ "requirementsTxt": build_requirements(requirements_file, requirements_list)
155
+ },
156
+ "batch": {
157
+ "studyName": self.study_name,
158
+ "nTrials": n_trials,
159
+ "parallelism": parallelism,
160
+ "runtimeImage": runtime_image or "",
161
+ "resourcesRequests": resources_requests or {},
162
+ "resourcesLimits": resources_limits or {},
163
+ "useGpu": use_gpu
164
+ }
165
+ }
215
166
 
216
- # 최적화 실행
217
- self._local_study.optimize(
218
- objective_func,
219
- n_trials=n_trials,
220
- n_jobs=n_jobs,
221
- callbacks=callbacks or [],
222
- **kwargs
167
+ self._controller.client.call_rpc("Optimize", request_data)
168
+
169
+ except Exception as e:
170
+ raise RuntimeError(f"Failed to start optimization: {e}") from e
171
+
172
+ def get_status(self) -> dict:
173
+ try:
174
+ response = self._controller.client.call_rpc(
175
+ "GetStatus",
176
+ {"studyName": self.study_name}
223
177
  )
224
178
 
225
- print(f"🎉 Optimization completed! Best value: {self.best_value}")
226
-
179
+ # Convert camelCase to snake_case for backward compatibility
180
+ return {
181
+ "study_name": response.get("studyName", ""),
182
+ "count_active": response.get("countActive", 0),
183
+ "count_succeeded": response.get("countSucceeded", 0),
184
+ "count_pruned": response.get("countPruned", 0),
185
+ "count_failed": response.get("countFailed", 0),
186
+ "count_total": response.get("countTotal", 0),
187
+ "count_completed": response.get("countCompleted", 0),
188
+ "dashboard_url": response.get("dashboardUrl", ""),
189
+ "last_error": response.get("lastError", ""),
190
+ "updated_at": response.get("updatedAt", "")
191
+ }
192
+
227
193
  except Exception as e:
228
- print(f" Optimization failed: {e}")
229
- raise
230
-
231
- @property
232
- def best_trial(self):
233
- """최고 성능 Trial 반환"""
234
- if self._local_study:
235
- return self._local_study.best_trial
236
- return None
237
-
238
- @property
239
- def best_value(self):
240
- """최고 성능 값 반환"""
241
- if self._local_study:
242
- return self._local_study.best_value
243
- return None
244
-
245
- @property
246
- def best_params(self):
247
- """최고 성능 하이퍼파라미터 반환"""
248
- if self._local_study:
249
- return self._local_study.best_params
250
- return None
251
-
252
- @property
253
- def trials(self):
254
- """모든 Trial 목록 반환"""
255
- if self._local_study:
256
- return self._local_study.trials
257
- return []
194
+ raise RuntimeError(f"Failed to get status: {e}") from e
195
+
196
+ def __repr__(self) -> str:
197
+ return f"StudyWrapper(study_name='{self.study_name}', storage={self._storage})"
aiauto/http_client.py ADDED
@@ -0,0 +1,50 @@
+ """HTTP client for Connect RPC communication with Next.js server."""
+ import requests
+ from typing import Dict, Any, Optional
+ from ._config import AIAUTO_API_TARGET
+
+
+ class ConnectRPCClient:
+     """Client for calling Connect RPC endpoints via HTTP/JSON."""
+
+     def __init__(self, token: str, base_url: Optional[str] = None):
+         self.token = token
+         # Convert gRPC target to HTTP URL
+         if base_url:
+             self.base_url = base_url
+         else:
+             # AIAUTO_API_TARGET is like "api.aiauto.pangyo.ainode.ai:443"
+             # Convert to "https://api.aiauto.pangyo.ainode.ai"
+             host = AIAUTO_API_TARGET.split(':')[0]
+             self.base_url = f"https://{host}"
+
+         self.headers = {
+             "Authorization": f"Bearer {token}",
+             "Content-Type": "application/json",
+             "Connect-Protocol-Version": "1"
+         }
+
+     def call_rpc(self, method: str, request_data: Dict[str, Any]) -> Dict[str, Any]:
+         """Call a Connect RPC method and return the response."""
+         url = f"{self.base_url}/api/aiauto.v1.AIAutoService/{method}"
+
+         try:
+             response = requests.post(url, json=request_data, headers=self.headers)
+             response.raise_for_status()
+             return response.json()
+         except requests.exceptions.HTTPError as e:
+             # Connect RPC error format
+             if e.response and e.response.headers.get('content-type', '').startswith('application/json'):
+                 error_data = e.response.json()
+                 error_msg = error_data.get('message', str(e))
+                 raise RuntimeError(f"RPC error: {error_msg}") from e
+             raise RuntimeError(f"HTTP error: {e}") from e
+         except requests.exceptions.RequestException as e:
+             raise RuntimeError(f"Request failed: {e}") from e
+
+
+ def map_http_error(exc: Exception) -> Exception:
+     """Convert HTTP/Connect RPC errors to standard exceptions."""
+     # For now, just pass through the exception
+     # In the future, we can add more sophisticated error mapping
+     return exc
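For local development, the `base_url` override skips the `AIAUTO_API_TARGET`-to-HTTPS conversion; a sketch of calling the client directly (the local host and token are assumptions):

```python
from aiauto.http_client import ConnectRPCClient

# Hypothetical local Next.js server; in production the URL is derived from
# AIAUTO_API_TARGET ("api.aiauto.pangyo.ainode.ai:443" -> "https://api.aiauto.pangyo.ainode.ai").
client = ConnectRPCClient(token='YOUR_JWT_TOKEN', base_url='http://localhost:3000')

# POSTs JSON to {base_url}/api/aiauto.v1.AIAutoService/EnsureWorkspace with
# Authorization, Content-Type, and Connect-Protocol-Version headers set.
workspace = client.call_rpc('EnsureWorkspace', {})
print(workspace.get('dashboardUrl', ''))
```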
aiauto/serializer.py ADDED
@@ -0,0 +1,55 @@
+ import inspect
+ import json
+ from typing import Callable, Union, List, Optional
+
+
+ def serialize(objective: Callable) -> str:
+     try:
+         return inspect.getsource(objective)
+     except Exception as e:
+         raise ValueError("objective must be a module-level def (decorated/local/lambda functions are not allowed)") from e
+
+
+ def build_requirements(file_path: Optional[str] = None, reqs: Optional[List[str]] = None) -> str:
+     if file_path and reqs:
+         raise ValueError("requirements_file and requirements_list cannot be specified together")
+
+     if file_path:
+         with open(file_path, 'r') as f:
+             return f.read()
+     elif reqs:
+         return "\n".join(reqs)
+     else:
+         return ""
+
+
+ def object_to_json(obj: Union[object, dict, None]) -> str:
+     if obj is None:
+         return ""
+
+     if isinstance(obj, dict):
+         return json.dumps(obj)
+
+     cls = type(obj)
+     module_name = cls.__module__
+     class_name = cls.__name__
+
+     if not module_name.startswith('optuna.'):
+         raise ValueError(f"Only optuna core classes are supported: {class_name}")
+
+     sig = inspect.signature(cls)
+     kwargs = {}
+
+     for param_name, param in sig.parameters.items():
+         if param_name == 'self':
+             continue
+         if hasattr(obj, param_name):
+             value = getattr(obj, param_name)
+             if param.default != value:
+                 kwargs[param_name] = value
+
+     return json.dumps({
+         "module": module_name,
+         "class": class_name,
+         "kwargs": kwargs
+     })
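To illustrate the serializer contract: `serialize()` requires a module-level def, and `object_to_json()` emits a module/class/kwargs JSON spec, capturing only constructor parameters that the object also exposes as same-named attributes. A sketch:

```python
import optuna
from aiauto.serializer import object_to_json, serialize

def objective(trial):
    x = trial.suggest_float('x', -10, 10)
    return x ** 2

print(serialize(objective))  # the function's source text, via inspect.getsource

spec = object_to_json(optuna.pruners.MedianPruner(n_warmup_steps=5))
print(spec)
# e.g. {"module": "optuna.pruners._median", "class": "MedianPruner", "kwargs": {...}}
# kwargs may stay empty for classes that keep constructor args in private attributes.
```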
aiauto_client-0.1.0.dist-info/METADATA → aiauto_client-0.1.2.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: aiauto-client
- Version: 0.1.0
+ Version: 0.1.2
  Summary: AI Auto HPO (Hyperparameter Optimization) Client Library
  Author-email: AIAuto Team <ainode@zeroone.ai>
  Project-URL: Homepage, https://aiauto.cloude.ainode.ai
@@ -21,9 +21,7 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
  Requires-Python: >=3.8
  Description-Content-Type: text/markdown
  Requires-Dist: optuna>=3.0.0
- Requires-Dist: grpcio>=1.50.0
- Requires-Dist: grpcio-tools>=1.50.0
- Requires-Dist: protobuf>=4.0.0
+ Requires-Dist: requests>=2.25.0
 
  # AIAuto - Hyperparameter Optimization Client Library
 
@@ -31,44 +29,80 @@ AIAuto is a Kubernetes-based distributed HPO (Hyperparameter Optimization) system
  Handles gRPC communication between the user's Python lib and the Next.js server
 
  ## lib build
- - download PyPI build/upload dependencies: `pip install build twine`
- - build lib `python -m build --wheel --sdist`
-   - generates `aiauto-0.1.0.whl`
-   - generates `aiauto-0.1.0.tar.gz`
-   - generates `aiauto.egg-info`
- - `twine upload --repository testpypi dist/*`
+ - download PyPI build/upload dependencies: `uv add --dev twine`
+ - build lib `uv build`
+   - generates `aiauto_client-0.1.1-py3-none-any.whl`
+   - generates `aiauto_client-0.1.1.tar.gz`
+   - generates `aiauto_client.egg-info`
+ - check the settings in `~/.pypirc`
+ - `twine upload --repository aiauto-client dist/*`
  - `twine upload dist/*`
  - when uploading you are prompted for a PyPI token; it's in your PyPI account settings
 
  ## Installation
- - `pip install aiauto`
+ - `uv add aiauto-client`
 
  ## Quick Start
+
+ ### Creating a study and the Ask/Tell pattern
  ```python
  import aiauto
 
- # Initialize the controller
- ac = aiauto.AIAutoController()
+ # Create a StudyWrapper (JWT token required)
+ studyWrapper = aiauto.create_study(
+     study_name='my_optimization',
+     token='your_jwt_token',
+     direction='maximize'
+ )
+
+ # Obtain the actual optuna.Study object (ask/tell possible locally)
+ study = studyWrapper.get_study()
+
+ # Optimize with the Ask/Tell pattern
+ trial = study.ask()
+ params = trial.params
+
+ # Train the user's model
+ accuracy = train_model(params)
+
+ # Report the result
+ study.tell(trial, accuracy)
+ ```
+
+ ### Distributed optimization (Pod execution)
+ ```python
+ import aiauto
 
- # Define the objective function
  def objective(trial):
      tc = aiauto.TrialController(trial)
 
      # Sample hyperparameters
      lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
+     batch_size = trial.suggest_int('batch_size', 16, 128)
 
-     # Model training and evaluation logic
-     # ...
-     tc.log(f'full dataset: train {len(dataset)}, test {len(dataset_test)}, batch_size {batch_size}')
+     # Model training logic
+     accuracy = train_model(lr, batch_size)
+
+     tc.log(f'lr: {lr}, batch_size: {batch_size}, accuracy: {accuracy}')
 
      return accuracy
 
- # Create the study and run the optimization
- study = optuna.create_study(
-     study_name='my_optimization',
-     storage=ac.get_storage(),
+ # Create a StudyWrapper
+ studyWrapper = aiauto.create_study(
+     study_name='distributed_optimization',
+     token='your_jwt_token',
      direction='maximize'
  )
 
- study.optimize(objective, n_trials=100)
+ # Run distributed optimization (executed in Kubernetes Pods)
+ studyWrapper.optimize(
+     objective=objective,
+     n_trials=100,
+     parallelism=4,
+     requirements_list=['torch==2.0.0', 'torchvision==0.15.0']
+ )
+
+ # Monitor status in real time
+ status = studyWrapper.get_status()
+ print(f"Active: {status['count_active']}, Completed: {status['count_completed']}")
  ```
aiauto_client-0.1.2.dist-info/RECORD ADDED
@@ -0,0 +1,11 @@
+ aiauto/__init__.py,sha256=TgD2ZvIHb7oKJb-HjUl3WfXXtuWLien0sybSy9onjL8,395
+ aiauto/_config.py,sha256=DaRTIZlph9T3iuW-Cy4fkw8i3bXB--gMtW947SLZZNs,159
+ aiauto/api.py,sha256=hzoVZMwKtH2EaAM1bY67grp6cenltCl8kdG8YHPegvk,1517
+ aiauto/constants.py,sha256=UhDCLFoPE89XrHB3SEnZR3YUuzajgugMGX80KYx_qc0,939
+ aiauto/core.py,sha256=uvAuSIL6CrIDfRBnBjN6SPFXD1ZJa7w7afNzmDyvzLM,7777
+ aiauto/http_client.py,sha256=t1gxeM5-d5bsVoFWgaNcTrt_WWUXuMuxge9gDlEqhoA,2086
+ aiauto/serializer.py,sha256=_iPtEoqW8RTKOZ6UrC7CzOqoangpPYzeL7MQfIdmov8,1568
+ aiauto_client-0.1.2.dist-info/METADATA,sha256=Yez_Igblnrkc0f6fAk5YQ7HHkqv6xRjZ_jeHak3vzzk,3335
+ aiauto_client-0.1.2.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+ aiauto_client-0.1.2.dist-info/top_level.txt,sha256=Sk2ctO9_Bf_tAPwq1x6Vfl6OuL29XzwMTO4F_KG6oJE,7
+ aiauto_client-0.1.2.dist-info/RECORD,,
aiauto/serialization.py DELETED
@@ -1,138 +0,0 @@
1
- """
2
- Source Code Serialization Module
3
-
4
- 이 모듈은 Python 버전 간 호환성을 위해 CloudPickle 대신
5
- inspect.getsource를 사용한 소스코드 직렬화 방식을 제공합니다.
6
- """
7
-
8
- import inspect
9
- import types
10
- from typing import Callable, Dict, Any, Tuple
11
-
12
-
13
- class SourceCodeSerializer:
14
- """Objective 함수를 소스코드로 직렬화하는 클래스"""
15
-
16
- @staticmethod
17
- def serialize_objective(objective_func: Callable) -> Dict[str, Any]:
18
- """
19
- Objective 함수를 소스코드로 직렬화
20
-
21
- Args:
22
- objective_func: 직렬화할 objective 함수
23
-
24
- Returns:
25
- 직렬화된 데이터 딕셔너리
26
- - source_code: 함수의 소스코드 문자열
27
- - func_name: 함수 이름
28
- - dependencies: 필요한 import 구문들
29
- """
30
- try:
31
- # 함수 소스코드 추출
32
- source_code = inspect.getsource(objective_func)
33
- func_name = objective_func.__name__
34
-
35
- # 함수가 정의된 모듈의 정보 추출
36
- module = inspect.getmodule(objective_func)
37
- dependencies = []
38
-
39
- if module and hasattr(module, '__file__'):
40
- # 모듈에서 import 구문들 추출 (간단한 방식)
41
- with open(module.__file__, 'r') as f:
42
- module_source = f.read()
43
-
44
- # import 구문 추출 (개선된 파싱 필요시 ast 모듈 사용)
45
- lines = module_source.split('\n')
46
- for line in lines:
47
- line = line.strip()
48
- if line.startswith('import ') or line.startswith('from '):
49
- # 기본적인 import 구문만 추출
50
- if not any(skip in line for skip in ['client', '__', 'relative']):
51
- dependencies.append(line)
52
-
53
- return {
54
- 'source_code': source_code,
55
- 'func_name': func_name,
56
- 'dependencies': dependencies,
57
- 'serialization_method': 'source_code'
58
- }
59
-
60
- except Exception as e:
61
- raise RuntimeError(f"Failed to serialize objective function: {e}")
62
-
63
- @staticmethod
64
- def deserialize_objective(serialized_data: Dict[str, Any]) -> Callable:
65
- """
66
- 직렬화된 데이터로부터 objective 함수를 복원
67
-
68
- Args:
69
- serialized_data: serialize_objective에서 생성된 데이터
70
-
71
- Returns:
72
- 복원된 objective 함수
73
- """
74
- try:
75
- source_code = serialized_data['source_code']
76
- func_name = serialized_data['func_name']
77
- dependencies = serialized_data.get('dependencies', [])
78
-
79
- # 실행 네임스페이스 생성
80
- exec_namespace = {'__builtins__': __builtins__}
81
-
82
- # 의존성 import 실행
83
- for dep in dependencies:
84
- try:
85
- exec(dep, exec_namespace)
86
- except Exception as import_error:
87
- # import 실패는 경고만 하고 계속 진행
88
- print(f"Warning: Failed to import dependency '{dep}': {import_error}")
89
-
90
- # 소스코드 실행
91
- exec(source_code, exec_namespace)
92
-
93
- # 함수 객체 추출
94
- if func_name not in exec_namespace:
95
- raise NameError(f"Function '{func_name}' not found in executed namespace")
96
-
97
- objective_func = exec_namespace[func_name]
98
-
99
- if not callable(objective_func):
100
- raise TypeError(f"'{func_name}' is not callable")
101
-
102
- return objective_func
103
-
104
- except Exception as e:
105
- raise RuntimeError(f"Failed to deserialize objective function: {e}")
106
-
107
-
108
- def create_study_with_source_serialization(
109
- objective: Callable,
110
- study_config: Dict[str, Any],
111
- **optuna_kwargs
112
- ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
113
- """
114
- 소스코드 직렬화를 사용하여 study 생성 준비
115
-
116
- Args:
117
- objective: HPO에 사용할 objective 함수
118
- study_config: study 설정 (name, direction, sampler, pruner 등)
119
- **optuna_kwargs: optuna.create_study에 전달할 추가 인자들
120
-
121
- Returns:
122
- Tuple[serialized_objective, study_config]
123
- - serialized_objective: 직렬화된 objective 함수 데이터
124
- - study_config: study 설정 데이터
125
- """
126
- # Objective 함수 직렬화
127
- serialized_objective = SourceCodeSerializer.serialize_objective(objective)
128
-
129
- # Study 설정 정리
130
- processed_config = {
131
- 'study_name': study_config.get('study_name', 'unnamed_study'),
132
- 'direction': study_config.get('direction', 'minimize'),
133
- 'sampler': study_config.get('sampler', 'TPESampler'),
134
- 'pruner': study_config.get('pruner', None),
135
- 'optuna_kwargs': optuna_kwargs
136
- }
137
-
138
- return serialized_objective, processed_config
@@ -1,7 +0,0 @@
1
- aiauto/__init__.py,sha256=VvEM3L0NZGrHi3kHV_gSRf8X2baqLDPOSbArgd6LpaI,1353
2
- aiauto/core.py,sha256=GKCF24GA25QCu8n2q3YXnff4Sb3Dfx1yKFvE7QZ8108,9182
3
- aiauto/serialization.py,sha256=6Rb5k01hx7uXaLt1XmUrmn1KzMjxsYinzi4fjglc3jw,5137
4
- aiauto_client-0.1.0.dist-info/METADATA,sha256=inimyQ0HuHH8mWkYUOP1FpXez506z1i8_oi0UVBSwsE,2510
5
- aiauto_client-0.1.0.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
6
- aiauto_client-0.1.0.dist-info/top_level.txt,sha256=Sk2ctO9_Bf_tAPwq1x6Vfl6OuL29XzwMTO4F_KG6oJE,7
7
- aiauto_client-0.1.0.dist-info/RECORD,,