aiauto-client 0.1.14__tar.gz → 0.1.16__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (22)
  1. {aiauto_client-0.1.14 → aiauto_client-0.1.16}/PKG-INFO +37 -21
  2. {aiauto_client-0.1.14 → aiauto_client-0.1.16}/README.md +36 -20
  3. {aiauto_client-0.1.14 → aiauto_client-0.1.16}/examples/example_torch_multiple_objective.py +14 -4
  4. {aiauto_client-0.1.14 → aiauto_client-0.1.16}/examples/example_torch_single_objective.py +13 -2
  5. {aiauto_client-0.1.14 → aiauto_client-0.1.16}/examples/simple_example.py +2 -2
  6. {aiauto_client-0.1.14 → aiauto_client-0.1.16}/pyproject.toml +1 -1
  7. {aiauto_client-0.1.14 → aiauto_client-0.1.16}/src/aiauto/core.py +15 -2
  8. {aiauto_client-0.1.14 → aiauto_client-0.1.16}/src/aiauto/serializer.py +4 -1
  9. {aiauto_client-0.1.14 → aiauto_client-0.1.16}/src/aiauto_client.egg-info/PKG-INFO +37 -21
  10. {aiauto_client-0.1.14 → aiauto_client-0.1.16}/src/aiauto_client.egg-info/SOURCES.txt +2 -1
  11. aiauto_client-0.1.16/tests/test_pruners.py +205 -0
  12. aiauto_client-0.1.16/tests/test_samplers.py +226 -0
  13. aiauto_client-0.1.14/tests/test_local_storage.py +0 -59
  14. {aiauto_client-0.1.14 → aiauto_client-0.1.16}/MANIFEST.in +0 -0
  15. {aiauto_client-0.1.14 → aiauto_client-0.1.16}/setup.cfg +0 -0
  16. {aiauto_client-0.1.14 → aiauto_client-0.1.16}/src/aiauto/__init__.py +0 -0
  17. {aiauto_client-0.1.14 → aiauto_client-0.1.16}/src/aiauto/_config.py +0 -0
  18. {aiauto_client-0.1.14 → aiauto_client-0.1.16}/src/aiauto/constants.py +0 -0
  19. {aiauto_client-0.1.14 → aiauto_client-0.1.16}/src/aiauto/http_client.py +0 -0
  20. {aiauto_client-0.1.14 → aiauto_client-0.1.16}/src/aiauto_client.egg-info/dependency_links.txt +0 -0
  21. {aiauto_client-0.1.14 → aiauto_client-0.1.16}/src/aiauto_client.egg-info/requires.txt +0 -0
  22. {aiauto_client-0.1.14 → aiauto_client-0.1.16}/src/aiauto_client.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: aiauto-client
- Version: 0.1.14
+ Version: 0.1.16
  Summary: AI Auto HPO (Hyperparameter Optimization) Client Library
  Author-email: AIAuto Team <ainode@zeroone.ai>
  Project-URL: Homepage, https://dashboard.common.aiauto.pangyo.ainode.ai
@@ -121,7 +121,7 @@ Functions defined in a Jupyter Notebook or Python REPL cannot be serialized
  import aiauto
  import optuna

- def objective(trial: optuna.trial.Trial):
+ def objective(trial):
      """
      This function runs on an external server.
      Write all imports inside the function.
@@ -170,7 +170,7 @@ time.sleep(5)
  # the created study can be viewed at `https://dashboard.common.aiauto.pangyo.ainode.ai/study`

  # define the objective function
- def objective(trial: optuna.trial.Trial):
+ def objective(trial):
      """A function that runs on the server, not on the user's local machine"""
      x = trial.suggest_float('x', -10, 10)
      y = trial.suggest_float('y', -10, 10)
@@ -214,7 +214,7 @@ time.sleep(5)

  # define the objective function
  # see https://docs.pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
- def objective(trial: optuna.trial.Trial):
+ def objective(trial):
      """
      A function that runs on the server, not on the user's local machine
      All imports must live inside the function
@@ -224,6 +224,8 @@ def objective(trial: optuna.trial.Trial):
      from torch.utils.data import DataLoader, random_split, Subset
      from torchvision import transforms, datasets
      import torch.nn.functional as F
+
+     import optuna

      # sample hyperparameters
      lr = trial.suggest_float('learning_rate', 1e-5, 1e-1, log=True)
@@ -315,7 +317,12 @@ study_wrapper.optimize(
      n_trials=100,
      parallelism=4,
      use_gpu=True,  # use GPU
-     requirements_list=['torch', 'torchvision']  # installed automatically in the Pod
+     runtime_image='pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime',  # default image for use_gpu True
+     # requirements_list=['torch', 'torchvision']  # installed automatically in the Pod  # listing pip packages is slow to download; specifying a torch runtime_image is better
+     resources_requests={
+         "cpu": "2",
+         "memory": "4Gi",
+     },
  )
  time.sleep(5)
  ```
@@ -344,7 +351,7 @@ time.sleep(5)

  # define the objective function
  # see https://docs.pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
- def objective(trial: optuna.trial.Trial):
+ def objective(trial):
      """
      A function that runs on the server, not on the user's local machine
      All imports must live inside the function
@@ -446,7 +453,16 @@ study_wrapper.optimize(
      n_trials=100,
      parallelism=4,
      use_gpu=True,  # use GPU
-     requirements_list=['torch', 'torchvision', 'fvcore']  # installed automatically in the Pod
+     runtime_image='pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime',  # default image for use_gpu True
+     requirements_list=[  # listing pip packages is slow to download; specifying a torch runtime_image is better
+         # 'torch',
+         # 'torchvision',
+         'fvcore',
+     ],  # installed automatically in the Pod
+     resources_requests={
+         "cpu": "2",
+         "memory": "4Gi",
+     },
  )
  time.sleep(5)
  ```
@@ -545,15 +561,15 @@ study = controller.create_study('exp1', direction='minimize')
  time.sleep(5)

  def objective(trial):
-     import numpy as np
-     x = trial.suggest_float('x', -10, 10)
-     return (x - 1.23) ** 2
+     import numpy as np
+     x = trial.suggest_float('x', -10, 10)
+     return (x - 1.23) ** 2

  study.optimize(
-     objective,
-     n_trials=64,
-     parallelism=8,
-     requirements_list=['numpy'],
+     objective,
+     n_trials=64,
+     parallelism=8,
+     requirements_list=['numpy'],
  )
  time.sleep(5)
  ```
@@ -600,17 +616,17 @@ import optuna, aiauto, time

  controller = aiauto.AIAutoController('aiauto_xxx')
  study = controller.create_study(
-     study_name='cnn',
-     direction='minimize',
-     sampler=optuna.samplers.TPESampler(seed=42),
-     pruner=optuna.pruners.MedianPruner(n_startup_trials=5),
+     study_name='cnn',
+     direction='minimize',
+     sampler=optuna.samplers.TPESampler(seed=42),
+     pruner=optuna.pruners.MedianPruner(n_startup_trials=5),
  )
  time.sleep(5)

  def objective(trial):
-     import numpy as np
-     lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
-     return (np.log10(lr) + 2) ** 2
+     import numpy as np
+     lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
+     return (np.log10(lr) + 2) ** 2

  study.optimize(objective, n_trials=50, parallelism=4)
  time.sleep(5)
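
The documentation changes above all push in the same direction: for GPU runs, stop installing torch through `requirements_list` and instead pin a torch `runtime_image`, and state CPU/memory requests explicitly. A minimal sketch of the resulting call shape, assuming a valid token (the `'aiauto_xxx'` string is a placeholder) and using only parameters that appear in the diff:

```python
import time

import aiauto

controller = aiauto.AIAutoController('aiauto_xxx')  # placeholder token
study_wrapper = controller.create_study(study_name='cnn', direction='minimize')
time.sleep(5)

def objective(trial):
    # runs on the server, so any imports must stay inside the function
    lr = trial.suggest_float('learning_rate', 1e-5, 1e-1, log=True)
    return (lr - 1e-3) ** 2  # toy objective

study_wrapper.optimize(
    objective,
    n_trials=100,
    parallelism=4,
    use_gpu=True,
    # torch ships in the runtime image, so it is not pip-installed per Pod
    runtime_image='pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime',
    resources_requests={"cpu": "2", "memory": "4Gi"},
)
time.sleep(5)
```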
@@ -94,7 +94,7 @@ Functions defined in a Jupyter Notebook or Python REPL cannot be serialized
  import aiauto
  import optuna

- def objective(trial: optuna.trial.Trial):
+ def objective(trial):
      """
      This function runs on an external server.
      Write all imports inside the function.
@@ -143,7 +143,7 @@ time.sleep(5)
  # the created study can be viewed at `https://dashboard.common.aiauto.pangyo.ainode.ai/study`

  # define the objective function
- def objective(trial: optuna.trial.Trial):
+ def objective(trial):
      """A function that runs on the server, not on the user's local machine"""
      x = trial.suggest_float('x', -10, 10)
      y = trial.suggest_float('y', -10, 10)
@@ -187,7 +187,7 @@ time.sleep(5)

  # define the objective function
  # see https://docs.pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
- def objective(trial: optuna.trial.Trial):
+ def objective(trial):
      """
      A function that runs on the server, not on the user's local machine
      All imports must live inside the function
@@ -197,6 +197,8 @@ def objective(trial: optuna.trial.Trial):
      from torch.utils.data import DataLoader, random_split, Subset
      from torchvision import transforms, datasets
      import torch.nn.functional as F
+
+     import optuna

      # sample hyperparameters
      lr = trial.suggest_float('learning_rate', 1e-5, 1e-1, log=True)
@@ -288,7 +290,12 @@ study_wrapper.optimize(
      n_trials=100,
      parallelism=4,
      use_gpu=True,  # use GPU
-     requirements_list=['torch', 'torchvision']  # installed automatically in the Pod
+     runtime_image='pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime',  # default image for use_gpu True
+     # requirements_list=['torch', 'torchvision']  # installed automatically in the Pod  # listing pip packages is slow to download; specifying a torch runtime_image is better
+     resources_requests={
+         "cpu": "2",
+         "memory": "4Gi",
+     },
  )
  time.sleep(5)
  ```
@@ -317,7 +324,7 @@ time.sleep(5)

  # define the objective function
  # see https://docs.pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
- def objective(trial: optuna.trial.Trial):
+ def objective(trial):
      """
      A function that runs on the server, not on the user's local machine
      All imports must live inside the function
@@ -419,7 +426,16 @@ study_wrapper.optimize(
      n_trials=100,
      parallelism=4,
      use_gpu=True,  # use GPU
-     requirements_list=['torch', 'torchvision', 'fvcore']  # installed automatically in the Pod
+     runtime_image='pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime',  # default image for use_gpu True
+     requirements_list=[  # listing pip packages is slow to download; specifying a torch runtime_image is better
+         # 'torch',
+         # 'torchvision',
+         'fvcore',
+     ],  # installed automatically in the Pod
+     resources_requests={
+         "cpu": "2",
+         "memory": "4Gi",
+     },
  )
  time.sleep(5)
  ```
@@ -518,15 +534,15 @@ study = controller.create_study('exp1', direction='minimize')
  time.sleep(5)

  def objective(trial):
-     import numpy as np
-     x = trial.suggest_float('x', -10, 10)
-     return (x - 1.23) ** 2
+     import numpy as np
+     x = trial.suggest_float('x', -10, 10)
+     return (x - 1.23) ** 2

  study.optimize(
-     objective,
-     n_trials=64,
-     parallelism=8,
-     requirements_list=['numpy'],
+     objective,
+     n_trials=64,
+     parallelism=8,
+     requirements_list=['numpy'],
  )
  time.sleep(5)
  ```
@@ -573,17 +589,17 @@ import optuna, aiauto, time

  controller = aiauto.AIAutoController('aiauto_xxx')
  study = controller.create_study(
-     study_name='cnn',
-     direction='minimize',
-     sampler=optuna.samplers.TPESampler(seed=42),
-     pruner=optuna.pruners.MedianPruner(n_startup_trials=5),
+     study_name='cnn',
+     direction='minimize',
+     sampler=optuna.samplers.TPESampler(seed=42),
+     pruner=optuna.pruners.MedianPruner(n_startup_trials=5),
  )
  time.sleep(5)

  def objective(trial):
-     import numpy as np
-     lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
-     return (np.log10(lr) + 2) ** 2
+     import numpy as np
+     lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
+     return (np.log10(lr) + 2) ** 2

  study.optimize(objective, n_trials=50, parallelism=4)
  time.sleep(5)
@@ -49,6 +49,7 @@ def objective_multi(trial):
      from fvcore.nn import FlopCountAnalysis

      from optuna.artifacts import upload_artifact
+     import aiauto


      # wrap the raw optuna trial passed into the objective function in aiauto's TrialController, used for logging
@@ -259,6 +260,7 @@ def objective_detailed(trial):
      from fvcore.nn import FlopCountAnalysis

      from optuna.artifacts import upload_artifact
+     import aiauto


      # wrap the raw optuna trial passed into the objective function in aiauto's TrialController, used for logging
@@ -440,14 +442,18 @@ if __name__ == '__main__':
          n_trials=100,
          parallelism=4,  # use parallelism instead of n_jobs
          use_gpu=True,  # use GPU
-         # runtime_image = "pytorch/pytorch:2.1.0-cuda12.1-cudnn8-runtime", # default image for use_gpu True
+         runtime_image='pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime',  # default image for use_gpu True
          # passed as a list instead of requirements.txt (installed on the external server)
-         requirements_list=[
-             'torch',
-             'torchvision',
+         requirements_list=[  # listing pip packages is slow to download; specifying a torch runtime_image is better
+             # 'torch',
+             # 'torchvision',
              'fvcore',  # for FLOPS computation (when using multi-objective)
          ],
          # CallbackTopNArtifact cannot be used on the client side (it is set automatically by the runner)
+         resources_requests={
+             "cpu": "2",
+             "memory": "4Gi",
+         },
      )
      time.sleep(5)

@@ -470,4 +476,8 @@ if __name__ == '__main__':
          use_gpu=True,
          # runtime_image = "pytorch/pytorch:2.1.0-cuda12.1-cudnn8-runtime", # default image for use_gpu True
          requirements_list=['torch', 'torchvision', 'fvcore'],
+         resources_requests={
+             "cpu": "2",
+             "memory": "4Gi",
+         },
      )
@@ -47,7 +47,9 @@ def objective_single(trial):
      from torchvision import transforms, datasets
      import torch.nn.functional as F

+     import optuna
      from optuna.artifacts import upload_artifact
+     import aiauto


      # wrap the raw optuna trial passed into the objective function in aiauto's TrialController, used for logging
@@ -261,6 +263,7 @@ def objective_detailed(trial):
      import torch.nn.functional as F

      from optuna.artifacts import upload_artifact
+     import aiauto


      # wrap the raw optuna trial passed into the objective function in aiauto's TrialController, used for logging
@@ -447,10 +450,14 @@ if __name__ == '__main__':
          n_trials=100,
          parallelism=4,  # use parallelism instead of n_jobs
          use_gpu=True,  # use GPU
-         # runtime_image = "pytorch/pytorch:2.1.0-cuda12.1-cudnn8-runtime", # default image for use_gpu True
+         runtime_image='pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime',  # default image for use_gpu True
          # pass the packages imported inside the objective function as a list instead of requirements.txt (installed inside the pod)
-         requirements_list=['torch', 'torchvision'],
+         # requirements_list=['torch', 'torchvision'],  # listing pip packages is slow to download; specifying a torch runtime_image is better
          # CallbackTopNArtifact cannot be used on the client side (it is set automatically by the runner)
+         resources_requests={
+             "cpu": "2",
+             "memory": "4Gi",
+         },
      )
      time.sleep(5)

@@ -473,4 +480,8 @@ if __name__ == '__main__':
          use_gpu=True,
          # runtime_image = "pytorch/pytorch:2.1.0-cuda12.1-cudnn8-runtime", # default image for use_gpu True
          requirements_list=['torch', 'torchvision'],
+         resources_requests={
+             "cpu": "2",
+             "memory": "4Gi",
+         },
      )
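
Both example files now add `import aiauto` inside the objective because of the TrialController wrapping named in the repeated comment. A minimal sketch of that pattern, following the usage visible in the removed tests/test_local_storage.py (diffed at the end of this change set):

```python
def objective(trial):
    # executed on the server, so the import lives inside the function
    import aiauto

    # wrap the raw optuna trial so log lines are routed through aiauto
    tc = aiauto.TrialController(trial)
    tc.log('trial started')

    x = trial.suggest_float('x', -10, 10)
    value = (x - 2) ** 2
    tc.log(f'x={x:.3f}, value={value:.3f}')
    return value
```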
@@ -26,7 +26,7 @@ ac = aiauto.AIAutoController('<token>')


  # single objective accuracy
- def simple_objective(trial):
+ def objective_simple(trial):
      import optuna
      import aiauto
      """
@@ -74,7 +74,7 @@ if __name__ == '__main__':
          # runtime_image = "ghcr.io/astral-sh/uv:python3.8-bookworm-slim", # default image for use_gpu False
      )
      time.sleep(5)
-
+
      study = study_wrapper.get_study()

      print('\nBest trials:')
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "aiauto-client"
- version = "0.1.14"
+ version = "0.1.16"
  description = "AI Auto HPO (Hyperparameter Optimization) Client Library"
  readme = "README.md"
  requires-python = ">=3.8"
@@ -188,11 +188,24 @@ class StudyWrapper:
          parallelism: int = 2,
          requirements_file: Optional[str] = None,
          requirements_list: Optional[List[str]] = None,
-         resources_requests: Optional[Dict[str, str]] = {"cpu": "256m", "memory": "256Mi"},
-         resources_limits: Optional[Dict[str, str]] = {"cpu": "256m", "memory": "256Mi"},
+         resources_requests: Optional[Dict[str, str]] = None,
+         resources_limits: Optional[Dict[str, str]] = None,
          runtime_image: Optional[str] = 'ghcr.io/astral-sh/uv:python3.8-bookworm-slim',
          use_gpu: bool = False
      ) -> None:
+         # set resource defaults
+         if resources_requests is None:
+             if use_gpu:
+                 resources_requests = {"cpu": "2", "memory": "4Gi"}
+             else:
+                 resources_requests = {"cpu": "1", "memory": "1Gi"}
+
+         if resources_limits is None:
+             if use_gpu:
+                 resources_limits = {"cpu": "2", "memory": "4Gi"}
+             else:
+                 resources_limits = {"cpu": "1", "memory": "1Gi"}
+
          if runtime_image is None or runtime_image == "":
              if use_gpu:
                  runtime_image = "pytorch/pytorch:2.1.0-cuda12.1-cudnn8-runtime"
@@ -59,9 +59,12 @@ def object_to_json(obj: Union[object, dict, None]) -> str:

          # check whether this is an actual __init__ parameter
          if param_name in valid_params:
-             # special handling for PatientPruner's wrapped_pruner
+             # special handling for PatientPruner's wrapped_pruner
              if class_name == "PatientPruner" and param_name == "wrapped_pruner" and value is not None:
                  kwargs[param_name] = json.loads(object_to_json(value))
+             # special handling for independent_sampler on CmaEsSampler and QMCSampler
+             elif param_name == "independent_sampler" and value is not None and class_name in ["CmaEsSampler", "QMCSampler"]:
+                 kwargs[param_name] = json.loads(object_to_json(value))
              # exclude Callable types (gamma, weights, etc.)
              elif not callable(value):
                  kwargs[param_name] = value
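
With this serializer change, a sampler nested under `independent_sampler` is serialized recursively, the same way PatientPruner's `wrapped_pruner` already was, rather than being dropped like other non-JSON values. A rough round-trip sketch, assuming the same `{"cls": ..., "kwargs": ...}` envelope that the new tests assert for pruners:

```python
import json

import optuna
from aiauto.serializer import object_to_json

# CmaEsSampler falls back to its independent_sampler for parameters
# that CMA-ES cannot optimize jointly
sampler = optuna.samplers.CmaEsSampler(
    n_startup_trials=5,
    independent_sampler=optuna.samplers.RandomSampler(seed=42),
)

parsed = json.loads(object_to_json(sampler))
assert parsed["cls"] == "CmaEsSampler"
# the nested sampler survives as its own {"cls", "kwargs"} object
assert parsed["kwargs"]["independent_sampler"]["cls"] == "RandomSampler"
```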
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: aiauto-client
- Version: 0.1.14
+ Version: 0.1.16
  Summary: AI Auto HPO (Hyperparameter Optimization) Client Library
  Author-email: AIAuto Team <ainode@zeroone.ai>
  Project-URL: Homepage, https://dashboard.common.aiauto.pangyo.ainode.ai
@@ -121,7 +121,7 @@ Functions defined in a Jupyter Notebook or Python REPL cannot be serialized
  import aiauto
  import optuna

- def objective(trial: optuna.trial.Trial):
+ def objective(trial):
      """
      This function runs on an external server.
      Write all imports inside the function.
@@ -170,7 +170,7 @@ time.sleep(5)
  # the created study can be viewed at `https://dashboard.common.aiauto.pangyo.ainode.ai/study`

  # define the objective function
- def objective(trial: optuna.trial.Trial):
+ def objective(trial):
      """A function that runs on the server, not on the user's local machine"""
      x = trial.suggest_float('x', -10, 10)
      y = trial.suggest_float('y', -10, 10)
@@ -214,7 +214,7 @@ time.sleep(5)

  # define the objective function
  # see https://docs.pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
- def objective(trial: optuna.trial.Trial):
+ def objective(trial):
      """
      A function that runs on the server, not on the user's local machine
      All imports must live inside the function
@@ -224,6 +224,8 @@ def objective(trial: optuna.trial.Trial):
      from torch.utils.data import DataLoader, random_split, Subset
      from torchvision import transforms, datasets
      import torch.nn.functional as F
+
+     import optuna

      # sample hyperparameters
      lr = trial.suggest_float('learning_rate', 1e-5, 1e-1, log=True)
@@ -315,7 +317,12 @@ study_wrapper.optimize(
      n_trials=100,
      parallelism=4,
      use_gpu=True,  # use GPU
-     requirements_list=['torch', 'torchvision']  # installed automatically in the Pod
+     runtime_image='pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime',  # default image for use_gpu True
+     # requirements_list=['torch', 'torchvision']  # installed automatically in the Pod  # listing pip packages is slow to download; specifying a torch runtime_image is better
+     resources_requests={
+         "cpu": "2",
+         "memory": "4Gi",
+     },
  )
  time.sleep(5)
  ```
@@ -344,7 +351,7 @@ time.sleep(5)

  # define the objective function
  # see https://docs.pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
- def objective(trial: optuna.trial.Trial):
+ def objective(trial):
      """
      A function that runs on the server, not on the user's local machine
      All imports must live inside the function
@@ -446,7 +453,16 @@ study_wrapper.optimize(
      n_trials=100,
      parallelism=4,
      use_gpu=True,  # use GPU
-     requirements_list=['torch', 'torchvision', 'fvcore']  # installed automatically in the Pod
+     runtime_image='pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime',  # default image for use_gpu True
+     requirements_list=[  # listing pip packages is slow to download; specifying a torch runtime_image is better
+         # 'torch',
+         # 'torchvision',
+         'fvcore',
+     ],  # installed automatically in the Pod
+     resources_requests={
+         "cpu": "2",
+         "memory": "4Gi",
+     },
  )
  time.sleep(5)
  ```
@@ -545,15 +561,15 @@ study = controller.create_study('exp1', direction='minimize')
  time.sleep(5)

  def objective(trial):
-     import numpy as np
-     x = trial.suggest_float('x', -10, 10)
-     return (x - 1.23) ** 2
+     import numpy as np
+     x = trial.suggest_float('x', -10, 10)
+     return (x - 1.23) ** 2

  study.optimize(
-     objective,
-     n_trials=64,
-     parallelism=8,
-     requirements_list=['numpy'],
+     objective,
+     n_trials=64,
+     parallelism=8,
+     requirements_list=['numpy'],
  )
  time.sleep(5)
  ```
@@ -600,17 +616,17 @@ import optuna, aiauto, time

  controller = aiauto.AIAutoController('aiauto_xxx')
  study = controller.create_study(
-     study_name='cnn',
-     direction='minimize',
-     sampler=optuna.samplers.TPESampler(seed=42),
-     pruner=optuna.pruners.MedianPruner(n_startup_trials=5),
+     study_name='cnn',
+     direction='minimize',
+     sampler=optuna.samplers.TPESampler(seed=42),
+     pruner=optuna.pruners.MedianPruner(n_startup_trials=5),
  )
  time.sleep(5)

  def objective(trial):
-     import numpy as np
-     lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
-     return (np.log10(lr) + 2) ** 2
+     import numpy as np
+     lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
+     return (np.log10(lr) + 2) ** 2

  study.optimize(objective, n_trials=50, parallelism=4)
  time.sleep(5)
@@ -15,4 +15,5 @@ src/aiauto_client.egg-info/SOURCES.txt
  src/aiauto_client.egg-info/dependency_links.txt
  src/aiauto_client.egg-info/requires.txt
  src/aiauto_client.egg-info/top_level.txt
- tests/test_local_storage.py
+ tests/test_pruners.py
+ tests/test_samplers.py
@@ -0,0 +1,205 @@
+ #!/usr/bin/env python
+ """
+ Optuna pruner serialization/deserialization tests
+
+ Verifies that every Optuna pruner is serialized and restored correctly.
+ Focuses in particular on the recursive handling of PatientPruner's wrapped_pruner.
+ """
+
+ import sys
+ import os
+ import json
+ import optuna
+ import pytest
+
+ # Add parent directory to path for imports
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../runners'))
+
+ from aiauto.serializer import object_to_json
+ from runner_create_study import from_json, PRUNER_WHITELIST
+
+
+ def test_nop_pruner():
+     """NopPruner test - no parameters"""
+     pruner = optuna.pruners.NopPruner()
+     json_str = object_to_json(pruner)
+
+     # check that the JSON parses
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "NopPruner"
+     # NopPruner takes no parameters, so args may be empty or absent
+
+     # deserialize
+     restored = from_json(json_str, PRUNER_WHITELIST)
+     assert type(restored).__name__ == "NopPruner"
+
+
+ def test_median_pruner():
+     """MedianPruner test - basic parameters"""
+     pruner = optuna.pruners.MedianPruner(
+         n_startup_trials=5,
+         n_warmup_steps=10,
+         interval_steps=2
+     )
+     json_str = object_to_json(pruner)
+
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "MedianPruner"
+     assert parsed["kwargs"]["n_startup_trials"] == 5
+     assert parsed["kwargs"]["n_warmup_steps"] == 10
+     assert parsed["kwargs"]["interval_steps"] == 2
+
+     restored = from_json(json_str, PRUNER_WHITELIST)
+     assert type(restored).__name__ == "MedianPruner"
+     assert restored._n_startup_trials == 5
+     assert restored._n_warmup_steps == 10
+     assert restored._interval_steps == 2
+
+
+ def test_percentile_pruner():
+     """PercentilePruner test - required percentile parameter"""
+     pruner = optuna.pruners.PercentilePruner(
+         percentile=25.0,
+         n_startup_trials=3
+     )
+     json_str = object_to_json(pruner)
+
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "PercentilePruner"
+     assert parsed["kwargs"]["percentile"] == 25.0
+     assert parsed["kwargs"]["n_startup_trials"] == 3
+
+     restored = from_json(json_str, PRUNER_WHITELIST)
+     assert type(restored).__name__ == "PercentilePruner"
+     assert restored._percentile == 25.0
+
+
+ def test_patient_pruner_with_wrapped():
+     """PatientPruner test - recursive wrapped_pruner handling"""
+     wrapped = optuna.pruners.MedianPruner(n_startup_trials=10)
+     pruner = optuna.pruners.PatientPruner(
+         wrapped_pruner=wrapped,
+         patience=3,
+         min_delta=0.1
+     )
+     json_str = object_to_json(pruner)
+
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "PatientPruner"
+     assert parsed["kwargs"]["patience"] == 3
+     assert parsed["kwargs"]["min_delta"] == 0.1
+
+     # check that wrapped_pruner was serialized recursively
+     assert "wrapped_pruner" in parsed["kwargs"]
+     assert parsed["kwargs"]["wrapped_pruner"]["cls"] == "MedianPruner"
+     assert parsed["kwargs"]["wrapped_pruner"]["kwargs"]["n_startup_trials"] == 10
+
+     restored = from_json(json_str, PRUNER_WHITELIST)
+     assert type(restored).__name__ == "PatientPruner"
+     assert restored._patience == 3
+     assert restored._min_delta == 0.1
+     assert type(restored._wrapped_pruner).__name__ == "MedianPruner"
+     assert restored._wrapped_pruner._n_startup_trials == 10
+
+
+ def test_patient_pruner_without_wrapped():
+     """PatientPruner test - wrapped_pruner set to None"""
+     pruner = optuna.pruners.PatientPruner(wrapped_pruner=None, patience=5)
+     json_str = object_to_json(pruner)
+
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "PatientPruner"
+     assert parsed["kwargs"]["patience"] == 5
+     # a None wrapped_pruner may be omitted from the serialized form
+     if "wrapped_pruner" in parsed["kwargs"]:
+         assert parsed["kwargs"]["wrapped_pruner"] is None
+
+     restored = from_json(json_str, PRUNER_WHITELIST)
+     assert type(restored).__name__ == "PatientPruner"
+     assert restored._patience == 5
+     assert restored._wrapped_pruner is None
+
+
+ def test_threshold_pruner():
+     """ThresholdPruner test"""
+     pruner = optuna.pruners.ThresholdPruner(
+         lower=0.1,
+         upper=0.9,
+         n_warmup_steps=5
+     )
+     json_str = object_to_json(pruner)
+
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "ThresholdPruner"
+     assert parsed["kwargs"]["lower"] == 0.1
+     assert parsed["kwargs"]["upper"] == 0.9
+
+     restored = from_json(json_str, PRUNER_WHITELIST)
+     assert type(restored).__name__ == "ThresholdPruner"
+     assert restored._lower == 0.1
+     assert restored._upper == 0.9
+
+
+ def test_successive_halving_pruner():
+     """SuccessiveHalvingPruner test"""
+     pruner = optuna.pruners.SuccessiveHalvingPruner(
+         min_resource=1,
+         reduction_factor=4,
+         min_early_stopping_rate=0
+     )
+     json_str = object_to_json(pruner)
+
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "SuccessiveHalvingPruner"
+
+     restored = from_json(json_str, PRUNER_WHITELIST)
+     assert type(restored).__name__ == "SuccessiveHalvingPruner"
+
+
+ def test_hyperband_pruner():
+     """HyperbandPruner test"""
+     pruner = optuna.pruners.HyperbandPruner(
+         min_resource=1,
+         max_resource=100,
+         reduction_factor=3
+     )
+     json_str = object_to_json(pruner)
+
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "HyperbandPruner"
+     assert parsed["kwargs"]["min_resource"] == 1
+     assert parsed["kwargs"]["max_resource"] == 100
+     assert parsed["kwargs"]["reduction_factor"] == 3
+
+     restored = from_json(json_str, PRUNER_WHITELIST)
+     assert type(restored).__name__ == "HyperbandPruner"
+
+
+ if __name__ == "__main__":
+     # run each test
+     test_nop_pruner()
+     print("✅ NopPruner test passed")
+
+     test_median_pruner()
+     print("✅ MedianPruner test passed")
+
+     test_percentile_pruner()
+     print("✅ PercentilePruner test passed")
+
+     test_patient_pruner_with_wrapped()
+     print("✅ PatientPruner with wrapped test passed")
+
+     test_patient_pruner_without_wrapped()
+     print("✅ PatientPruner without wrapped test passed")
+
+     test_threshold_pruner()
+     print("✅ ThresholdPruner test passed")
+
+     test_successive_halving_pruner()
+     print("✅ SuccessiveHalvingPruner test passed")
+
+     test_hyperband_pruner()
+     print("✅ HyperbandPruner test passed")
+
+     print("\n✅ All pruner tests passed!")
@@ -0,0 +1,226 @@
+ #!/usr/bin/env python
+ """
+ Optuna sampler serialization/deserialization tests
+
+ Verifies that the main Optuna samplers are serialized and restored correctly.
+ Confirms that Callable parameters (gamma, weights, etc.) are excluded from serialization.
+ """
+
+ import sys
+ import os
+ import json
+ import optuna
+ import pytest
+
+ # Add parent directory to path for imports
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../runners'))
+
+ from aiauto.serializer import object_to_json
+ from runner_create_study import from_json, SAMPLER_WHITELIST
+
+
+ def test_random_sampler():
+     """RandomSampler test - seed parameter"""
+     sampler = optuna.samplers.RandomSampler(seed=42)
+     json_str = object_to_json(sampler)
+
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "RandomSampler"
+     # seed is stored, but possibly in a different form
+     if "seed" in parsed["kwargs"]:
+         assert parsed["kwargs"]["seed"] == 42
+
+     restored = from_json(json_str, SAMPLER_WHITELIST)
+     assert type(restored).__name__ == "RandomSampler"
+
+
+ def test_tpe_sampler():
+     """TPESampler test - main parameters"""
+     sampler = optuna.samplers.TPESampler(
+         n_startup_trials=5,
+         n_ei_candidates=10,
+         seed=42,
+         multivariate=True,
+         warn_independent_sampling=False
+     )
+     json_str = object_to_json(sampler)
+
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "TPESampler"
+     assert parsed["kwargs"]["n_startup_trials"] == 5
+     assert parsed["kwargs"]["n_ei_candidates"] == 10
+     assert parsed["kwargs"]["multivariate"] == True
+     assert parsed["kwargs"]["warn_independent_sampling"] == False
+     if "seed" in parsed["kwargs"]:
+         assert parsed["kwargs"]["seed"] == 42
+
+     # Callable parameters (gamma, weights) must be excluded
+     assert "gamma" not in parsed["kwargs"]
+     assert "weights" not in parsed["kwargs"]
+
+     restored = from_json(json_str, SAMPLER_WHITELIST)
+     assert type(restored).__name__ == "TPESampler"
+     assert restored._n_startup_trials == 5
+     assert restored._n_ei_candidates == 10
+     if hasattr(restored, '_seed'):
+         assert restored._seed == 42
+
+
+ def test_tpe_sampler_default():
+     """TPESampler test - defaults"""
+     sampler = optuna.samplers.TPESampler()
+     json_str = object_to_json(sampler)
+
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "TPESampler"
+
+     # check that the default values serialize correctly
+     assert "n_startup_trials" in parsed["kwargs"]
+     assert "n_ei_candidates" in parsed["kwargs"]
+
+     restored = from_json(json_str, SAMPLER_WHITELIST)
+     assert type(restored).__name__ == "TPESampler"
+
+
+ def test_nsgaii_sampler():
+     """NSGAIISampler test - for multi-objective optimization"""
+     sampler = optuna.samplers.NSGAIISampler(
+         population_size=10,
+         mutation_prob=0.1,
+         crossover_prob=0.8,
+         swapping_prob=0.4,
+         seed=42
+     )
+     json_str = object_to_json(sampler)
+
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "NSGAIISampler"
+     assert parsed["kwargs"]["population_size"] == 10
+     if "mutation_prob" in parsed["kwargs"]:
+         assert parsed["kwargs"]["mutation_prob"] == 0.1
+     if "crossover_prob" in parsed["kwargs"]:
+         assert parsed["kwargs"]["crossover_prob"] == 0.8
+     if "swapping_prob" in parsed["kwargs"]:
+         assert parsed["kwargs"]["swapping_prob"] == 0.4
+     if "seed" in parsed["kwargs"]:
+         assert parsed["kwargs"]["seed"] == 42
+
+     # Callable parameters must be excluded or null
+     assert parsed["kwargs"].get("constraints_func") is None
+     assert "elite_population_selection_strategy" not in parsed["kwargs"]
+
+     restored = from_json(json_str, SAMPLER_WHITELIST)
+     assert type(restored).__name__ == "NSGAIISampler"
+     assert restored._population_size == 10
+     if hasattr(restored, '_seed'):
+         assert restored._seed == 42
+
+
+ def test_grid_sampler():
+     """GridSampler test - search_space parameter"""
+     search_space = {
+         "x": [-10, -5, 0, 5, 10],
+         "y": [1, 2, 3]
+     }
+     sampler = optuna.samplers.GridSampler(search_space=search_space)
+     json_str = object_to_json(sampler)
+
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "GridSampler"
+     assert parsed["kwargs"]["search_space"] == search_space
+
+     restored = from_json(json_str, SAMPLER_WHITELIST)
+     assert type(restored).__name__ == "GridSampler"
+     assert restored._search_space == search_space
+
+
+ def test_bruteforce_sampler():
+     """BruteForceSampler test"""
+     sampler = optuna.samplers.BruteForceSampler(seed=42)
+     json_str = object_to_json(sampler)
+
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "BruteForceSampler"
+     if "seed" in parsed["kwargs"]:
+         assert parsed["kwargs"]["seed"] == 42
+
+     restored = from_json(json_str, SAMPLER_WHITELIST)
+     assert type(restored).__name__ == "BruteForceSampler"
+     if hasattr(restored, '_seed'):
+         assert restored._seed == 42
+
+
+ def test_cmaes_sampler():
+     """CmaEsSampler test"""
+     sampler = optuna.samplers.CmaEsSampler(
+         n_startup_trials=5,
+         seed=42
+     )
+     json_str = object_to_json(sampler)
+
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "CmaEsSampler"
+     assert parsed["kwargs"]["n_startup_trials"] == 5
+     if "seed" in parsed["kwargs"]:
+         assert parsed["kwargs"]["seed"] == 42
+     # restart_strategy is deprecated and is not stored
+
+     restored = from_json(json_str, SAMPLER_WHITELIST)
+     assert type(restored).__name__ == "CmaEsSampler"
+     assert restored._n_startup_trials == 5
+     if hasattr(restored, '_seed'):
+         assert restored._seed == 42
+
+
+ def test_qmc_sampler():
+     """QMCSampler test"""
+     sampler = optuna.samplers.QMCSampler(
+         qmc_type="sobol",
+         scramble=True,
+         seed=42
+     )
+     json_str = object_to_json(sampler)
+
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "QMCSampler"
+     assert parsed["kwargs"]["qmc_type"] == "sobol"
+     assert parsed["kwargs"]["scramble"] == True
+     if "seed" in parsed["kwargs"]:
+         assert parsed["kwargs"]["seed"] == 42
+
+     restored = from_json(json_str, SAMPLER_WHITELIST)
+     assert type(restored).__name__ == "QMCSampler"
+     assert restored._qmc_type == "sobol"
+     assert restored._scramble == True
+     if hasattr(restored, '_seed'):
+         assert restored._seed == 42
+
+
+ if __name__ == "__main__":
+     # run each test
+     test_random_sampler()
+     print("✅ RandomSampler test passed")
+
+     test_tpe_sampler()
+     print("✅ TPESampler test passed")
+
+     test_tpe_sampler_default()
+     print("✅ TPESampler (default) test passed")
+
+     test_nsgaii_sampler()
+     print("✅ NSGAIISampler test passed")
+
+     test_grid_sampler()
+     print("✅ GridSampler test passed")
+
+     test_bruteforce_sampler()
+     print("✅ BruteForceSampler test passed")
+
+     test_cmaes_sampler()
+     print("✅ CmaEsSampler test passed")
+
+     test_qmc_sampler()
+     print("✅ QMCSampler test passed")
+
+     print("\n✅ All sampler tests passed!")
@@ -1,59 +0,0 @@
- import aiauto
- import optuna
- from unittest.mock import patch
-
-
- def objective(trial):
-     """Simple quadratic-function optimization example"""
-     # logging via TrialController
-     tc = aiauto.TrialController(trial)
-     tc.log("Starting simple optimization example")
-
-     # sample hyperparameters
-     x = trial.suggest_float('x', -10, 10)
-     y = trial.suggest_float('y', -10, 10)
-
-     # objective: minimize (x-2)² + (y-5)²
-     result = (x - 2) ** 2 + (y - 5) ** 2
-
-     tc.log(f"x={x:.3f}, y={y:.3f}, result={result:.3f}")
-
-     return result
-
-
- def main():
-     print("🚀 AIAuto source-code serialization local test")
-
-     # patch AIAutoController's storage to InMemoryStorage
-     with patch.object(aiauto.AIAutoController, '__init__', lambda self: None):
-         controller = aiauto.AIAutoController()
-         # InMemoryStorage for local testing
-         controller.storage = optuna.storages.InMemoryStorage()
-         controller.artifact_store = optuna.artifacts.FileSystemArtifactStore('./artifacts')
-
-         # source-code serialization test
-         print("\n=== source-code serialization test ===")
-         study_wrapper = controller.create_study(
-             objective=objective,
-             study_name='local_test',
-             direction='minimize'
-         )
-
-         print("✅ StudyWrapper created!")
-
-         # run the optimization
-         print("\n=== running optimization ===")
-         study_wrapper.optimize(n_trials=10)
-
-         # print the results
-         print(f"\n🎉 Optimization finished!")
-         print(f"📊 Best value: {study_wrapper.best_value:.3f}")
-         print(f"🔧 Best params: {study_wrapper.best_params}")
-
-         # theoretical optimum: x=2, y=5, result=0
-         print(f"💡 Theoretical optimum: x=2, y=5, result=0")
-         print(f"📈 Error: {study_wrapper.best_value:.3f}")
-
-
- if __name__ == "__main__":
-     main()