aiauto-client 0.1.13__tar.gz → 0.1.15__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (22)
  1. {aiauto_client-0.1.13 → aiauto_client-0.1.15}/PKG-INFO +31 -17
  2. {aiauto_client-0.1.13 → aiauto_client-0.1.15}/README.md +30 -16
  3. {aiauto_client-0.1.13 → aiauto_client-0.1.15}/examples/example_torch_multiple_objective.py +12 -4
  4. {aiauto_client-0.1.13 → aiauto_client-0.1.15}/examples/example_torch_single_objective.py +10 -2
  5. {aiauto_client-0.1.13 → aiauto_client-0.1.15}/pyproject.toml +1 -1
  6. {aiauto_client-0.1.13 → aiauto_client-0.1.15}/src/aiauto/core.py +15 -2
  7. {aiauto_client-0.1.13 → aiauto_client-0.1.15}/src/aiauto/serializer.py +20 -9
  8. {aiauto_client-0.1.13 → aiauto_client-0.1.15}/src/aiauto_client.egg-info/PKG-INFO +31 -17
  9. {aiauto_client-0.1.13 → aiauto_client-0.1.15}/src/aiauto_client.egg-info/SOURCES.txt +2 -1
  10. aiauto_client-0.1.15/tests/test_pruners.py +205 -0
  11. aiauto_client-0.1.15/tests/test_samplers.py +226 -0
  12. aiauto_client-0.1.13/tests/test_local_storage.py +0 -59
  13. {aiauto_client-0.1.13 → aiauto_client-0.1.15}/MANIFEST.in +0 -0
  14. {aiauto_client-0.1.13 → aiauto_client-0.1.15}/examples/simple_example.py +0 -0
  15. {aiauto_client-0.1.13 → aiauto_client-0.1.15}/setup.cfg +0 -0
  16. {aiauto_client-0.1.13 → aiauto_client-0.1.15}/src/aiauto/__init__.py +0 -0
  17. {aiauto_client-0.1.13 → aiauto_client-0.1.15}/src/aiauto/_config.py +0 -0
  18. {aiauto_client-0.1.13 → aiauto_client-0.1.15}/src/aiauto/constants.py +0 -0
  19. {aiauto_client-0.1.13 → aiauto_client-0.1.15}/src/aiauto/http_client.py +0 -0
  20. {aiauto_client-0.1.13 → aiauto_client-0.1.15}/src/aiauto_client.egg-info/dependency_links.txt +0 -0
  21. {aiauto_client-0.1.13 → aiauto_client-0.1.15}/src/aiauto_client.egg-info/requires.txt +0 -0
  22. {aiauto_client-0.1.13 → aiauto_client-0.1.15}/src/aiauto_client.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: aiauto-client
- Version: 0.1.13
+ Version: 0.1.15
  Summary: AI Auto HPO (Hyperparameter Optimization) Client Library
  Author-email: AIAuto Team <ainode@zeroone.ai>
  Project-URL: Homepage, https://dashboard.common.aiauto.pangyo.ainode.ai
@@ -315,7 +315,12 @@ study_wrapper.optimize(
      n_trials=100,
      parallelism=4,
      use_gpu=True, # use GPU
-     requirements_list=['torch', 'torchvision'] # installed automatically in the Pod
+     runtime_image='pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime', # default image for use_gpu True
+     # requirements_list=['torch', 'torchvision'] # installed automatically in the Pod # an explicit pip list is slow to download; better to point runtime_image at a torch image
+     resources_requests={
+         "cpu": "2",
+         "memory": "4Gi",
+     },
  )
  time.sleep(5)
  ```
@@ -446,7 +451,16 @@ study_wrapper.optimize(
      n_trials=100,
      parallelism=4,
      use_gpu=True, # use GPU
-     requirements_list=['torch', 'torchvision', 'fvcore'] # installed automatically in the Pod
+     runtime_image='pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime', # default image for use_gpu True
+     requirements_list=[ # an explicit pip list is slow to download; better to point runtime_image at a torch image
+         # 'torch',
+         # 'torchvision',
+         'fvcore',
+     ], # installed automatically in the Pod
+     resources_requests={
+         "cpu": "2",
+         "memory": "4Gi",
+     },
  )
  time.sleep(5)
  ```
@@ -545,15 +559,15 @@ study = controller.create_study('exp1', direction='minimize')
  time.sleep(5)

  def objective(trial):
-     import numpy as np
-     x = trial.suggest_float('x', -10, 10)
-     return (x - 1.23) ** 2
+     import numpy as np
+     x = trial.suggest_float('x', -10, 10)
+     return (x - 1.23) ** 2

  study.optimize(
-     objective,
-     n_trials=64,
-     parallelism=8,
-     requirements_list=['numpy'],
+     objective,
+     n_trials=64,
+     parallelism=8,
+     requirements_list=['numpy'],
  )
  time.sleep(5)
  ```
@@ -600,17 +614,17 @@ import optuna, aiauto, time

  controller = aiauto.AIAutoController('aiauto_xxx')
  study = controller.create_study(
-     study_name='cnn',
-     direction='minimize',
-     sampler=optuna.samplers.TPESampler(seed=42),
-     pruner=optuna.pruners.MedianPruner(n_startup_trials=5),
+     study_name='cnn',
+     direction='minimize',
+     sampler=optuna.samplers.TPESampler(seed=42),
+     pruner=optuna.pruners.MedianPruner(n_startup_trials=5),
  )
  time.sleep(5)

  def objective(trial):
-     import numpy as np
-     lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
-     return (np.log10(lr) + 2) ** 2
+     import numpy as np
+     lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
+     return (np.log10(lr) + 2) ** 2

  study.optimize(objective, n_trials=50, parallelism=4)
  time.sleep(5)
@@ -288,7 +288,12 @@ study_wrapper.optimize(
      n_trials=100,
      parallelism=4,
      use_gpu=True, # use GPU
-     requirements_list=['torch', 'torchvision'] # installed automatically in the Pod
+     runtime_image='pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime', # default image for use_gpu True
+     # requirements_list=['torch', 'torchvision'] # installed automatically in the Pod # an explicit pip list is slow to download; better to point runtime_image at a torch image
+     resources_requests={
+         "cpu": "2",
+         "memory": "4Gi",
+     },
  )
  time.sleep(5)
  ```
@@ -419,7 +424,16 @@ study_wrapper.optimize(
      n_trials=100,
      parallelism=4,
      use_gpu=True, # use GPU
-     requirements_list=['torch', 'torchvision', 'fvcore'] # installed automatically in the Pod
+     runtime_image='pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime', # default image for use_gpu True
+     requirements_list=[ # an explicit pip list is slow to download; better to point runtime_image at a torch image
+         # 'torch',
+         # 'torchvision',
+         'fvcore',
+     ], # installed automatically in the Pod
+     resources_requests={
+         "cpu": "2",
+         "memory": "4Gi",
+     },
  )
  time.sleep(5)
  ```
@@ -518,15 +532,15 @@ study = controller.create_study('exp1', direction='minimize')
  time.sleep(5)

  def objective(trial):
-     import numpy as np
-     x = trial.suggest_float('x', -10, 10)
-     return (x - 1.23) ** 2
+     import numpy as np
+     x = trial.suggest_float('x', -10, 10)
+     return (x - 1.23) ** 2

  study.optimize(
-     objective,
-     n_trials=64,
-     parallelism=8,
-     requirements_list=['numpy'],
+     objective,
+     n_trials=64,
+     parallelism=8,
+     requirements_list=['numpy'],
  )
  time.sleep(5)
  ```
@@ -573,17 +587,17 @@ import optuna, aiauto, time

  controller = aiauto.AIAutoController('aiauto_xxx')
  study = controller.create_study(
-     study_name='cnn',
-     direction='minimize',
-     sampler=optuna.samplers.TPESampler(seed=42),
-     pruner=optuna.pruners.MedianPruner(n_startup_trials=5),
+     study_name='cnn',
+     direction='minimize',
+     sampler=optuna.samplers.TPESampler(seed=42),
+     pruner=optuna.pruners.MedianPruner(n_startup_trials=5),
  )
  time.sleep(5)

  def objective(trial):
-     import numpy as np
-     lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
-     return (np.log10(lr) + 2) ** 2
+     import numpy as np
+     lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
+     return (np.log10(lr) + 2) ** 2

  study.optimize(objective, n_trials=50, parallelism=4)
  time.sleep(5)
@@ -440,14 +440,18 @@ if __name__ == '__main__':
      n_trials=100,
      parallelism=4, # use parallelism instead of n_jobs
      use_gpu=True, # use GPU
-     # runtime_image = "pytorch/pytorch:2.1.0-cuda12.1-cudnn8-runtime", # default image for use_gpu True
+     runtime_image='pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime', # default image for use_gpu True
      # passed as a list instead of requirements.txt (installed on the external server)
-     requirements_list=[
-         'torch',
-         'torchvision',
+     requirements_list=[ # an explicit pip list is slow to download; better to point runtime_image at a torch image
+         # 'torch',
+         # 'torchvision',
          'fvcore', # for FLOPS computation (when using multi-objective)
      ],
      # CallbackTopNArtifact cannot be used on the client side (the runner sets it automatically)
+     resources_requests={
+         "cpu": "2",
+         "memory": "4Gi",
+     },
  )
  time.sleep(5)

@@ -470,4 +474,8 @@ if __name__ == '__main__':
      use_gpu=True,
      # runtime_image = "pytorch/pytorch:2.1.0-cuda12.1-cudnn8-runtime", # default image for use_gpu True
      requirements_list=['torch', 'torchvision', 'fvcore'],
+     resources_requests={
+         "cpu": "2",
+         "memory": "4Gi",
+     },
  )
@@ -447,10 +447,14 @@ if __name__ == '__main__':
      n_trials=100,
      parallelism=4, # use parallelism instead of n_jobs
      use_gpu=True, # use GPU
-     # runtime_image = "pytorch/pytorch:2.1.0-cuda12.1-cudnn8-runtime", # default image for use_gpu True
+     runtime_image='pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime', # default image for use_gpu True
      # packages imported inside the objective function, passed as a list instead of requirements.txt (installed inside the pod)
-     requirements_list=['torch', 'torchvision'],
+     # requirements_list=['torch', 'torchvision'], # an explicit pip list is slow to download; better to point runtime_image at a torch image
      # CallbackTopNArtifact cannot be used on the client side (the runner sets it automatically)
+     resources_requests={
+         "cpu": "2",
+         "memory": "4Gi",
+     },
  )
  time.sleep(5)

@@ -473,4 +477,8 @@ if __name__ == '__main__':
      use_gpu=True,
      # runtime_image = "pytorch/pytorch:2.1.0-cuda12.1-cudnn8-runtime", # default image for use_gpu True
      requirements_list=['torch', 'torchvision'],
+     resources_requests={
+         "cpu": "2",
+         "memory": "4Gi",
+     },
  )
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "aiauto-client"
- version = "0.1.13"
+ version = "0.1.15"
  description = "AI Auto HPO (Hyperparameter Optimization) Client Library"
  readme = "README.md"
  requires-python = ">=3.8"
@@ -188,11 +188,24 @@ class StudyWrapper:
          parallelism: int = 2,
          requirements_file: Optional[str] = None,
          requirements_list: Optional[List[str]] = None,
-         resources_requests: Optional[Dict[str, str]] = {"cpu": "256m", "memory": "256Mi"},
-         resources_limits: Optional[Dict[str, str]] = {"cpu": "256m", "memory": "256Mi"},
+         resources_requests: Optional[Dict[str, str]] = None,
+         resources_limits: Optional[Dict[str, str]] = None,
          runtime_image: Optional[str] = 'ghcr.io/astral-sh/uv:python3.8-bookworm-slim',
          use_gpu: bool = False
      ) -> None:
+         # Set resource defaults
+         if resources_requests is None:
+             if use_gpu:
+                 resources_requests = {"cpu": "2", "memory": "4Gi"}
+             else:
+                 resources_requests = {"cpu": "1", "memory": "1Gi"}
+
+         if resources_limits is None:
+             if use_gpu:
+                 resources_limits = {"cpu": "2", "memory": "4Gi"}
+             else:
+                 resources_limits = {"cpu": "1", "memory": "1Gi"}
+
          if runtime_image is None or runtime_image == "":
              if use_gpu:
                  runtime_image = "pytorch/pytorch:2.1.0-cuda12.1-cudnn8-runtime"
@@ -47,16 +47,27 @@ def object_to_json(obj: Union[object, dict, None]) -> str:
      if not module_name.startswith('optuna.'):
          raise ValueError(f"only optuna core classes are supported: {class_name}")

-     sig = inspect.signature(cls)
+     # Grab only the actual parameters of __init__
+     sig = inspect.signature(cls.__init__)
+     valid_params = set(sig.parameters.keys()) - {'self'}
+
+     # Optuna objects store constructor parameters in __dict__ as _param_name
      kwargs = {}
-
-     for param_name, param in sig.parameters.items():
-         if param_name == 'self':
-             continue
-         if hasattr(obj, param_name):
-             value = getattr(obj, param_name)
-             if param.default != value:
-                 kwargs[param_name] = value
+     for key, value in obj.__dict__.items():
+         if key.startswith('_'):
+             param_name = key[1:]  # strip the leading underscore
+
+             # Check that it really is an __init__ parameter
+             if param_name in valid_params:
+                 # Special-case PatientPruner's wrapped_pruner
+                 if class_name == "PatientPruner" and param_name == "wrapped_pruner" and value is not None:
+                     kwargs[param_name] = json.loads(object_to_json(value))
+                 # Special-case independent_sampler for CmaEsSampler and QMCSampler
+                 elif param_name == "independent_sampler" and value is not None and class_name in ["CmaEsSampler", "QMCSampler"]:
+                     kwargs[param_name] = json.loads(object_to_json(value))
+                 # Exclude callable values (gamma, weights, etc.)
+                 elif not callable(value):
+                     kwargs[param_name] = value

      return json.dumps({
          "cls": class_name,
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: aiauto-client
- Version: 0.1.13
+ Version: 0.1.15
  Summary: AI Auto HPO (Hyperparameter Optimization) Client Library
  Author-email: AIAuto Team <ainode@zeroone.ai>
  Project-URL: Homepage, https://dashboard.common.aiauto.pangyo.ainode.ai
@@ -315,7 +315,12 @@ study_wrapper.optimize(
      n_trials=100,
      parallelism=4,
      use_gpu=True, # use GPU
-     requirements_list=['torch', 'torchvision'] # installed automatically in the Pod
+     runtime_image='pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime', # default image for use_gpu True
+     # requirements_list=['torch', 'torchvision'] # installed automatically in the Pod # an explicit pip list is slow to download; better to point runtime_image at a torch image
+     resources_requests={
+         "cpu": "2",
+         "memory": "4Gi",
+     },
  )
  time.sleep(5)
  ```
@@ -446,7 +451,16 @@ study_wrapper.optimize(
      n_trials=100,
      parallelism=4,
      use_gpu=True, # use GPU
-     requirements_list=['torch', 'torchvision', 'fvcore'] # installed automatically in the Pod
+     runtime_image='pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime', # default image for use_gpu True
+     requirements_list=[ # an explicit pip list is slow to download; better to point runtime_image at a torch image
+         # 'torch',
+         # 'torchvision',
+         'fvcore',
+     ], # installed automatically in the Pod
+     resources_requests={
+         "cpu": "2",
+         "memory": "4Gi",
+     },
  )
  time.sleep(5)
  ```
@@ -545,15 +559,15 @@ study = controller.create_study('exp1', direction='minimize')
  time.sleep(5)

  def objective(trial):
-     import numpy as np
-     x = trial.suggest_float('x', -10, 10)
-     return (x - 1.23) ** 2
+     import numpy as np
+     x = trial.suggest_float('x', -10, 10)
+     return (x - 1.23) ** 2

  study.optimize(
-     objective,
-     n_trials=64,
-     parallelism=8,
-     requirements_list=['numpy'],
+     objective,
+     n_trials=64,
+     parallelism=8,
+     requirements_list=['numpy'],
  )
  time.sleep(5)
  ```
@@ -600,17 +614,17 @@ import optuna, aiauto, time

  controller = aiauto.AIAutoController('aiauto_xxx')
  study = controller.create_study(
-     study_name='cnn',
-     direction='minimize',
-     sampler=optuna.samplers.TPESampler(seed=42),
-     pruner=optuna.pruners.MedianPruner(n_startup_trials=5),
+     study_name='cnn',
+     direction='minimize',
+     sampler=optuna.samplers.TPESampler(seed=42),
+     pruner=optuna.pruners.MedianPruner(n_startup_trials=5),
  )
  time.sleep(5)

  def objective(trial):
-     import numpy as np
-     lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
-     return (np.log10(lr) + 2) ** 2
+     import numpy as np
+     lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
+     return (np.log10(lr) + 2) ** 2

  study.optimize(objective, n_trials=50, parallelism=4)
  time.sleep(5)
@@ -15,4 +15,5 @@ src/aiauto_client.egg-info/SOURCES.txt
  src/aiauto_client.egg-info/dependency_links.txt
  src/aiauto_client.egg-info/requires.txt
  src/aiauto_client.egg-info/top_level.txt
- tests/test_local_storage.py
+ tests/test_pruners.py
+ tests/test_samplers.py
@@ -0,0 +1,205 @@
+ #!/usr/bin/env python
+ """
+ Optuna pruner serialization/deserialization tests
+ 
+ Verifies that every Optuna pruner serializes and restores correctly.
+ Focuses in particular on the recursive handling of PatientPruner's wrapped_pruner.
+ """
+ 
+ import sys
+ import os
+ import json
+ import optuna
+ import pytest
+ 
+ # Add parent directory to path for imports
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../runners'))
+ 
+ from aiauto.serializer import object_to_json
+ from runner_create_study import from_json, PRUNER_WHITELIST
+ 
+ 
+ def test_nop_pruner():
+     """NopPruner test - no parameters"""
+     pruner = optuna.pruners.NopPruner()
+     json_str = object_to_json(pruner)
+ 
+     # Check that the JSON parses
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "NopPruner"
+     # NopPruner takes no parameters, so kwargs may be empty or absent
+ 
+     # Deserialize
+     restored = from_json(json_str, PRUNER_WHITELIST)
+     assert type(restored).__name__ == "NopPruner"
+ 
+ 
+ def test_median_pruner():
+     """MedianPruner test - basic parameters"""
+     pruner = optuna.pruners.MedianPruner(
+         n_startup_trials=5,
+         n_warmup_steps=10,
+         interval_steps=2
+     )
+     json_str = object_to_json(pruner)
+ 
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "MedianPruner"
+     assert parsed["kwargs"]["n_startup_trials"] == 5
+     assert parsed["kwargs"]["n_warmup_steps"] == 10
+     assert parsed["kwargs"]["interval_steps"] == 2
+ 
+     restored = from_json(json_str, PRUNER_WHITELIST)
+     assert type(restored).__name__ == "MedianPruner"
+     assert restored._n_startup_trials == 5
+     assert restored._n_warmup_steps == 10
+     assert restored._interval_steps == 2
+ 
+ 
+ def test_percentile_pruner():
+     """PercentilePruner test - required percentile parameter"""
+     pruner = optuna.pruners.PercentilePruner(
+         percentile=25.0,
+         n_startup_trials=3
+     )
+     json_str = object_to_json(pruner)
+ 
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "PercentilePruner"
+     assert parsed["kwargs"]["percentile"] == 25.0
+     assert parsed["kwargs"]["n_startup_trials"] == 3
+ 
+     restored = from_json(json_str, PRUNER_WHITELIST)
+     assert type(restored).__name__ == "PercentilePruner"
+     assert restored._percentile == 25.0
+ 
+ 
+ def test_patient_pruner_with_wrapped():
+     """PatientPruner test - recursive wrapped_pruner handling"""
+     wrapped = optuna.pruners.MedianPruner(n_startup_trials=10)
+     pruner = optuna.pruners.PatientPruner(
+         wrapped_pruner=wrapped,
+         patience=3,
+         min_delta=0.1
+     )
+     json_str = object_to_json(pruner)
+ 
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "PatientPruner"
+     assert parsed["kwargs"]["patience"] == 3
+     assert parsed["kwargs"]["min_delta"] == 0.1
+ 
+     # Check that wrapped_pruner was serialized recursively
+     assert "wrapped_pruner" in parsed["kwargs"]
+     assert parsed["kwargs"]["wrapped_pruner"]["cls"] == "MedianPruner"
+     assert parsed["kwargs"]["wrapped_pruner"]["kwargs"]["n_startup_trials"] == 10
+ 
+     restored = from_json(json_str, PRUNER_WHITELIST)
+     assert type(restored).__name__ == "PatientPruner"
+     assert restored._patience == 3
+     assert restored._min_delta == 0.1
+     assert type(restored._wrapped_pruner).__name__ == "MedianPruner"
+     assert restored._wrapped_pruner._n_startup_trials == 10
+ 
+ 
+ def test_patient_pruner_without_wrapped():
+     """PatientPruner test - with wrapped_pruner=None"""
+     pruner = optuna.pruners.PatientPruner(wrapped_pruner=None, patience=5)
+     json_str = object_to_json(pruner)
+ 
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "PatientPruner"
+     assert parsed["kwargs"]["patience"] == 5
+     # a None wrapped_pruner may be omitted from the serialized form
+     if "wrapped_pruner" in parsed["kwargs"]:
+         assert parsed["kwargs"]["wrapped_pruner"] is None
+ 
+     restored = from_json(json_str, PRUNER_WHITELIST)
+     assert type(restored).__name__ == "PatientPruner"
+     assert restored._patience == 5
+     assert restored._wrapped_pruner is None
+ 
+ 
+ def test_threshold_pruner():
+     """ThresholdPruner test"""
+     pruner = optuna.pruners.ThresholdPruner(
+         lower=0.1,
+         upper=0.9,
+         n_warmup_steps=5
+     )
+     json_str = object_to_json(pruner)
+ 
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "ThresholdPruner"
+     assert parsed["kwargs"]["lower"] == 0.1
+     assert parsed["kwargs"]["upper"] == 0.9
+ 
+     restored = from_json(json_str, PRUNER_WHITELIST)
+     assert type(restored).__name__ == "ThresholdPruner"
+     assert restored._lower == 0.1
+     assert restored._upper == 0.9
+ 
+ 
+ def test_successive_halving_pruner():
+     """SuccessiveHalvingPruner test"""
+     pruner = optuna.pruners.SuccessiveHalvingPruner(
+         min_resource=1,
+         reduction_factor=4,
+         min_early_stopping_rate=0
+     )
+     json_str = object_to_json(pruner)
+ 
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "SuccessiveHalvingPruner"
+ 
+     restored = from_json(json_str, PRUNER_WHITELIST)
+     assert type(restored).__name__ == "SuccessiveHalvingPruner"
+ 
+ 
+ def test_hyperband_pruner():
+     """HyperbandPruner test"""
+     pruner = optuna.pruners.HyperbandPruner(
+         min_resource=1,
+         max_resource=100,
+         reduction_factor=3
+     )
+     json_str = object_to_json(pruner)
+ 
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "HyperbandPruner"
+     assert parsed["kwargs"]["min_resource"] == 1
+     assert parsed["kwargs"]["max_resource"] == 100
+     assert parsed["kwargs"]["reduction_factor"] == 3
+ 
+     restored = from_json(json_str, PRUNER_WHITELIST)
+     assert type(restored).__name__ == "HyperbandPruner"
+ 
+ 
+ if __name__ == "__main__":
+     # Run each test
+     test_nop_pruner()
+     print("✅ NopPruner test passed")
+ 
+     test_median_pruner()
+     print("✅ MedianPruner test passed")
+ 
+     test_percentile_pruner()
+     print("✅ PercentilePruner test passed")
+ 
+     test_patient_pruner_with_wrapped()
+     print("✅ PatientPruner with wrapped test passed")
+ 
+     test_patient_pruner_without_wrapped()
+     print("✅ PatientPruner without wrapped test passed")
+ 
+     test_threshold_pruner()
+     print("✅ ThresholdPruner test passed")
+ 
+     test_successive_halving_pruner()
+     print("✅ SuccessiveHalvingPruner test passed")
+ 
+     test_hyperband_pruner()
+     print("✅ HyperbandPruner test passed")
+ 
+     print("\n✅ All Pruner tests passed!")
@@ -0,0 +1,226 @@
+ #!/usr/bin/env python
+ """
+ Optuna sampler serialization/deserialization tests
+ 
+ Verifies that the main Optuna samplers serialize and restore correctly.
+ Confirms that callable parameters (gamma, weights, etc.) are excluded from serialization.
+ """
+ 
+ import sys
+ import os
+ import json
+ import optuna
+ import pytest
+ 
+ # Add parent directory to path for imports
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../runners'))
+ 
+ from aiauto.serializer import object_to_json
+ from runner_create_study import from_json, SAMPLER_WHITELIST
+ 
+ 
+ def test_random_sampler():
+     """RandomSampler test - seed parameter"""
+     sampler = optuna.samplers.RandomSampler(seed=42)
+     json_str = object_to_json(sampler)
+ 
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "RandomSampler"
+     # seed is saved, though it may be stored in a different form
+     if "seed" in parsed["kwargs"]:
+         assert parsed["kwargs"]["seed"] == 42
+ 
+     restored = from_json(json_str, SAMPLER_WHITELIST)
+     assert type(restored).__name__ == "RandomSampler"
+ 
+ 
+ def test_tpe_sampler():
+     """TPESampler test - main parameters"""
+     sampler = optuna.samplers.TPESampler(
+         n_startup_trials=5,
+         n_ei_candidates=10,
+         seed=42,
+         multivariate=True,
+         warn_independent_sampling=False
+     )
+     json_str = object_to_json(sampler)
+ 
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "TPESampler"
+     assert parsed["kwargs"]["n_startup_trials"] == 5
+     assert parsed["kwargs"]["n_ei_candidates"] == 10
+     assert parsed["kwargs"]["multivariate"] == True
+     assert parsed["kwargs"]["warn_independent_sampling"] == False
+     if "seed" in parsed["kwargs"]:
+         assert parsed["kwargs"]["seed"] == 42
+ 
+     # Callable parameters (gamma, weights) must be excluded
+     assert "gamma" not in parsed["kwargs"]
+     assert "weights" not in parsed["kwargs"]
+ 
+     restored = from_json(json_str, SAMPLER_WHITELIST)
+     assert type(restored).__name__ == "TPESampler"
+     assert restored._n_startup_trials == 5
+     assert restored._n_ei_candidates == 10
+     if hasattr(restored, '_seed'):
+         assert restored._seed == 42
+ 
+ 
+ def test_tpe_sampler_default():
+     """TPESampler test - defaults"""
+     sampler = optuna.samplers.TPESampler()
+     json_str = object_to_json(sampler)
+ 
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "TPESampler"
+ 
+     # Check that the defaults serialize correctly
+     assert "n_startup_trials" in parsed["kwargs"]
+     assert "n_ei_candidates" in parsed["kwargs"]
+ 
+     restored = from_json(json_str, SAMPLER_WHITELIST)
+     assert type(restored).__name__ == "TPESampler"
+ 
+ 
+ def test_nsgaii_sampler():
+     """NSGAIISampler test - for multi-objective optimization"""
+     sampler = optuna.samplers.NSGAIISampler(
+         population_size=10,
+         mutation_prob=0.1,
+         crossover_prob=0.8,
+         swapping_prob=0.4,
+         seed=42
+     )
+     json_str = object_to_json(sampler)
+ 
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "NSGAIISampler"
+     assert parsed["kwargs"]["population_size"] == 10
+     if "mutation_prob" in parsed["kwargs"]:
+         assert parsed["kwargs"]["mutation_prob"] == 0.1
+     if "crossover_prob" in parsed["kwargs"]:
+         assert parsed["kwargs"]["crossover_prob"] == 0.8
+     if "swapping_prob" in parsed["kwargs"]:
+         assert parsed["kwargs"]["swapping_prob"] == 0.4
+     if "seed" in parsed["kwargs"]:
+         assert parsed["kwargs"]["seed"] == 42
+ 
+     # Callable parameters must be excluded or null
+     assert parsed["kwargs"].get("constraints_func") is None
+     assert "elite_population_selection_strategy" not in parsed["kwargs"]
+ 
+     restored = from_json(json_str, SAMPLER_WHITELIST)
+     assert type(restored).__name__ == "NSGAIISampler"
+     assert restored._population_size == 10
+     if hasattr(restored, '_seed'):
+         assert restored._seed == 42
+ 
+ 
+ def test_grid_sampler():
+     """GridSampler test - search_space parameter"""
+     search_space = {
+         "x": [-10, -5, 0, 5, 10],
+         "y": [1, 2, 3]
+     }
+     sampler = optuna.samplers.GridSampler(search_space=search_space)
+     json_str = object_to_json(sampler)
+ 
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "GridSampler"
+     assert parsed["kwargs"]["search_space"] == search_space
+ 
+     restored = from_json(json_str, SAMPLER_WHITELIST)
+     assert type(restored).__name__ == "GridSampler"
+     assert restored._search_space == search_space
+ 
+ 
+ def test_bruteforce_sampler():
+     """BruteForceSampler test"""
+     sampler = optuna.samplers.BruteForceSampler(seed=42)
+     json_str = object_to_json(sampler)
+ 
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "BruteForceSampler"
+     if "seed" in parsed["kwargs"]:
+         assert parsed["kwargs"]["seed"] == 42
+ 
+     restored = from_json(json_str, SAMPLER_WHITELIST)
+     assert type(restored).__name__ == "BruteForceSampler"
+     if hasattr(restored, '_seed'):
+         assert restored._seed == 42
+ 
+ 
+ def test_cmaes_sampler():
+     """CmaEsSampler test"""
+     sampler = optuna.samplers.CmaEsSampler(
+         n_startup_trials=5,
+         seed=42
+     )
+     json_str = object_to_json(sampler)
+ 
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "CmaEsSampler"
+     assert parsed["kwargs"]["n_startup_trials"] == 5
+     if "seed" in parsed["kwargs"]:
+         assert parsed["kwargs"]["seed"] == 42
+     # restart_strategy is deprecated and is not saved
+ 
+     restored = from_json(json_str, SAMPLER_WHITELIST)
+     assert type(restored).__name__ == "CmaEsSampler"
+     assert restored._n_startup_trials == 5
+     if hasattr(restored, '_seed'):
+         assert restored._seed == 42
+ 
+ 
+ def test_qmc_sampler():
+     """QMCSampler test"""
+     sampler = optuna.samplers.QMCSampler(
+         qmc_type="sobol",
+         scramble=True,
+         seed=42
+     )
+     json_str = object_to_json(sampler)
+ 
+     parsed = json.loads(json_str)
+     assert parsed["cls"] == "QMCSampler"
+     assert parsed["kwargs"]["qmc_type"] == "sobol"
+     assert parsed["kwargs"]["scramble"] == True
+     if "seed" in parsed["kwargs"]:
+         assert parsed["kwargs"]["seed"] == 42
+ 
+     restored = from_json(json_str, SAMPLER_WHITELIST)
+     assert type(restored).__name__ == "QMCSampler"
+     assert restored._qmc_type == "sobol"
+     assert restored._scramble == True
+     if hasattr(restored, '_seed'):
+         assert restored._seed == 42
+ 
+ 
+ if __name__ == "__main__":
+     # Run each test
+     test_random_sampler()
+     print("✅ RandomSampler test passed")
+ 
+     test_tpe_sampler()
+     print("✅ TPESampler test passed")
+ 
+     test_tpe_sampler_default()
+     print("✅ TPESampler (default) test passed")
+ 
+     test_nsgaii_sampler()
+     print("✅ NSGAIISampler test passed")
+ 
+     test_grid_sampler()
+     print("✅ GridSampler test passed")
+ 
+     test_bruteforce_sampler()
+     print("✅ BruteForceSampler test passed")
+ 
+     test_cmaes_sampler()
+     print("✅ CmaEsSampler test passed")
+ 
+     test_qmc_sampler()
+     print("✅ QMCSampler test passed")
+ 
+     print("\n✅ All Sampler tests passed!")
@@ -1,59 +0,0 @@
- import aiauto
- import optuna
- from unittest.mock import patch
- 
- 
- def objective(trial):
-     """Simple quadratic-function optimization example"""
-     # Logging via TrialController
-     tc = aiauto.TrialController(trial)
-     tc.log("Starting simple optimization example")
- 
-     # Sample hyperparameters
-     x = trial.suggest_float('x', -10, 10)
-     y = trial.suggest_float('y', -10, 10)
- 
-     # Objective: minimize (x-2)² + (y-5)²
-     result = (x - 2) ** 2 + (y - 5) ** 2
- 
-     tc.log(f"x={x:.3f}, y={y:.3f}, result={result:.3f}")
- 
-     return result
- 
- 
- def main():
-     print("🚀 AIAuto source-code serialization local test")
- 
-     # Patch AIAutoController's storage with InMemoryStorage
-     with patch.object(aiauto.AIAutoController, '__init__', lambda self: None):
-         controller = aiauto.AIAutoController()
-         # InMemoryStorage for local testing
-         controller.storage = optuna.storages.InMemoryStorage()
-         controller.artifact_store = optuna.artifacts.FileSystemArtifactStore('./artifacts')
- 
-         # Source-code serialization test
-         print("\n=== Source-code serialization test ===")
-         study_wrapper = controller.create_study(
-             objective=objective,
-             study_name='local_test',
-             direction='minimize'
-         )
- 
-         print("✅ StudyWrapper created successfully!")
- 
-         # Run the optimization
-         print("\n=== Running optimization ===")
-         study_wrapper.optimize(n_trials=10)
- 
-         # Print results
-         print(f"\n🎉 Optimization finished!")
-         print(f"📊 Best value: {study_wrapper.best_value:.3f}")
-         print(f"🔧 Best params: {study_wrapper.best_params}")
- 
-         # Theoretical optimum: x=2, y=5, result=0
-         print(f"💡 Theoretical optimum: x=2, y=5, result=0")
-         print(f"📈 Error: {study_wrapper.best_value:.3f}")
- 
- 
- if __name__ == "__main__":
-     main()