pyfemtet-0.6.6-py3-none-any.whl → pyfemtet-0.7.1-py3-none-any.whl

pyfemtet/__init__.py CHANGED
@@ -1 +1 @@
-__version__ = "0.6.6"
+__version__ = "0.7.1"
@@ -1,5 +1,4 @@
 from time import time, sleep
-import logging

 from win32com.client import CDispatch
 from femtetutils import util
@@ -9,7 +8,9 @@ from pyfemtet.dispatch_extensions._impl import _get_pid
 from pyfemtet.core import _version
 from pyfemtet._message import Msg

-logger = logging.getLogger('fem')
+from pyfemtet.logger import get_module_logger
+
+logger = get_module_logger('util.femtet.exit', __name__)


 def _exit_or_force_terminate(timeout, Femtet: CDispatch, force=True):
@@ -0,0 +1,10 @@
+from contextlib import nullcontext
+from dask.distributed import Lock
+
+
+def lock_or_no_lock(name: str, client=None):
+    lock = Lock(name, client)
+    if lock.client is None:
+        return nullcontext()
+    else:
+        return lock
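
A brief usage sketch of the new lock helper above. The helper's module path is not shown in this diff, so the function is repeated verbatim to keep the example self-contained; the lock name is illustrative. The idea: outside a dask client/worker context, Lock.client is None (the very condition the helper relies on), so the call degrades to a no-op nullcontext and the same with-block works in both serial and parallel runs.

from contextlib import nullcontext
from dask.distributed import Lock


def lock_or_no_lock(name: str, client=None):
    # Returns a real dask Lock inside a client/worker context, otherwise a no-op context.
    lock = Lock(name, client)
    if lock.client is None:
        return nullcontext()
    else:
        return lock


with lock_or_no_lock('shared-resource'):
    pass  # code that must not run concurrently across dask workers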
@@ -1,26 +1,31 @@
 """Catches Excel error dialogs."""
-import sys
 from time import sleep
 from threading import Thread
-import logging
 import asyncio  # for timeout
 import win32gui
 import win32con
 import win32api
+import win32process
+
+from pyfemtet.logger import get_module_logger
+
+logger = get_module_logger('util.excel', __name__)

-logger = logging.getLogger(__name__)
-if __name__ == '__main__':
-    formatter = logging.Formatter(logging.BASIC_FORMAT)
-    handler = logging.StreamHandler(sys.stdout)
-    handler.setFormatter(formatter)
-    logger.addHandler(handler)
-    logger.setLevel(logging.DEBUG)
+
+def _get_pid(hwnd):
+    """Gets the process ID from a window handle."""
+    if hwnd > 0:
+        _, pid = win32process.GetWindowThreadProcessId(hwnd)
+    else:
+        pid = 0
+    return pid


 class _ExcelDialogProcessor:

-    def __init__(self, excel_, timeout):
+    def __init__(self, excel_, timeout, restore_book=True):
         self.excel = excel_
+        self.excel_pid = _get_pid(excel_.hWnd)
         self.__excel_window_title = f' - Excel'  # {basename} - Excel
         self.__error_dialog_title = 'Microsoft Visual Basic'
         self.__vbe_window_title = f'Microsoft Visual Basic for Applications - '  # Microsoft Visual Basic for Applications - {basename}
@@ -31,6 +36,7 @@ class _ExcelDialogProcessor:
         self.__error_raised = False
         self.__excel_state_stash = dict()
         self.__watch_thread = None
+        self.restore_book = restore_book

     async def watch(self):

@@ -47,6 +53,7 @@ class _ExcelDialogProcessor:
             win32gui.EnumWindows(self.enum_callback_to_close_dialog, found)
             await asyncio.sleep(0.5)
             if any(found):
+                await asyncio.sleep(1.)
                 break

         logger.debug('Closing the book.')
@@ -61,7 +68,7 @@ class _ExcelDialogProcessor:
     def enum_callback_to_activate(self, hwnd, _):
         title = win32gui.GetWindowText(hwnd)
         # the Excel main window
-        if self.__excel_window_title in title:
+        if (self.excel_pid == _get_pid(hwnd)) and (self.__excel_window_title in title):
             # When Visible == True and an error occurs,
             # the dialog does not appear unless the Excel window is activated once.
             # This alone may not be enough, though.
@@ -70,7 +77,7 @@ class _ExcelDialogProcessor:
     def enum_callback_to_close_dialog(self, hwnd, found):
         title = win32gui.GetWindowText(hwnd)
         # the error dialog
-        if self.__error_dialog_title == title:
+        if (self.excel_pid == _get_pid(hwnd)) and (self.__error_dialog_title == title):
             # For some reason no command other than this one is accepted,
             # and this one enters debug mode unconditionally.
             logger.debug('Found an error dialog.')
@@ -82,14 +89,14 @@ class _ExcelDialogProcessor:
     def enum_callback_to_close_confirm_dialog(self, hwnd, _):
         title = win32gui.GetWindowText(hwnd)
         # the confirmation dialog
-        if "Microsoft Excel" in title:
+        if (self.excel_pid == _get_pid(hwnd)) and ("Microsoft Excel" in title):
             # not needed if DisplayAlerts is False
             win32gui.SendMessage(hwnd, win32con.WM_SYSCOMMAND, win32con.SC_CLOSE, 0)

     def enum_callback_to_close_book(self, hwnd, _):
         title = win32gui.GetWindowText(hwnd)
         # VBE
-        if self.__vbe_window_title in title:
+        if (self.excel_pid == _get_pid(hwnd)) and (self.__vbe_window_title in title):
             # for some reason this closes the book itself
             win32gui.SendMessage(hwnd, win32con.WM_CLOSE, 0, 0)

@@ -133,15 +140,17 @@ class _ExcelDialogProcessor:
         # if exc_type is not None:
         #     if issubclass(exc_type, com_error) and self.__error_raised:
         if self.__error_raised:
+            if self.restore_book:
                 logger.debug('The book was closed as a side effect of error handling, '
                              'so the Excel book will be reopened.')
                 for wb_path in self.__workbook_paths:
                     self.excel.Workbooks.Open(wb_path)


-def watch_excel_macro_error(excel_, timeout):
+def watch_excel_macro_error(excel_, timeout, restore_book=True):
     """Watches for Excel error dialogs and closes the book if one is detected."""
-    return _ExcelDialogProcessor(excel_, timeout)
+    return _ExcelDialogProcessor(excel_, timeout, restore_book)
+


 if __name__ == '__main__':
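
The recurring (self.excel_pid == _get_pid(hwnd)) checks added above restrict dialog handling to windows owned by the watched Excel process. A minimal self-contained sketch of that technique follows; the helper mirrors _get_pid from the diff, while find_windows_of_process is an illustrative name, not pyfemtet API.

import win32gui
import win32process


def _get_pid(hwnd):
    """Gets the process ID that owns a window handle, as in the diff above."""
    if hwnd > 0:
        _, pid = win32process.GetWindowThreadProcessId(hwnd)
    else:
        pid = 0
    return pid


def find_windows_of_process(target_pid):
    """Collects handles of top-level windows owned by target_pid."""
    found = []

    def callback(hwnd, _):
        # keep only windows whose owning process matches the target
        if _get_pid(hwnd) == target_pid:
            found.append(hwnd)

    win32gui.EnumWindows(callback, None)
    return found

Filtering by PID this way avoids acting on dialogs that belong to a different Excel instance running on the same machine.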
@@ -9,4 +9,3 @@ from pyfemtet.dispatch_extensions._impl import (
     _get_pid,
     _get_pids,
 )
-
@@ -18,14 +18,11 @@ from multiprocessing.context import BaseContext, SpawnProcess, _concrete_context
 from multiprocessing.process import _children, _cleanup
 from multiprocessing.managers import SyncManager

-import logging
-from pyfemtet.logger import get_logger
-
 from pyfemtet._message import Msg

+from pyfemtet.logger import get_module_logger

-logger = get_logger('dispatch')
-logger.setLevel(logging.INFO)
+logger = get_module_logger('dispatch', __name__)


 DISPATCH_TIMEOUT = 120
@@ -1,3 +1,22 @@
-from pyfemtet.logger._impl import get_logger
+import logging
+from pyfemtet.logger._impl import (
+    get_module_logger,
+    add_file_output,
+    set_stdout_output,
+    remove_file_output,
+    remove_stdout_output,
+    remove_all_output,
+)

-__all__ = ['get_logger']
+
+def get_dask_logger():
+    return logging.getLogger('distributed')
+
+
+def get_optuna_logger():
+    import optuna
+    return optuna.logging.get_logger('optuna')
+
+
+def get_dash_logger():
+    return logging.getLogger('werkzeug')
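
A short usage sketch of the logging API now re-exported above (function names and signatures are taken from this diff; the logger name 'opt.optimizer' and the file name are illustrative). get_module_logger sets up a stdout-equipped root logger for the first dotted name component on first use and returns a child logger that propagates to it; add_file_output additionally mirrors the output to a file and returns the handler name.

import logging
from pyfemtet.logger import get_module_logger, add_file_output, remove_file_output

logger = get_module_logger('opt.optimizer', __name__)
logger.info('shown on stdout through the root logger')

handler_name = add_file_output(logger, 'optimization.log', level=logging.DEBUG)
logger.debug('also written to optimization.log')
remove_file_output(logger, 'optimization.log')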
pyfemtet/logger/_impl.py CHANGED
@@ -1,107 +1,231 @@
+import logging
 import os
 import sys
-import logging
+import datetime
+import locale
+from threading import Lock
+from pathlib import Path
+import platform
+
 from colorlog import ColoredFormatter
 from dask.distributed import get_worker

+LOCALE, LOCALE_ENCODING = locale.getlocale()
+if platform.system() == 'Windows':
+    DATEFMT = '%#m/%#d %#H:%M'
+else:
+    DATEFMT = '%-m/%-d %-H:%M'
+
+__lock = Lock()  # in case thread-parallel tasks access this

-def _get_worker_name_as_prefix():
-    name = '(Main) '
+__initialized_root_packages: list[str] = list()
+
+
+# ===== set dask worker prefix to ``ROOT`` logger =====
+
+def _get_dask_worker_name():
+    name = '(Main)'
     try:
         worker = get_worker()
         if isinstance(worker.name, str):  # index if local, tcp address if cluster
-            name = f'({worker.name}) '
+            name = f'({worker.name})'
         else:
-            name = f'(Sub{worker.name}) '
+            name = f'(Sub{worker.name})'
     except ValueError:
         pass
     return name


 class _DaskLogRecord(logging.LogRecord):
-    """Generate a log message with dask worker name."""
-
-    # def __init__(self, *args, **kwargs):
-    #     super().__init__(*args, **kwargs)
-    #     self.worker = _get_worker_name_as_prefix()
-
     def getMessage(self):
-        """Add worker name to loggin message.
-
-        This function is originated from logging.LogRecord.
-
-        # Copyright (C) 2001-2022 Vinay Sajip. All Rights Reserved.
-
-        """
         msg = str(self.msg)
         if self.args:
             msg = msg % self.args
-        msg = _get_worker_name_as_prefix() + msg
+        msg = _get_dask_worker_name() + ' ' + msg
         return msg


-logging.setLogRecordFactory(_DaskLogRecord)  # put the prefix before every logging %(message)s
+logging.setLogRecordFactory(_DaskLogRecord)
+
+
+# ===== format config =====
+
+def __create_formatter(colored=True):
+
+    if colored:
+        # colorized
+        header = "%(log_color)s" + "[%(name)s %(levelname).4s]" + " %(asctime)s" + "%(reset)s"
+
+        formatter = ColoredFormatter(
+            f"{header} %(message)s",
+            datefmt=DATEFMT,
+            reset=True,
+            log_colors={
+                "DEBUG": "purple",
+                "INFO": "cyan",
+                "WARNING": "yellow",
+                "ERROR": "light_red",
+                "CRITICAL": "red",
+            },
+        )
+
+    else:
+        header = "[%(name)s %(levelname).4s]"
+        formatter = logging.Formatter(
+            f"{header} %(message)s",
+            datefmt=DATEFMT,
+        )
+
+    return formatter
+
+
+# ===== handler config =====
+
+STDOUT_HANDLER_NAME = 'stdout-handler'
+
+
+def __get_stdout_handler():
+    stdout_handler = logging.StreamHandler(sys.stdout)
+    stdout_handler.set_name(STDOUT_HANDLER_NAME)
+    stdout_handler.setFormatter(__create_formatter(colored=True))
+    return stdout_handler
+
+
+def __has_stdout_handler(logger):
+    return any([handler.get_name() != STDOUT_HANDLER_NAME for handler in logger.handlers])
+
+
+def set_stdout_output(logger, level=logging.INFO):

+    if not __has_stdout_handler(logger):
+        logger.addHandler(__get_stdout_handler())

-def _color_supported() -> bool:
-    """Detection of color support.
+    logger.setLevel(level)

-    This function is originated from optuna.logging.

-    # Copyright (c) 2018 Preferred Networks, Inc.
+def remove_stdout_output(logger):
+    if __has_stdout_handler(logger):
+        logger.removeHandler(__get_stdout_handler())
+
+
+def add_file_output(logger, filepath=None, level=logging.INFO) -> str:
+    """Add FileHandler to the logger.
+
+    Returns:
+        str: THe name of the added handler.
+            Its format is 'filehandler-{os.path.basename(filepath)}'

     """

-    # NO_COLOR environment variable:
-    if os.environ.get("NO_COLOR", None):
-        return False
+    # certify filepath
+    if filepath is None:
+        filepath = datetime.datetime.now().strftime('%Y%m%d-%H%M%S') + f'_{logger.name}.log'
+
+    # add file handler
+    file_handler = logging.FileHandler(filename=filepath, encoding=LOCALE_ENCODING)
+    file_handler.set_name(f'filehandler-{os.path.basename(filepath)}')
+    file_handler.setFormatter(__create_formatter(colored=False))
+    logger.addHandler(file_handler)
+
+    # set (default) log level
+    logger.setLevel(level)
+
+    return file_handler.get_name()
+
+
+def remove_file_output(logger, filepath=None):
+    """Removes FileHandler from the logger.
+
+    If filepath is None, remove all FileHandler.
+    """
+
+    if filepath is None:
+        for handler in logger.handlers:
+            if 'filehandler-' in handler.name:
+                logger.removeHandler(handler)

-    if not hasattr(sys.stderr, "isatty") or not sys.stderr.isatty():
-        return False
     else:
-        return True
-
-
-def _create_formatter() -> logging.Formatter:
-    """Create a formatter."""
-    # header = f"[pyfemtet %(name)s] %(levelname).4s %(worker)s]"
-    header = f"[pyfemtet %(name)s %(levelname).4s]"
-    message = "%(message)s"
-
-    formatter = ColoredFormatter(
-        f"%(log_color)s{header}%(reset)s {message}",
-        datefmt=None,
-        reset=True,
-        log_colors={
-            "DEBUG": "purple",
-            "INFO": "cyan",
-            "WARNING": "yellow",
-            "ERROR": "light_red",
-            "CRITICAL": "red",
-        },
-    )
+        handler_name = f'filehandler-{os.path.basename(filepath)}'
+        for handler in logger.handlers:
+            if handler_name == handler.name:
+                logger.removeHandler(handler)
+
+
+def remove_all_output(logger):
+    for handler in logger.handlers:
+        logger.removeHandler(handler)
+
+    logger.addHandler(logging.NullHandler())

-    return formatter

+# ===== root-package logger =====

-def get_logger(logger_name):
-    """Returns a logger.
+def setup_package_root_logger(package_name):
+    global __initialized_root_packages
+    if package_name not in __initialized_root_packages:
+        with __lock:
+            logger = logging.getLogger(package_name)
+            logger.propagate = True
+            set_stdout_output(logger)
+            logger.setLevel(logging.INFO)
+            __initialized_root_packages.append(package_name)
+    else:
+        logger = logging.getLogger(package_name)
+    return logger
+
+
+# ===== module logger =====

-    Examples:
-        >>> # Retrieves a specific logger used in pyfemtet.opt.
-        >>> import logging  # doctest: +SKIP
-        >>> from pyfemtet.logger import get_logger  # doctest: +SKIP
-        >>> logger = get_logger('opt')  # logger of optimizer  # doctest: +SKIP
-        >>> logger.setLevel(logging.ERROR)  # disable all log from optimizer  # doctest: +SKIP
+def get_module_logger(name: str, __module_name__: str, ) -> logging.Logger:
+    """Return the module-level logger.
+
+    The format is defined in the package_root_logger.
+
+    Args:
+        name (str): The logger name to want.
+        __module_name__ (str): __name__ of the module.
+
+    Returns:
+        logging.Logger:
+            The logger its name is ``root_package.subpackage.module``.
+            child level logger's signal propagates to the parent logger
+            and is shown in the parent(s)'s handler(s).

     """

-    formatter = _create_formatter()
+    # check root logger initialized
+    name_arr = name.split('.')
+    if name_arr[0] not in __initialized_root_packages:
+        setup_package_root_logger(name_arr[0])
+
+    # get logger
+    logger = logging.getLogger(name)

-    logger = logging.getLogger(logger_name)
-    handler = logging.StreamHandler()
-    handler.setFormatter(formatter)
-    logger.addHandler(handler)
-    logger.setLevel(logging.DEBUG)
+    # If not root logger, ensure propagate is True.
+    if len(name_arr) > 1:
+        logger.propagate = True
+
+    # If debug mode, set specific level.
+    if __module_name__ == '__main__':
+        logger.setLevel(logging.DEBUG)

     return logger
+
+
+if __name__ == '__main__':
+
+    root_logger = setup_package_root_logger('logger')
+    optimizer_logger = get_module_logger('logger.optimizer', __name__); optimizer_logger.setLevel(logging.INFO)
+    interface_logger = get_module_logger('logger.interface', __name__)
+
+    root_logger.info("This is root logger's info.")
+    optimizer_logger.info("This is optimizer logger's info.")
+
+    add_file_output(interface_logger, 'test-module-log.log', level=logging.DEBUG)
+    interface_logger.debug('debugging...')
+    remove_file_output(interface_logger, 'test-module-log.log')
+
+    interface_logger.debug('debug is finished.')
+    root_logger.debug("This message will not be shown "
+                      "even if the module_logger's level "
+                      "is logging.DEBUG.")
pyfemtet/opt/_femopt.py CHANGED
@@ -12,7 +12,7 @@ from traceback import print_exception
 # 3rd-party
 import numpy as np
 import pandas as pd
-from dask.distributed import LocalCluster, Client
+from dask.distributed import LocalCluster, Client, get_worker, Nanny

 # pyfemtet relative
 from pyfemtet.opt.interface import FEMInterface, FemtetInterface
@@ -136,6 +136,7 @@ class FEMOpt:
         self.monitor_server_kwargs = dict()
         self.monitor_process_worker_name = None
         self._hv_reference = None
+        self._extra_space_dir = None

     # remove references to objects that cannot be pickled under multiprocessing
     def __getstate__(self):
@@ -675,10 +676,6 @@ class FEMOpt:
             directions,
         )

-        # set Femtet's confirm_before_exit
-        self.fem.confirm_before_exit = confirm_before_exit
-        self.fem.kwargs['confirm_before_exit'] = confirm_before_exit
-
         logger.info('Femtet loaded successfully.')

         # cluster settings
@@ -718,30 +715,43 @@ class FEMOpt:
         # The same apparently applies to the CLI --no-nanny option.

         # build the cluster
+        # noinspection PyTypeChecker
         cluster = LocalCluster(
             processes=True,
             n_workers=n_parallel,
             threads_per_worker=1,
+            worker_class=Nanny,
         )
         logger.info('LocalCluster launched successfully.')

-        self.client = Client(cluster, direct_to_workers=False)
-        self.scheduler_address = self.client.scheduler.address
+        self.client = Client(
+            cluster,
+            direct_to_workers=False,
+        )
         logger.info('Client launched successfully.')

-        # specify the workers that optimization tasks are distributed to
-        subprocess_indices = list(range(n_parallel))[1:]
-        worker_addresses = list(self.client.nthreads().keys())
+        self.scheduler_address = self.client.scheduler.address

-        # monitor worker settings
-        self.monitor_process_worker_name = worker_addresses[0]
-        worker_addresses[0] = 'Main'
+        # get the worker addresses
+        nannies_dict: dict[Any, Nanny] = self.client.cluster.workers
+        nannies = tuple(nannies_dict.values())
+
+        # pick one Nanny for the monitor and remember its space
+        # so that the main process can use it
+        self.monitor_process_worker_name = nannies[0].worker_address
+        self._extra_space_dir = nannies[0].worker_dir
+
+        # names and addresses are mixed up here, which hurts readability,
+        # but the Nannies other than the selected one get the computations
+        worker_addresses = ['Main']
+        worker_addresses.extend([n.worker_address for n in nannies[1:]])
+        subprocess_indices = list(range(n_parallel))[1:]

         with self.client.cluster as _cluster, self.client as _client:

             # actor settings
-            self.status = OptimizationStatus(_client)
-            self.worker_status_list = [OptimizationStatus(_client, name) for name in worker_addresses]  # consider tqdm
+            self.status = OptimizationStatus(_client, worker_address=self.monitor_process_worker_name)
+            self.worker_status_list = [OptimizationStatus(_client, worker_address=self.monitor_process_worker_name, name=name) for name in worker_addresses]  # consider tqdm
             self.status.set(OptimizationStatus.SETTING_UP)
             self.history = History(
                 self.history_path,
@@ -773,7 +783,6 @@ class FEMOpt:
             logger.info('Process monitor initialized successfully.')

             # fem
-            # TODO: Uploading even when n_parallel=1. Should this be used, or should the upload be skipped?
             self.fem._setup_before_parallel(_client)

             # opt
@@ -794,7 +803,7 @@ class FEMOpt:
                 subprocess_indices,
                 [self.worker_status_list] * len(subprocess_indices),
                 [wait_setup] * len(subprocess_indices),
-                workers=worker_addresses,
+                workers=worker_addresses if self.opt.is_cluster else worker_addresses[1:],
                 allow_other_workers=False,
             )

@@ -818,6 +827,7 @@ class FEMOpt:
                 ),
                 kwargs=dict(
                     skip_reconstruct=True,
+                    space_dir=self._extra_space_dir,
                 )
             )
             t_main.start()
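
A minimal sketch of the worker bookkeeping introduced above, runnable on its own with dask.distributed installed (n_parallel and the printout are illustrative, not FEMOpt internals): one Nanny is reserved for the monitor process while the remaining workers, listed after the 'Main' placeholder, receive the optimization tasks.

from dask.distributed import LocalCluster, Client, Nanny

if __name__ == '__main__':
    n_parallel = 3
    cluster = LocalCluster(
        processes=True,
        n_workers=n_parallel,
        threads_per_worker=1,
        worker_class=Nanny,
    )
    client = Client(cluster, direct_to_workers=False)

    nannies = tuple(cluster.workers.values())
    monitor_worker = nannies[0].worker_address  # reserved for the monitor process
    compute_workers = ['Main'] + [n.worker_address for n in nannies[1:]]

    print(monitor_worker, compute_workers)

    client.close()
    cluster.close()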