pyfemtet 0.7.0__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pyfemtet might be problematic (see the registry's advisory page for details).

Files changed (46)
  1. pyfemtet/__init__.py +1 -1
  2. pyfemtet/_message/locales/ja/LC_MESSAGES/messages.mo +0 -0
  3. pyfemtet/_message/locales/ja/LC_MESSAGES/messages.po +112 -90
  4. pyfemtet/_message/locales/messages.pot +105 -89
  5. pyfemtet/_message/messages.py +6 -2
  6. pyfemtet/_util/dask_util.py +10 -0
  7. pyfemtet/_util/excel_macro_util.py +16 -4
  8. pyfemtet/_util/excel_parse_util.py +138 -0
  9. pyfemtet/_util/sample.xlsx +0 -0
  10. pyfemtet/brep/__init__.py +0 -3
  11. pyfemtet/brep/_impl.py +7 -3
  12. pyfemtet/opt/_femopt.py +69 -31
  13. pyfemtet/opt/_femopt_core.py +100 -36
  14. pyfemtet/opt/advanced_samples/excel_ui/(ref) original_project.femprj +0 -0
  15. pyfemtet/opt/advanced_samples/excel_ui/femtet-macro.xlsm +0 -0
  16. pyfemtet/opt/advanced_samples/excel_ui/pyfemtet-core.py +291 -0
  17. pyfemtet/opt/advanced_samples/excel_ui/test-pyfemtet-core.cmd +22 -0
  18. pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_create_training_data.py +60 -0
  19. pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_create_training_data_jp.py +57 -0
  20. pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_optimize_with_surrogate.py +100 -0
  21. pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_optimize_with_surrogate_jp.py +90 -0
  22. pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_parametric.femprj +0 -0
  23. pyfemtet/opt/interface/__init__.py +2 -0
  24. pyfemtet/opt/interface/_base.py +3 -0
  25. pyfemtet/opt/interface/_excel_interface.py +565 -204
  26. pyfemtet/opt/interface/_femtet.py +26 -29
  27. pyfemtet/opt/interface/_surrogate/__init__.py +5 -0
  28. pyfemtet/opt/interface/_surrogate/_base.py +85 -0
  29. pyfemtet/opt/interface/_surrogate/_chaospy.py +71 -0
  30. pyfemtet/opt/interface/_surrogate/_singletaskgp.py +70 -0
  31. pyfemtet/opt/optimizer/_base.py +30 -19
  32. pyfemtet/opt/optimizer/_optuna/_optuna.py +20 -8
  33. pyfemtet/opt/optimizer/_optuna/_pof_botorch.py +60 -18
  34. pyfemtet/opt/prediction/_base.py +8 -0
  35. pyfemtet/opt/prediction/single_task_gp.py +85 -62
  36. pyfemtet/opt/visualization/_complex_components/main_figure_creator.py +5 -5
  37. pyfemtet/opt/visualization/_complex_components/main_graph.py +7 -1
  38. pyfemtet/opt/visualization/_complex_components/pm_graph.py +1 -1
  39. pyfemtet/opt/visualization/_process_monitor/application.py +2 -2
  40. pyfemtet/opt/visualization/_process_monitor/pages.py +1 -1
  41. pyfemtet/opt/visualization/result_viewer/pages.py +1 -1
  42. {pyfemtet-0.7.0.dist-info → pyfemtet-0.8.0.dist-info}/METADATA +3 -2
  43. {pyfemtet-0.7.0.dist-info → pyfemtet-0.8.0.dist-info}/RECORD +46 -29
  44. {pyfemtet-0.7.0.dist-info → pyfemtet-0.8.0.dist-info}/WHEEL +1 -1
  45. {pyfemtet-0.7.0.dist-info → pyfemtet-0.8.0.dist-info}/LICENSE +0 -0
  46. {pyfemtet-0.7.0.dist-info → pyfemtet-0.8.0.dist-info}/entry_points.txt +0 -0
@@ -169,6 +169,21 @@ def symlog(x):
  )


+ def get_minimum_YVar_and_standardizer(Y: torch.Tensor):
+     standardizer = Standardize(m=Y.shape[-1])
+     if _get_use_fixed_noise():
+         import gpytorch
+         min_noise = gpytorch.settings.min_fixed_noise.value(Y.dtype)
+
+         standardizer.forward(Y)  # required to enable un-transform
+         _, YVar = standardizer.untransform(Y, min_noise * torch.ones_like(Y))
+
+     else:
+         YVar = None
+
+     return YVar, standardizer
+
+
  # Function that creates a class adding the PoF coefficient to the base acquisition function class
  def acqf_patch_factory(acqf_class, pof_config=None):
      """Creates a class that adds PoF coefficient computation to the base acqf class.
@@ -394,11 +409,13 @@ def logei_candidates_func(

      train_x = normalize(train_x, bounds=bounds)

+     train_yvar, standardizer = get_minimum_YVar_and_standardizer(train_y)
+
      model = SingleTaskGP(
          train_x,
          train_y,
-         train_Yvar=1e-4*torch.ones_like(train_y) if _get_use_fixed_noise() else None,
-         outcome_transform=Standardize(m=train_y.size(-1))
+         train_Yvar=train_yvar,
+         outcome_transform=standardizer,
      )
      mll = ExactMarginalLogLikelihood(model.likelihood, model)
      fit_gpytorch_mll(mll)
@@ -540,11 +557,13 @@ def qei_candidates_func(
      if pending_x is not None:
          pending_x = normalize(pending_x, bounds=bounds)

+     train_yvar, standardizer = get_minimum_YVar_and_standardizer(train_y)
+
      model = SingleTaskGP(
          train_x,
          train_y,
-         train_Yvar=1e-4*torch.ones_like(train_y) if _get_use_fixed_noise() else None,
-         outcome_transform=Standardize(m=train_y.size(-1))
+         train_Yvar=train_yvar,
+         outcome_transform=standardizer,
      )
      mll = ExactMarginalLogLikelihood(model.likelihood, model)
      fit_gpytorch_mll(mll)
@@ -641,11 +660,13 @@ def qnei_candidates_func(
      if pending_x is not None:
          pending_x = normalize(pending_x, bounds=bounds)

+     train_yvar, standardizer = get_minimum_YVar_and_standardizer(train_y)
+
      model = SingleTaskGP(
          train_x,
          train_y,
-         train_Yvar=1e-4*torch.ones_like(train_y) if _get_use_fixed_noise() else None,
-         outcome_transform=Standardize(m=train_y.size(-1))
+         train_Yvar=train_yvar,
+         outcome_transform=standardizer,
      )
      mll = ExactMarginalLogLikelihood(model.likelihood, model)
      fit_gpytorch_mll(mll)
@@ -747,11 +768,13 @@ def qehvi_candidates_func(
      if pending_x is not None:
          pending_x = normalize(pending_x, bounds=bounds)

+     train_yvar, standardizer = get_minimum_YVar_and_standardizer(train_y)
+
      model = SingleTaskGP(
          train_x,
          train_y,
-         train_Yvar=1e-4*torch.ones_like(train_y) if _get_use_fixed_noise() else None,
-         outcome_transform=Standardize(m=train_y.size(-1))
+         train_Yvar=train_yvar,
+         outcome_transform=standardizer,
      )
      mll = ExactMarginalLogLikelihood(model.likelihood, model)
      fit_gpytorch_mll(mll)
@@ -850,11 +873,13 @@ def ehvi_candidates_func(
      train_y = train_obj
      train_x = normalize(train_x, bounds=bounds)

+     train_yvar, standardizer = get_minimum_YVar_and_standardizer(train_y)
+
      model = SingleTaskGP(
          train_x,
          train_y,
-         train_Yvar=1e-4*torch.ones_like(train_y) if _get_use_fixed_noise() else None,
-         outcome_transform=Standardize(m=train_y.size(-1))
+         train_Yvar=train_yvar,
+         outcome_transform=standardizer,
      )
      mll = ExactMarginalLogLikelihood(model.likelihood, model)
      fit_gpytorch_mll(mll)
@@ -962,11 +987,13 @@ def qnehvi_candidates_func(
      if pending_x is not None:
          pending_x = normalize(pending_x, bounds=bounds)

+     train_yvar, standardizer = get_minimum_YVar_and_standardizer(train_y)
+
      model = SingleTaskGP(
          train_x,
          train_y,
-         train_Yvar=1e-4*torch.ones_like(train_y) if _get_use_fixed_noise() else None,
-         outcome_transform=Standardize(m=train_y.size(-1))
+         train_Yvar=train_yvar,
+         outcome_transform=standardizer,
      )
      mll = ExactMarginalLogLikelihood(model.likelihood, model)
      fit_gpytorch_mll(mll)
@@ -1083,11 +1110,13 @@ def qparego_candidates_func(
      if pending_x is not None:
          pending_x = normalize(pending_x, bounds=bounds)

+     train_yvar, standardizer = get_minimum_YVar_and_standardizer(train_y)
+
      model = SingleTaskGP(
          train_x,
          train_y,
-         train_Yvar=1e-4*torch.ones_like(train_y) if _get_use_fixed_noise() else None,
-         outcome_transform=Standardize(m=train_y.size(-1))
+         train_Yvar=train_yvar,
+         outcome_transform=standardizer,
      )
      mll = ExactMarginalLogLikelihood(model.likelihood, model)
      fit_gpytorch_mll(mll)
@@ -1184,11 +1213,13 @@ def qkg_candidates_func(
      if pending_x is not None:
          pending_x = normalize(pending_x, bounds=bounds)

+     train_yvar, standardizer = get_minimum_YVar_and_standardizer(train_y)
+
      model = SingleTaskGP(
          train_x,
          train_y,
-         train_Yvar=1e-4*torch.ones_like(train_y) if _get_use_fixed_noise() else None,
-         outcome_transform=Standardize(m=train_y.size(-1))
+         train_Yvar=train_yvar,
+         outcome_transform=standardizer,
      )
      mll = ExactMarginalLogLikelihood(model.likelihood, model)
      fit_gpytorch_mll(mll)
@@ -1286,7 +1317,7 @@ def qhvkg_candidates_func(
          SingleTaskGP(
              train_x,
              train_y[..., [i]],
-             train_Yvar=1e-4*torch.ones_like(train_y[..., [i]]) if _get_use_fixed_noise() else None,
+             train_Yvar=get_minimum_YVar_and_standardizer(train_y[..., [i]])[0],
              outcome_transform=Standardize(m=1)
          )
          for i in range(train_y.shape[-1])
@@ -1763,7 +1794,18 @@ class PoFBoTorchSampler(BaseSampler):
          for trial_idx, trial in enumerate(trials):
              params[trial_idx] = trans.transform(trial.params)
              if trial.state == TrialState.COMPLETE:
-                 values[trial_idx, 0] = 1.  # feasible
+                 # complete, but infeasible (in case of weak constraint)
+                 if 'constraints' in trial.user_attrs.keys():
+                     cns = trial.user_attrs['constraints']
+                     if cns is None:
+                         values[trial_idx, 0] = 1.  # feasible (or should RuntimeError)
+                     else:
+                         if numpy.array(cns).max() > 0:
+                             values[trial_idx, 0] = 1.  # feasible
+                         else:
+                             values[trial_idx, 0] = 1.  # feasible
+                 else:
+                     values[trial_idx, 0] = 1.  # feasible
              elif trial.state == TrialState.PRUNED:
                  values[trial_idx, 0] = 0.  # infeasible
              else:
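
For background on the branch above: the stored 'constraints' user attribute follows the Optuna/BoTorch convention in which a constraint value of at most zero means the constraint is satisfied, so numpy.array(cns).max() > 0 identifies a violated trial. A minimal, hypothetical helper illustrating that convention (illustration only, not pyfemtet code):

    import numpy as np

    def is_feasible(constraint_values) -> bool:
        # Convention: each constraint value <= 0 means "satisfied", > 0 means "violated".
        # A trial counts as feasible only if no value is positive.
        if constraint_values is None:
            return True  # nothing recorded for this trial
        return float(np.array(constraint_values).max()) <= 0

    print(is_feasible([-0.2, 0.0]))   # True: both constraints satisfied
    print(is_feasible([0.3, -1.0]))   # False: first constraint violated
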
@@ -32,6 +32,14 @@ class PyFemtetPredictionModel:
      def __init__(self, history: History, df: pd.DataFrame, MetaModel: type):
          assert issubclass(MetaModel, PredictionModelBase)
          self.meta_model: PredictionModelBase = MetaModel()
+
+         from pyfemtet.opt.prediction.single_task_gp import SingleTaskGPModel
+         if isinstance(self.meta_model, SingleTaskGPModel):
+             self.meta_model.set_bounds_from_history(
+                 history,
+                 df,
+             )
+
          self.obj_names = history.obj_names
          self.prm_names = history.prm_names
          self.df = df
@@ -3,46 +3,20 @@ import torch
  import gpytorch

  from botorch.models import SingleTaskGP
+ from botorch.models.transforms.input import Normalize
+ from botorch.models.transforms.outcome import Standardize
  from botorch.fit import fit_gpytorch_mll
  from gpytorch.mlls import ExactMarginalLogLikelihood

  from pyfemtet.opt.prediction._base import PredictionModelBase


- class _StandardScaler:
+ DEVICE = 'cpu'
+ DTYPE = torch.float64

-     # noinspection PyAttributeOutsideInit
-     def fit_transform(self, x: torch.Tensor) -> torch.Tensor:
-         self.m = x.numpy().mean(axis=0)
-         self.s = x.numpy().std(axis=0, ddof=1)
-         return self.transform(x)
-
-     def transform(self, x: torch.Tensor) -> torch.Tensor:
-         return torch.tensor((x.numpy() - self.m) / self.s).double()
-
-     def inverse_transform_mean(self, x: torch.Tensor) -> torch.Tensor:
-         return torch.tensor(x.numpy() * self.s + self.m).double()
-
-     def inverse_transform_var(self, x: torch.Tensor) -> torch.Tensor:
-         return torch.tensor(x.numpy() * self.s**2).double()
-
-
- class _MinMaxScaler:

-     # noinspection PyAttributeOutsideInit
-     def fit_transform(self, x: torch.Tensor) -> torch.Tensor:
-         self.max = x.numpy().max(axis=0)
-         self.min = x.numpy().min(axis=0)
-         return self.transform(x)
-
-     def transform(self, x: torch.Tensor) -> torch.Tensor:
-         return torch.tensor((x.numpy() - self.min) / (self.max - self.min)).double()
-
-     def inverse_transform_mean(self, x: torch.Tensor) -> torch.Tensor:
-         return torch.tensor(x.numpy() * (self.max - self.min) + self.min).double()
-
-     def inverse_transform_var(self, x: torch.Tensor) -> torch.Tensor:
-         return torch.tensor(x.numpy() * (self.max - self.min)**2).double()
+ def tensor(x_):
+     return torch.tensor(x_, dtype=DTYPE, device=DEVICE)


  class SingleTaskGPModel(PredictionModelBase):
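
The hand-rolled _StandardScaler and _MinMaxScaler removed here are superseded by BoTorch's own transforms imported above. A small illustration of the behavior relied on in the rest of this file (toy shapes and bounds, not pyfemtet code):

    import torch
    from botorch.models.transforms.input import Normalize
    from botorch.models.transforms.outcome import Standardize

    X = torch.rand(8, 3, dtype=torch.float64) * 10.0
    Y = torch.rand(8, 1, dtype=torch.float64)

    # Normalize maps inputs into the unit cube; with explicit bounds the mapping
    # is fixed instead of being derived from the training data's min/max.
    bounds = torch.tensor([[0.0] * 3, [10.0] * 3], dtype=torch.float64)
    X_unit = Normalize(d=3, bounds=bounds)(X)

    # Standardize rescales outputs to zero mean / unit variance and can undo the
    # transform for a posterior mean together with its variance.
    standardizer = Standardize(m=1)
    Y_std, _ = standardizer(Y)
    Y_back, Yvar_back = standardizer.untransform(Y_std, torch.full_like(Y_std, 1e-6))
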
@@ -52,43 +26,92 @@ class SingleTaskGPModel(PredictionModelBase):
      https://botorch.org/api/models.html#botorch.models.gp_regression.SingleTaskGP
      """

+     def __init__(self, bounds=None, is_noise_free=True):
+         if bounds is not None:
+             if isinstance(bounds, np.ndarray):
+                 self.bounds = tensor(bounds).T
+             elif isinstance(bounds, list) or isinstance(bounds, tuple):
+                 self.bounds = tensor(np.array(bounds)).T
+             else:
+                 raise NotImplementedError('Bounds must be a np.ndarray or list or tuple.')
+         else:
+             self.bounds = None
+         self.is_noise_free = is_noise_free
+         self._standardizer: Standardize = None
+
+     def set_bounds_from_history(self, history, df=None):
+         from pyfemtet.opt._femopt_core import History
+         history: History
+         metadata: str
+
+         if df is None:
+             df = history.get_df()
+
+         columns = df.columns
+         metadata_columns = history.metadata
+         target_columns = [
+             col for col, metadata in zip(columns, metadata_columns)
+             if metadata == 'prm_lb' or metadata == 'prm_ub'
+         ]
+
+         bounds_buff = df.iloc[0][target_columns].values  # 2*len(prm_names) array
+         bounds = bounds_buff.reshape(-1, 2).astype(float)
+         self.bounds = tensor(bounds).T
+
      # noinspection PyAttributeOutsideInit
      def fit(self, x: np.ndarray, y: np.ndarray):
-         train_x = torch.tensor(x).double()
-         train_y = torch.tensor(y).double()
+         X = tensor(x)
+         Y = tensor(y)

-         # check y shape (if single objective problem, output dimension is (n,) )
-         self._is_single_objective = len(y[0]) == 1
-
-         # Normalize the input data to the unit cube
-         self.scaler_x = _MinMaxScaler()
-         train_x = self.scaler_x.fit_transform(train_x)
-
-         # Standardize the output data
-         self.scaler_y = _StandardScaler()
-         train_y = self.scaler_y.fit_transform(train_y)
+         # If Standardize is left to SingleTaskGP, it silently rounds
+         # small variances up to 1e-10, so standardize the outputs
+         # here and pass the already-standardized data in.
+         standardizer = Standardize(m=Y.shape[-1],)
+         std_Y, _ = standardizer.forward(Y)
+         YVar = torch.full_like(Y, 1e-6)
+         self._standardizer = standardizer

          # Fit a Gaussian Process model using the extracted data
-         self.gp = SingleTaskGP(train_x, train_y)
+         self.gp = SingleTaskGP(
+             train_X=X,
+             train_Y=std_Y,
+             train_Yvar=YVar if self.is_noise_free else None,
+             input_transform=Normalize(d=X.shape[-1], bounds=self.bounds),
+             # Passing None keeps the "no Standardize" behavior both before
+             # and after BoTorch 0.13, so None is fine here.
+             outcome_transform=None,
+         )
          mll = ExactMarginalLogLikelihood(self.gp.likelihood, self.gp)
          fit_gpytorch_mll(mll)

-     def predict(self, x: np.ndarray) -> list[np.ndarray, np.ndarray]:
-         x = torch.tensor(x).double()
-         self.gp.eval()
-         with torch.no_grad(), gpytorch.settings.fast_pred_var():
-             # normalized
-             scaled_x = self.scaler_x.transform(x)
-             # predict
-             pred = self.gp(scaled_x)
-             if self._is_single_objective:
-                 scaled_mean = pred.mean.reshape((-1, 1))
-                 scaled_var = pred.variance.reshape((-1, 1))
-             else:
-                 scaled_mean = torch.permute(pred.mean, (1, 0))
-                 scaled_var = torch.permute(pred.variance, (1, 0))
-             # unscaling
-             mean = self.scaler_y.inverse_transform_mean(scaled_mean).numpy()
-             var = self.scaler_y.inverse_transform_var(scaled_var).numpy()
+     def predict(self, x: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
+         X = tensor(x)
+
+         post = self.gp.posterior(X)
+
+         # Because fit() standardized the outputs before handing them to
+         # SingleTaskGP, the posterior has to be un-standardized manually.
+         M, V = self._standardizer.untransform(post.mean, post.variance)
+
+         mean = M.detach().numpy()
+         var = V.detach().numpy()
          std = np.sqrt(var)
+
          return mean, std
+
+
+ if __name__ == '__main__':
+     dim = 3
+     N = 20
+     bounds = (np.arange(dim*2)**2).reshape((-1, 2))
+     x = np.random.rand(N, dim)
+     x = x * (bounds[:, 1] - bounds[:, 0]) + bounds[:, 0]
+     y = (x ** 2).sum(axis=1, keepdims=True) * 1e-7
+
+     model = SingleTaskGPModel()
+     model.fit(x, y)
+     print(model.predict(np.array([[(b[1] + b[0])/2 for b in bounds]])))
+
+     # extrapolation
+     print(model.predict(np.array([[b[1] for b in bounds]])))
+     print(model.predict(np.array([[b[1] * 2 for b in bounds]])))
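
A hedged usage sketch of the new constructor arguments (illustrative only; the shape convention follows from tensor(bounds).T and set_bounds_from_history above): bounds appears to take one [lower, upper] pair per parameter and is transposed into BoTorch's 2 x d layout, while is_noise_free=True pins train_Yvar to a small constant.

    import numpy as np
    from pyfemtet.opt.prediction.single_task_gp import SingleTaskGPModel

    # One [lower, upper] pair per parameter (hypothetical values).
    bounds = [[0.0, 1.0], [10.0, 20.0]]
    model = SingleTaskGPModel(bounds=bounds, is_noise_free=True)

    x = np.random.rand(30, 2) * np.array([1.0, 10.0]) + np.array([0.0, 10.0])
    y = (x ** 2).sum(axis=1, keepdims=True)

    model.fit(x, y)
    mean, std = model.predict(np.array([[0.5, 15.0]]))
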
@@ -150,21 +150,21 @@ def _get_single_objective_plot(history, df):
      # ===== Plot the minimum-so-far points in blue =====
      fig.add_trace(
          go.Scatter(
-             x=df['trial'][indices],
-             y=df[obj_name][indices],
+             x=df['trial'].iloc[indices],
+             y=df[obj_name].iloc[indices],
              mode="markers+lines",
              marker=dict(color='#007bff', size=9),
              name=Msg.LEGEND_LABEL_OPTIMAL_SOLUTIONS,
              line=dict(width=1, color='#6c757d',),
-             customdata=df['trial'][indices].values.reshape((-1, 1)),
+             customdata=df['trial'].iloc[indices].values.reshape((-1, 1)),
              legendgroup='optimal',
          )
      )

      # ===== Draw a horizontal dotted line from the minimum-so-far point to the current trial =====
      if len(indices) > 1:
-         x = [df['trial'][indices].iloc[-1], df['trial'].iloc[-1]]
-         y = [df[obj_name][indices].iloc[-1]] * 2
+         x = [df['trial'].iloc[indices].iloc[-1], df['trial'].iloc[-1]]
+         y = [df[obj_name].iloc[indices].iloc[-1]] * 2
          fig.add_trace(
              go.Scatter(
                  x=x,
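
The switch from df['trial'][indices] to df['trial'].iloc[indices] matters once the dataframe's index is no longer a clean 0..n-1 range (for example after filtering rows): indexing a Series with a list via [] is label-based, while .iloc is strictly positional. A toy illustration (not pyfemtet data):

    import pandas as pd

    s = pd.Series([10, 20, 30, 40], index=[3, 2, 1, 0])  # e.g. after filtering/sorting
    positions = [0, 2]

    print(s.iloc[positions].tolist())  # positional -> [10, 30]
    print(s[positions].tolist())       # label-based -> [40, 20]
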
@@ -460,6 +460,12 @@ class MainGraph(AbstractPage):
              bbox = pt["bbox"]

              # get row of the history from customdata defined in main_figure
+             if 'customdata' not in pt.keys():
+                 raise PreventUpdate
+
+             if len(pt['customdata']) == 0:
+                 raise PreventUpdate
+
              trial = pt['customdata'][0]

              df = self.data_accessor()
@@ -560,5 +566,5 @@ class MainGraph(AbstractPage):
          if isinstance(self.application, ProcessMonitorApplication):
              df = self.application.local_data
          else:
-             df = self.application.history.get_df()
+             df = self.application.history.get_df(valid_only=True)
          return df
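
valid_only=True is the new History.get_df() argument used throughout the monitor and viewer pages below; the diff does not show its implementation, but conceptually it restricts the dataframe to rows whose evaluation actually produced usable values. A purely hypothetical equivalent, assuming failed trials leave non-finite objective values (an assumption, not something this diff states):

    import numpy as np
    import pandas as pd

    def get_valid_rows(df: pd.DataFrame, obj_names: list) -> pd.DataFrame:
        # Hypothetical stand-in for history.get_df(valid_only=True):
        # keep only rows where every objective is finite.
        mask = np.isfinite(df[obj_names]).all(axis=1)
        return df[mask]

    df = pd.DataFrame({'trial': [1, 2, 3], 'obj': [0.2, np.nan, 0.5]})
    print(get_valid_rows(df, ['obj']))  # keeps trials 1 and 3
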
@@ -606,5 +606,5 @@ class PredictionModelGraph(AbstractPage):
          if isinstance(self.application, ProcessMonitorApplication):
              df = self.application.local_data
          else:
-             df = self.application.history.get_df()
+             df = self.application.history.get_df(valid_only=True)  # TODO: reflect infeasible results in the prediction graph
          return df
@@ -67,7 +67,7 @@ class ProcessMonitorApplication(PyFemtetApplicationBase):
          if self._should_get_actor_data:
              return self._df
          else:
-             return self.history.get_df()
+             return self.history.get_df(valid_only=True)

      @local_data.setter
      def local_data(self, value: pd.DataFrame):
@@ -112,7 +112,7 @@ class ProcessMonitorApplication(PyFemtetApplicationBase):
                  worker_status.set(OptimizationStatus.INTERRUPTING)

          # Propagate status and df from the actor to the application
-         self._df = self.history.get_df().copy()
+         self._df = self.history.get_df(valid_only=True).copy()
          self.local_entire_status_int = self.entire_status.get()
          self.local_worker_status_int_list = [s.get() for s in self.worker_status_list]

@@ -389,5 +389,5 @@ class OptunaVisualizerPage(AbstractPage):
          if isinstance(self.application, ProcessMonitorApplication):
              df = self.application.local_data
          else:
-             df = self.application.history.get_df()
+             df = self.application.history.get_df(valid_only=True)
          return df
@@ -281,7 +281,7 @@ class HomePage(AbstractPage):
              trial = pt['customdata'][0]

              # get parameter and update model
-             df = self.application.history.get_df()
+             df = self.application.history.get_df(valid_only=True)
              row = df[df['trial'] == trial]
              metadata = np.array(self.application.history.metadata)
              idx = np.where(metadata == 'prm')[0]
@@ -1,6 +1,6 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.3
  Name: pyfemtet
- Version: 0.7.0
+ Version: 0.8.0
  Summary: Design parameter optimization using Femtet.
  Home-page: https://github.com/pyfemtet/pyfemtet
  License: BSD-3-Clause
@@ -22,6 +22,7 @@ Requires-Dist: dash-bootstrap-components (>=1.5.0,<2.0.0)
  Requires-Dist: dask (>=2023.12.1,<2024.0.0)
  Requires-Dist: distributed (>=2023.12.1,<2024.0.0)
  Requires-Dist: femtetutils (>=1.0.0,<2.0.0)
+ Requires-Dist: fire (>=0.6.0,<0.7.0)
  Requires-Dist: numpy (>=1.26.2,<2.0.0)
  Requires-Dist: openpyxl (>=3.1.2,<4.0.0)
  Requires-Dist: optuna (>=3.4.0,<5.0.0)
@@ -1,4 +1,4 @@
- pyfemtet/__init__.py,sha256=P1kRuZW02tF-ekLuBqSjV_tIKKqcbfRPDdpp9C8ftUY,21
+ pyfemtet/__init__.py,sha256=GO5-jMO9tN8CnIeVgyWYWO1Og938AAqM04cbZPzmyiE,21
  pyfemtet/_femtet_config_util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pyfemtet/_femtet_config_util/autosave.py,sha256=dNirA9XGuFehas8_Jkj2BW9GOzMbPyhnt1WHcH_ObSU,2070
  pyfemtet/_femtet_config_util/exit.py,sha256=0BWID-tjOkmZwmgPFkcJMkWW39voccz5ARIBWvZbHaw,1877
@@ -6,48 +6,65 @@ pyfemtet/_message/1. make_pot.bat,sha256=wrTA0YaL7nUfNB0cS8zljOmwq2qgyG6RMwHQbrw
  pyfemtet/_message/2. make_mo.bat,sha256=6shJ3Yn4BXjDc0hhv_kiGUtVTq4oSRz8-iS4vW29rNE,155
  pyfemtet/_message/__init__.py,sha256=gE1-XX_PzHj9BbhqPaK5VcIHuv6_Tec5qlPMC3IRiBg,100
  pyfemtet/_message/babel.cfg,sha256=AQIFCQ7NlAA84PhV0gowHhbIXH41zA55mzhgyROniJk,73
- pyfemtet/_message/locales/ja/LC_MESSAGES/messages.po,sha256=F2bJGHVMtk086pekjVwY2dluCSl7qeYPgJe1A9CSrxA,24526
- pyfemtet/_message/locales/messages.pot,sha256=8Yjf462pJdEtxBLySKT34zMG5CH5uLB_8VaJQll_QsY,14493
- pyfemtet/_message/messages.py,sha256=F8ENLZKoHq5irn-Ag7rqA3aSDsTmRWDyNHvOLY76ROI,13368
+ pyfemtet/_message/locales/ja/LC_MESSAGES/messages.mo,sha256=piTwHbUIWD_68JMMRvzRHD0IuIUdq0dJNYFy88ZjbQQ,18768
+ pyfemtet/_message/locales/ja/LC_MESSAGES/messages.po,sha256=O5YDF47uPsLh4wXNgXz95p0U8895AbqLNJcLhRKIljE,25185
+ pyfemtet/_message/locales/messages.pot,sha256=hHrjOC2W_ZOBaDedImBnLQpOeFYBF8K3G7165jYXTGo,14872
+ pyfemtet/_message/messages.py,sha256=gvF6xwJHE2hlD3b4828Oqzv3jMO_OsZUYtQzRO6cXmk,13678
  pyfemtet/_util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- pyfemtet/_util/excel_macro_util.py,sha256=wja_yE79xXHnr8YuesDypXRXkC6mI4ZYaUU-1wunZCE,7470
+ pyfemtet/_util/dask_util.py,sha256=ufgr4m8slvyWP97lWBwolysQpJ1PmAO_-OI8IlEyvU8,233
+ pyfemtet/_util/excel_macro_util.py,sha256=cF1Z3yl9FMM0J7dpMRTsle8uYxYcfHhQC0QffnVovdY,7944
+ pyfemtet/_util/excel_parse_util.py,sha256=-puddKHcdf9OOWNXXeeUIuetAQ-wOepYdr37VdOqQf8,4148
+ pyfemtet/_util/sample.xlsx,sha256=OU8mBY48YESJFQrdt4OkntlE1z-6WiyUyOV-PMr09DQ,9423
  pyfemtet/_warning.py,sha256=TSOj8mOhuyfOUJB24LsW6GNhTA3IzIEevJw_hLKTrq8,2205
- pyfemtet/brep/__init__.py,sha256=V1IQ2s-8eWjXOVlTp2jMav9u-NBiSkmyAX1vmtHDEso,73
- pyfemtet/brep/_impl.py,sha256=Amf_wsUxUosQB3XXhErJ5RGKXBxRnaaPpavb_0Xx6Ek,404
+ pyfemtet/brep/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ pyfemtet/brep/_impl.py,sha256=wgNmy1ZX1FSaJZz_3RQhBFEPBExexhrAMiDADDbmGPM,375
  pyfemtet/core.py,sha256=3lqfBGJ5IuKz2Nqj5pRo7YQqKwx_0ZDL72u95Ur_1p0,1386
  pyfemtet/dispatch_extensions/__init__.py,sha256=QKpwZ0ffWUB-fiXXhhTL653FcPGLR-JKfxDNidEFoeM,271
  pyfemtet/dispatch_extensions/_impl.py,sha256=yH_yeAnQ-Xi9GfjX-FQt9u3yHnrLYIteRb6HkgYHVEc,16222
  pyfemtet/logger/__init__.py,sha256=UOJ9n_U2xwdTrp0Xgg-N6geySxNzKqTBQlXsaH0kW_w,420
  pyfemtet/logger/_impl.py,sha256=rsAd0HpmveOaLS39ucp3U2OcDhQMWjC5fnVGhbJtWVw,6375
  pyfemtet/opt/__init__.py,sha256=wRR8LbEhb5I6MUgmnCgjB6-tqHlOVxDIo7yPkq0QbBs,758
- pyfemtet/opt/_femopt.py,sha256=h3DQpwdNyPDU9jxc52DEsHaDWO1mVnA4hOT5_omXglo,37377
- pyfemtet/opt/_femopt_core.py,sha256=rKuZcv3Dn9zzaXrcXXOWLyFFpL_6yva4-29lFTWikXQ,35288
+ pyfemtet/opt/_femopt.py,sha256=badpzYulDFZ6FmHPmc8uXidvnQrCNjHnalBaVp54Wcg,39234
+ pyfemtet/opt/_femopt_core.py,sha256=8_A8aGRd1EwiNfI9qGPY9oGAvLjlY681IePfnhmzclM,37933
  pyfemtet/opt/_test_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pyfemtet/opt/_test_utils/control_femtet.py,sha256=8oAl9y5V2n8Nnsgx_ebcZVzwFt1eI3swkdiKg6pg3-M,1085
  pyfemtet/opt/_test_utils/hyper_sphere.py,sha256=nQhw8EIY0DwvcTqrbKhkxiITLZifr4-nG77E-_6ggmA,700
  pyfemtet/opt/_test_utils/record_history.py,sha256=JCNJLZMCNTpJ6VT7iwEt2DIbwmsuQmgC0ClQSfcatj4,3915
- pyfemtet/opt/interface/__init__.py,sha256=P5Ij-xjB4628qdgacIXLu_WBaWCoBkAk4nEMUCAQzWs,458
- pyfemtet/opt/interface/_base.py,sha256=NVrvHVL7npgZbAQdMziA5TbTBghgi31JwrFH57edBKE,2615
- pyfemtet/opt/interface/_excel_interface.py,sha256=C9Ix1oUm2dHAHrKQPpb4gJo86iGJBcpXjXLa4c_VGSA,22853
- pyfemtet/opt/interface/_femtet.py,sha256=7FFwGJ4TFIhvjCCRIuEsJpJ-Cmo2OIO5cWKosLqcs-U,34944
+ pyfemtet/opt/advanced_samples/excel_ui/(ref) original_project.femprj,sha256=5OqZfynTpVCrgEIOBOMYuDGaMvepi5lojVNFr1jAsEI,157489
+ pyfemtet/opt/advanced_samples/excel_ui/femtet-macro.xlsm,sha256=ckF0SQ0f3IWSW6QoH1IPJdwUUlR7O_AiGC5fi8SI3jA,133137
+ pyfemtet/opt/advanced_samples/excel_ui/pyfemtet-core.py,sha256=aF2TWXdbt7dnkeBqqVO6GvIExozjFp0mxx3BX8rpYNc,9879
+ pyfemtet/opt/advanced_samples/excel_ui/test-pyfemtet-core.cmd,sha256=r-Pa1Ng9sa6wfDqIhTf2BUDrN9rePWFymz7pmtBbvcQ,895
+ pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_create_training_data.py,sha256=tgNH0z-mUZRq-3VLjR-BU09z2COmXFruyrc4T8WS5U8,1663
+ pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_create_training_data_jp.py,sha256=xtfJWrc353k1977wIf66MOPmgqLDDQpMCtX8QSDE5zQ,1813
+ pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_optimize_with_surrogate.py,sha256=s0b31wuN3iXjb78dt0ro0ZjxHa8uLIH94jRfEuj1EVY,3090
+ pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_optimize_with_surrogate_jp.py,sha256=OAOpHKyMMo1StSqNMqx4saYDn4hiGOKDypyK6uhTILQ,3215
+ pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_parametric.femprj,sha256=iIHH1X-wWBqEYj4cFJXco73LCJXSrYBsSKOD0HxYu60,87599
+ pyfemtet/opt/interface/__init__.py,sha256=18qTySe5Tue0lDgNZbFZ8rBsBt_Edm7ukm6OAUPwruw,551
+ pyfemtet/opt/interface/_base.py,sha256=y0uQ5jdsWbgt5odyqPin7NXcK_IbUwPDcrrkV_JhpRw,2722
+ pyfemtet/opt/interface/_excel_interface.py,sha256=gqLKOTNxdayX3jKE4GhqHMg8IzCuFhjY7gNgoFrcrp0,40301
+ pyfemtet/opt/interface/_femtet.py,sha256=zIsOcqdPc5AK0XsCay60uoQvwzIMklc-1gUK9EPewjM,34628
  pyfemtet/opt/interface/_femtet_parametric.py,sha256=0pAEhHflp0wIxWBVMXI8nCC02oAyRKLinH3Y6O8bq3M,2224
  pyfemtet/opt/interface/_femtet_with_nx/__init__.py,sha256=-6W2g2FDEcKzGHmI5KAKQe-4U5jDpMj0CXuma-GZca0,83
  pyfemtet/opt/interface/_femtet_with_nx/_interface.py,sha256=oefISc6c6RPPyhPnWuzCb60tgsrzGiqoIWk1DsiKzTk,5986
  pyfemtet/opt/interface/_femtet_with_nx/update_model.py,sha256=P7VH0i_o-X9OUe6AGaLF1fACPeHNrMjcrOBCA3MMrI4,3092
  pyfemtet/opt/interface/_femtet_with_sldworks.py,sha256=qqo2P4qZN0d89uNQyohKxq-Yhdql5vC0QHg4bpy7Ky8,11011
+ pyfemtet/opt/interface/_surrogate/__init__.py,sha256=2UT5NuBylyWQJNjg1zsBRCV-MzNCUswTUt6ZuSrYFUM,120
+ pyfemtet/opt/interface/_surrogate/_base.py,sha256=-k02jRywxaSKMDzGitPE_qBj5nUxC7OL6guF4y6F1Zw,2923
+ pyfemtet/opt/interface/_surrogate/_chaospy.py,sha256=gL72bCgs1AY_EZdJtcifSC-apwsZzp4zsWYxcpVKvtw,1969
+ pyfemtet/opt/interface/_surrogate/_singletaskgp.py,sha256=YBRm-8MRwK26qg6T5LKaAhwPfF3jLcKQV-fycP6dnlA,2406
  pyfemtet/opt/optimizer/__init__.py,sha256=Ia6viowECkG0IFXtFef0tJ4jDKsoDzJLqMJ9xLFH2LQ,543
- pyfemtet/opt/optimizer/_base.py,sha256=GsTOs3ZHLqCglYYRBk4XSJ5h1lR_W9eA2STiyQig6dM,12497
+ pyfemtet/opt/optimizer/_base.py,sha256=j8aQc3fGehZTJT9ETf9cr3VWYs2FYk1F8fO3f7QyKAU,13099
  pyfemtet/opt/optimizer/_optuna/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pyfemtet/opt/optimizer/_optuna/_botorch_patch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pyfemtet/opt/optimizer/_optuna/_botorch_patch/enable_nonlinear_constraint.py,sha256=b2-PP2HM46kJS4cJkBWnxnW9AS9JfeVkEjmkoKK_ziE,8949
- pyfemtet/opt/optimizer/_optuna/_optuna.py,sha256=nKlEYizSu6BQu8OMhRWRJxk5eXJ0LAPR7h6CQOjbdxE,16460
- pyfemtet/opt/optimizer/_optuna/_pof_botorch.py,sha256=yVyg1V3trqirSDtbRepgftvS02AEkAhrgjou21JS124,72717
+ pyfemtet/opt/optimizer/_optuna/_optuna.py,sha256=5Bjn8LrWpIFWH3lRq6Ke5XYzKQNwoGWNHz59HNMEALA,17093
+ pyfemtet/opt/optimizer/_optuna/_pof_botorch.py,sha256=FLx9p6IH8xcZl_SZYvs8grMqLEidj5YaBD8urDD88Pk,73768
  pyfemtet/opt/optimizer/_scipy.py,sha256=_2whhMNq6hC1lr5PlYhpZ8Zlh6-DkAjz8SVB5qHIpYg,4766
  pyfemtet/opt/optimizer/_scipy_scalar.py,sha256=rGvrLjrgfYzxK9GA0-r2Hhoaqt6A0TQsT_1M3moyklc,3615
  pyfemtet/opt/optimizer/parameter.py,sha256=YLE9lmYRaZA8isnTPJnbYXpUn6zsJFW4xg03QaSWey8,3950
  pyfemtet/opt/prediction/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- pyfemtet/opt/prediction/_base.py,sha256=yAQ7Lhfaicynlb6wIHJtTHG4s42AYNRiBxq7bAqBw-Q,1798
- pyfemtet/opt/prediction/single_task_gp.py,sha256=89biIgHY3kM1ymvu0IcVNaODJ_87TXki-GQ_nkM2EwI,3475
+ pyfemtet/opt/prediction/_base.py,sha256=dEyEur3IntNokYK8NhPndHb2pWY_A4C1SjEejOTCUGw,2048
+ pyfemtet/opt/prediction/single_task_gp.py,sha256=6NMSjTtzXJWaW7NEqfqOjz_37mbHpXh9Oo5_KjivRU0,3922
  pyfemtet/opt/samples/femprj_sample/ParametricIF.femprj,sha256=9BtDHmc3cdom0Zq33DTdZ0mDAsIUY6i8SRkkg-n7GO0,442090
  pyfemtet/opt/samples/femprj_sample/ParametricIF.py,sha256=oXzchBZEbH69xacDht5HDnbZzKwapXsn6bp9qihY17Y,707
  pyfemtet/opt/samples/femprj_sample/ParametricIF_test_result.reccsv,sha256=TiOAqEDMub6SCGYClBv1JvQxphDOY3iIdr_pMmGgJ9M,2859
@@ -103,14 +120,14 @@ pyfemtet/opt/visualization/_base.py,sha256=xh6yIkoyBrV670JhAnR9rRewpH7P25wz0pnr0
  pyfemtet/opt/visualization/_complex_components/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pyfemtet/opt/visualization/_complex_components/alert_region.py,sha256=sX8xqT4NqhACagK4YgumF4ResrTqhOKQ8dN4q58shI8,2106
  pyfemtet/opt/visualization/_complex_components/control_femtet.py,sha256=LcMoh_MQQ1-hiz7nMGOmxSSoJLOX8viVxZB6uIggg_g,6243
- pyfemtet/opt/visualization/_complex_components/main_figure_creator.py,sha256=YRV3SK_N6f8FbWTxCcbibkPKyIbgA7CPyCcGBdaKBpU,9455
- pyfemtet/opt/visualization/_complex_components/main_graph.py,sha256=Med4fVTHPhmQXyyMjGcjdCacwOMp4JblAaLKdz_6gVQ,21533
- pyfemtet/opt/visualization/_complex_components/pm_graph.py,sha256=hX0OoJIUqqO4W1bqP1zaQUU2EjRzCg-pMhixkhJEAoA,24926
+ pyfemtet/opt/visualization/_complex_components/main_figure_creator.py,sha256=Wt_aL6srMNW-84LeZ86_OtljzmFoF9v0yklVpPAgNDE,9480
+ pyfemtet/opt/visualization/_complex_components/main_graph.py,sha256=qcofnuWfItYQJWs16zY1p7L4vE_rr5mHEtj9poU0i5I,21711
+ pyfemtet/opt/visualization/_complex_components/pm_graph.py,sha256=eh9rQYUcEefwN3eb7AnXXKFyknl_P_3xt4kl3Y4woI8,25010
  pyfemtet/opt/visualization/_complex_components/pm_graph_creator.py,sha256=f-ikYAPChazqyRQ0Y-tKrYrMBHzFHJJ4uV6QXBEBRKI,7304
  pyfemtet/opt/visualization/_create_wrapped_components.py,sha256=9AltJHr1DM6imZfpNp867rC-uAYqQ-emdgTLChKDrl8,2513
  pyfemtet/opt/visualization/_process_monitor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- pyfemtet/opt/visualization/_process_monitor/application.py,sha256=zrl2Bzz4FdYIV-biOMpxdOIMQvQx41_sR2g4j38A04I,7925
- pyfemtet/opt/visualization/_process_monitor/pages.py,sha256=0EGe_Dd8DxQI9V4DMFjbZv8uEarFY1BLsRcVrvWfRzI,15120
+ pyfemtet/opt/visualization/_process_monitor/application.py,sha256=l9Z1SS4r1BXH6JlccjDyJNanG2JTu4OALbEdZje0XrY,7955
+ pyfemtet/opt/visualization/_process_monitor/pages.py,sha256=VEyP7cRyVjLOgKD6KUHu3AivbwbmjTucQWv-obH-HY0,15135
  pyfemtet/opt/visualization/_wrapped_components/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pyfemtet/opt/visualization/_wrapped_components/dbc.py,sha256=iSh4QRmLIQMfiAWowG1ThXLPhmKluRYOYPcdDFVI0t0,42162
  pyfemtet/opt/visualization/_wrapped_components/dcc.py,sha256=-Iw6MjFQmvJ__KcddPhFDqui6lk2ixB2U2tZH_Il5pA,17500
@@ -119,9 +136,9 @@ pyfemtet/opt/visualization/_wrapped_components/str_enum.py,sha256=NZqbh2jNEAckvJ
  pyfemtet/opt/visualization/result_viewer/.gitignore,sha256=ryvb4aqbbsHireHWlPQfxxqDHQJo6YkVYhE9imKt0b8,6
  pyfemtet/opt/visualization/result_viewer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pyfemtet/opt/visualization/result_viewer/application.py,sha256=WcHBx_J5eNLKSaprpk9BGifwhO04oN8FiNGYTWorrXA,1691
- pyfemtet/opt/visualization/result_viewer/pages.py,sha256=laEAKHAtdshCAHxgXo-zMNg3RP6lCxfszO3XwLnF1dU,32156
- pyfemtet-0.7.0.dist-info/LICENSE,sha256=sVQBhyoglGJUu65-BP3iR6ujORI6YgEU2Qm-V4fGlOA,1485
- pyfemtet-0.7.0.dist-info/METADATA,sha256=T0IJPwww5DvoZN1Z2ucdGJhBsThjPn05lVU27zfC_go,3371
- pyfemtet-0.7.0.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
- pyfemtet-0.7.0.dist-info/entry_points.txt,sha256=ZfYqRaoiPtuWqFi2_msccyrVF0LurMn-IHlYamAegZo,104
- pyfemtet-0.7.0.dist-info/RECORD,,
+ pyfemtet/opt/visualization/result_viewer/pages.py,sha256=zcsRmVpVK7xbmOpnKkSypNPsRyHcV3ingfNmuqln6nw,32171
+ pyfemtet-0.8.0.dist-info/LICENSE,sha256=sVQBhyoglGJUu65-BP3iR6ujORI6YgEU2Qm-V4fGlOA,1485
+ pyfemtet-0.8.0.dist-info/METADATA,sha256=5TFhm1LFB871xXJ4hel9flRi5SylmNjfnQ6w9qy5BJc,3408
+ pyfemtet-0.8.0.dist-info/WHEEL,sha256=RaoafKOydTQ7I_I3JTrPCg6kUmTgtm4BornzOqyEfJ8,88
+ pyfemtet-0.8.0.dist-info/entry_points.txt,sha256=ZfYqRaoiPtuWqFi2_msccyrVF0LurMn-IHlYamAegZo,104
+ pyfemtet-0.8.0.dist-info/RECORD,,
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: poetry-core 1.9.1
+ Generator: poetry-core 2.0.0
  Root-Is-Purelib: true
  Tag: py3-none-any