wolfhece-2.1.124-py3-none-any.whl → wolfhece-2.1.125-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- wolfhece/PyDraw.py +9 -2
- wolfhece/acceptability/acceptability_gui.py +243 -243
- wolfhece/apps/version.py +1 -1
- wolfhece/sigmoid/__init__.py +0 -0
- wolfhece/sigmoid/circle_jax.py +118 -0
- wolfhece/sigmoid/circle_jax_copilot.py +169 -0
- wolfhece/sigmoid/sigmoid.py +776 -0
- wolfhece/wolfresults_2D.py +16 -6
- {wolfhece-2.1.124.dist-info → wolfhece-2.1.125.dist-info}/METADATA +1 -1
- {wolfhece-2.1.124.dist-info → wolfhece-2.1.125.dist-info}/RECORD +13 -9
- {wolfhece-2.1.124.dist-info → wolfhece-2.1.125.dist-info}/WHEEL +1 -1
- {wolfhece-2.1.124.dist-info → wolfhece-2.1.125.dist-info}/entry_points.txt +0 -0
- {wolfhece-2.1.124.dist-info → wolfhece-2.1.125.dist-info}/top_level.txt +0 -0
wolfhece/apps/version.py
CHANGED
File without changes

wolfhece/sigmoid/circle_jax.py
ADDED
@@ -0,0 +1,118 @@
+import jax
+import jax.numpy as jnp
+from jax import jit, vmap
+import matplotlib.pyplot as plt
+import numpy as np
+
+# Force float32
+jax.config.update("jax_enable_x64", False)
+
+# Wetted area of a circular section of radius R for a water depth h
+def wet_area(h, R):
+    h = jnp.clip(h, 0, 2 * R)
+    theta = 2 * jnp.arccos(1 - h / R)
+    area = (R**2 / 2) * (theta - jnp.sin(theta))
+    return area
+
+# Classical "analytical/numerical" reference solution for h (hard bisection on theta)
+def analytical_h(R, f):
+    def solve_theta(theta):
+        return (theta - jnp.sin(theta)) / (2 * jnp.pi) - f
+    theta_min, theta_max = jnp.float32(0.0), jnp.float32(2 * jnp.pi)
+    for _ in range(50):
+        theta_mid = (theta_min + theta_max) / 2
+        val = solve_theta(theta_mid)
+        theta_max = jnp.where(val > 0, theta_mid, theta_max)
+        theta_min = jnp.where(val <= 0, theta_mid, theta_min)
+    theta = (theta_min + theta_max) / 2
+    return R * (1 - jnp.cos(theta / 2))
+
+# Objective function with a sigmoid that adapts to both bounds
+def objective(h, R, f):
+    total_area = jnp.pi * R**2
+    target_area = f * total_area
+    diff = wet_area(h, R) - target_area
+    # Adaptive scale: smaller when f is close to 0 or 1
+    scale_factor = jnp.minimum(f, 1. - f)  # distance to the nearest bound
+    scale = 0.05 * R**2 * jnp.maximum(scale_factor, 0.01)  # avoid a zero scale
+    return 1 / (1 + jnp.exp(-diff / scale))
+
+# Improved soft (differentiable) dichotomy
+@jit
+def soft_dichotomy(R, f, max_iter=200):
+    def body(state, _):
+        h_min, h_max = state
+        h_mid = (h_min + h_max) / 2
+        sigmoid_val = objective(h_mid, R, f)
+        h_min_new = h_min + (h_mid - h_min) * (1 - sigmoid_val)
+        h_max_new = h_max - (h_max - h_mid) * sigmoid_val
+        return (h_min_new, h_max_new), None
+
+    # Initial bounds, tightened for small/large f
+    h_min_init = jnp.float32(0.0)
+    h_max_init = jnp.float32(2 * R)
+    initial_state = (h_min_init, h_max_init)
+    final_state, _ = jax.lax.scan(body, initial_state, None, length=max_iter)
+    h_min, h_max = final_state
+    return (h_min + h_max) / 2
+
+# Derivative with respect to f
+grad_bisection = jax.grad(soft_dichotomy, argnums=1)
+
+
+if __name__ == "__main__":
+    # Parameters
+    R = jnp.float32(1.0)
+    f_values = jnp.linspace(jnp.float32(0.001), jnp.float32(0.999), 500)  # sampled close to the edges
+
+    # Computations
+    h_numerical = vmap(lambda f: soft_dichotomy(R, f))(f_values)
+    h_analytical = vmap(lambda f: analytical_h(R, f))(f_values)
+    errors = jnp.abs(h_numerical - h_analytical)
+
+    grads = vmap(lambda f: grad_bisection(R, f))(f_values)
+
+    # Plots
+    plt.figure(figsize=(12, 5))
+
+    plt.subplot(1, 3, 1)
+    plt.plot(f_values, h_numerical, label="Numerical (sigmoid)", color="blue")
+    plt.plot(f_values, h_analytical, "--", label="Analytical", color="orange")
+    plt.xlabel("Wetted fraction (f)")
+    plt.ylabel("Height (h)")
+    plt.title("Height as a function of f (float32)")
+    plt.legend()
+    plt.grid(True)
+    plt.yscale("log")  # log scale to inspect the extremes
+
+    plt.subplot(1, 3, 2)
+    plt.plot(f_values, errors, color="red")
+    plt.xlabel("Wetted fraction (f)")
+    plt.ylabel("Absolute error (|h_num - h_ana|)")
+    plt.title("Error with respect to the analytical solution")
+    plt.grid(True)
+    plt.yscale("log")  # log scale for the errors
+
+    plt.subplot(1, 3, 3)
+    plt.plot(f_values, grads, label="Derivative with respect to f")
+    plt.xlabel("Wetted fraction (f)")
+    plt.ylabel("Gradient of h with respect to f")
+    plt.legend()
+    plt.grid(True)
+
+    plt.tight_layout()
+    plt.show()
+
+    # Targeted tests at both bounds
+    for f_test in [0., 0.001, 0.01, 0.5, 0.99, 0.999]:
+        f_test = jnp.float32(f_test)
+        h_num = soft_dichotomy(R, f_test)
+        h_ana = analytical_h(R, f_test)
+        grad_h = grad_bisection(R, f_test)
+        print(f"f = {f_test:.4f}:")
+        print(f"  numerical h  = {h_num:.6f}")
+        print(f"  analytical h = {h_ana:.6f}")
+        print(f"  gradient of h with respect to f = {grad_h:.6f}")
+        print(f"  error = {jnp.abs(h_num - h_ana):.6f}")
+        print(f"  relative error = {jnp.abs(h_num - h_ana) / h_ana:.6e}")
+    pass
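
Note: everything in circle_jax.py is pure JAX, so the solver composes with jit, vmap, and grad. A minimal usage sketch follows; the import path wolfhece.sigmoid.circle_jax is an assumption based on the file list above, and the radius and fraction values are illustrative only.

import jax.numpy as jnp
from wolfhece.sigmoid.circle_jax import soft_dichotomy, grad_bisection, wet_area  # assumed import path

R = jnp.float32(2.0)          # pipe radius
f = jnp.float32(0.3)          # target wetted-area fraction

h = soft_dichotomy(R, f)      # depth h such that wet_area(h, R) ~ f * pi * R**2
dh_df = grad_bisection(R, f)  # dh/df, well defined because every bisection update is smooth

# Sanity check: recover the wetted fraction from the computed depth
print(float(wet_area(h, R) / (jnp.pi * R**2)), float(h), float(dh_df))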
wolfhece/sigmoid/circle_jax_copilot.py
ADDED
@@ -0,0 +1,169 @@
+import jax
+import jax.numpy as jnp
+from jax import jit, vmap
+import matplotlib.pyplot as plt
+import numpy as np
+from scipy.optimize import minimize, root_scalar
+
+# Force float32
+jax.config.update("jax_enable_x64", False)
+
+# Wetted area of a circular section of radius R for a water depth h
+def wet_area(h, R):
+    h = jnp.clip(h, 0, 2 * R)
+    theta = 2 * jnp.arccos(1 - h / R)
+    area = (R**2 / 2) * (theta - jnp.sin(theta))
+    return area
+
+# Classical "numerical" reference solution for h
+def analytical_h(R, f):
+    def solve_theta(theta):
+        return (theta - jnp.sin(theta)) / (2 * jnp.pi) - f
+    theta_min, theta_max = jnp.float32(0.0), jnp.float32(2 * jnp.pi)
+    for _ in range(50):
+        theta_mid = (theta_min + theta_max) / 2
+        val = solve_theta(theta_mid)
+        theta_max = jnp.where(val > 0, theta_mid, theta_max)
+        theta_min = jnp.where(val <= 0, theta_mid, theta_min)
+    theta = (theta_min + theta_max) / 2
+    return R * (1 - jnp.cos(theta / 2))
+
+# # Objective function with quadratic weighting
+# def objective(h, R, f):
+#     total_area = jnp.pi * R**2
+#     target_area = f * total_area
+#     diff = wet_area(h, R) - target_area
+#     scale_factor = jnp.minimum(f, 1. - f)
+#     scale = 0.1 * R**2 * (scale_factor**2 + 0.01)  # quadratic scale
+#     return 1 / (1 + jnp.exp(-diff / scale))
+
+# def objective(h, R, f):
+#     total_area = jnp.pi * R**2
+#     target_area = f * total_area
+#     diff = wet_area(h, R) - target_area
+#     scale_factor = jnp.minimum(f, 1. - f)
+#     scale = 0.1 * R**2 * jnp.maximum(scale_factor, 0.01)
+#     return 0.5 * (1 + jnp.tanh(diff / scale))
+
+
+# Smoothed objective function (unchanged)
+def objective(h, R, f):
+    total_area = jnp.pi * R**2
+    target_area = f * total_area
+    diff = wet_area(h, R) - target_area
+    scale_factor = jnp.minimum(f, 1. - f)
+    scale = 0.05 * R**2 * jnp.maximum(scale_factor, 0.01)
+    return 1 / (1 + jnp.exp(-diff / scale))
+
+# Soft dichotomy with root preservation
+@jit
+def soft_dichotomy(R, f, max_iter=10000):
+    f_sym = jnp.minimum(f, 1 - f)  # solve the symmetric problem, mirror at the end
+
+    def body(state, _):
+        h_min, h_max = state
+        h_mid = (h_min + h_max) / 2
+        sigmoid_val = objective(h_mid, R, f_sym)
+
+        # Residuals at h_min and h_max
+        diff_min = wet_area(h_min, R) - f_sym * jnp.pi * R**2
+        diff_max = wet_area(h_max, R) - f_sym * jnp.pi * R**2
+
+        # Smooth preservation factors
+        preserve_min = jnp.exp(-jnp.abs(diff_min) / (0.01))  # close to 1 if h_min is already a root
+        preserve_max = jnp.exp(-jnp.abs(diff_max) / (0.01))  # close to 1 if h_max is already a root
+
+        # Update the bounds with preservation
+        h_min_new = h_min + (1. - preserve_min) * (h_mid - h_min) * (1. - sigmoid_val)
+        h_max_new = h_max - (1. - preserve_max) * (h_max - h_mid) * sigmoid_val
+
+        # # Guarantee that h_min_new < h_max_new
+        # h_min_new = jnp.minimum(h_min_new, h_mid - 0.01 * R)
+        # h_max_new = jnp.maximum(h_max_new, h_mid + 0.01 * R)
+
+        return (h_min_new, h_max_new), None
+
+    h_min_init = jnp.float32(0.0)
+    h_max_init = jnp.float32(2 * R)
+    initial_state = (h_min_init, h_max_init)
+    final_state, _ = jax.lax.scan(body, initial_state, None, length=max_iter)
+    h_min, h_max = final_state
+    h_sym = (h_min + h_max) / 2
+    return jnp.where(f <= 0.5, h_sym, 2 * R - h_sym)
+
+# Derivative with respect to f
+grad_bisection = jax.grad(soft_dichotomy, argnums=1)
+
+# Signed residual between the wetted area and the target area
+def section(h, R, f):
+    total_area = jnp.pi * R**2
+    target_area = f * total_area
+    return wet_area(jnp.clip(h, 0, 2 * R), R) - target_area
+
+grad_section = jax.grad(section, argnums=0)
+
+# Root finding on the section residual (SciPy Newton, for reference)
+def find_root(R, f):
+    def fun(h):
+        return section(h, R, f)
+    def grad(h):
+        return grad_section(h, R, f)
+
+    h_root = root_scalar(fun, fprime=grad, x0=R)
+    return h_root.root
+
+h = find_root(1., 0.5)
+pass
+
+if __name__ == "__main__":
+    R = jnp.float32(1.0)
+    f_values = jnp.linspace(jnp.float32(0.00), jnp.float32(1.), 5000, endpoint=True)
+
+    h_numerical = vmap(lambda f: soft_dichotomy(R, f))(f_values)
+    h_analytical = vmap(lambda f: analytical_h(R, f))(f_values)
+    errors = jnp.abs(h_numerical - h_analytical)
+
+    grads = vmap(lambda f: grad_bisection(R, f))(f_values)
+
+    plt.figure(figsize=(12, 5))
+
+    plt.subplot(1, 3, 1)
+    plt.plot(f_values, h_numerical, label="Numerical (central symmetry)", color="blue")
+    plt.plot(f_values, h_analytical, "--", label="Analytical", color="orange")
+    plt.xlabel("Wetted fraction (f)")
+    plt.ylabel("Height (h)")
+    plt.title("Height as a function of f (float32)")
+    plt.legend()
+    plt.grid(True)
+    # plt.yscale("log")
+
+    plt.subplot(1, 3, 2)
+    plt.plot(f_values, errors, color="red")
+    plt.xlabel("Wetted fraction (f)")
+    plt.ylabel("Absolute error (|h_num - h_ana|)")
+    plt.title("Error with respect to the analytical solution")
+    plt.grid(True)
+    plt.yscale("log")
+
+    plt.subplot(1, 3, 3)
+    plt.plot(f_values, grads, label="Derivative with respect to f")
+    plt.xlabel("Wetted fraction (f)")
+    plt.ylabel("Gradient of h with respect to f")
+    plt.legend()
+    plt.grid(True)
+
+    plt.tight_layout()
+    plt.show()
+
+    for f_test in [0., 0.001, 0.01, 0.5, 0.99, 0.999]:
+        f_test = jnp.float32(f_test)
+        h_num = soft_dichotomy(R, f_test)
+        h_ana = analytical_h(R, f_test)
+        grad_h = grad_bisection(R, f_test)
+        print(f"f = {f_test:.4f}:")
+        print(f"  numerical h  = {h_num:.6f}")
+        print(f"  analytical h = {h_ana:.6f}")
+        print(f"  gradient of h with respect to f = {grad_h:.6f}")
+        print(f"  error = {jnp.abs(h_num - h_ana):.6f}")
+        print(f"  relative error = {jnp.abs(h_num - h_ana) / h_ana:.6e}")
+    pass
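
The distinguishing idea in circle_jax_copilot.py is the symmetry reduction inside soft_dichotomy: the scan always solves for f_sym = min(f, 1 - f) and mirrors the result as h(f) = 2R - h(1 - f), which keeps the adaptive sigmoid scale away from its degenerate regime near f = 1. The identity follows from wet_area(2R - h, R) = pi * R**2 - wet_area(h, R). Below is a short self-contained check of that identity, reusing only the reference bisection from the diff (the sampled fractions are illustrative):

import jax.numpy as jnp

def reference_h(R, f, iters=50):
    # hard bisection on the central angle, same scheme as analytical_h above
    t_lo, t_hi = jnp.float32(0.0), jnp.float32(2 * jnp.pi)
    for _ in range(iters):
        t_mid = (t_lo + t_hi) / 2
        val = (t_mid - jnp.sin(t_mid)) / (2 * jnp.pi) - f
        t_hi = jnp.where(val > 0, t_mid, t_hi)
        t_lo = jnp.where(val <= 0, t_mid, t_lo)
    theta = (t_lo + t_hi) / 2
    return R * (1 - jnp.cos(theta / 2))

R = jnp.float32(1.0)
for f in [0.1, 0.25, 0.75, 0.9]:
    h_direct = reference_h(R, jnp.float32(f))
    h_mirror = 2 * R - reference_h(R, jnp.float32(1 - f))
    print(f, float(h_direct), float(h_mirror))  # the two columns should agree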