femagtools 1.3.0-py3-none-any.whl → 1.3.2-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- femagtools/__init__.py +1 -1
- femagtools/airgap.py +11 -37
- femagtools/amela.py +148 -13
- femagtools/bch.py +19 -3
- femagtools/dxfsl/area.py +68 -15
- femagtools/dxfsl/converter.py +15 -6
- femagtools/dxfsl/fslrenderer.py +13 -8
- femagtools/dxfsl/functions.py +1 -1
- femagtools/dxfsl/geom.py +415 -62
- femagtools/dxfsl/machine.py +97 -5
- femagtools/dxfsl/shape.py +46 -2
- femagtools/ecloss.py +393 -0
- femagtools/femag.py +25 -1
- femagtools/fsl.py +3 -2
- femagtools/hxy.py +126 -0
- femagtools/isa7.py +37 -24
- femagtools/machine/__init__.py +14 -13
- femagtools/machine/effloss.py +153 -32
- femagtools/machine/im.py +137 -56
- femagtools/machine/pm.py +584 -202
- femagtools/machine/sm.py +218 -64
- femagtools/machine/utils.py +12 -8
- femagtools/mcv.py +6 -8
- femagtools/model.py +11 -1
- femagtools/parstudy.py +1 -1
- femagtools/plot.py +159 -35
- femagtools/templates/afm_rotor.mako +102 -0
- femagtools/templates/afm_stator.mako +141 -0
- femagtools/templates/airgapinduc.mako +3 -3
- femagtools/templates/basic_modpar.mako +23 -2
- femagtools/templates/cogg_calc.mako +28 -5
- femagtools/templates/cu_losses.mako +1 -1
- femagtools/templates/fieldcalc.mako +39 -0
- femagtools/templates/gen_winding.mako +52 -47
- femagtools/templates/mesh-airgap.mako +43 -0
- femagtools/templates/stator3Linear.mako +5 -4
- femagtools/templates/therm-dynamic.mako +12 -6
- femagtools/templates/therm-static.mako +12 -0
- femagtools/templates/torq_calc.mako +2 -4
- femagtools/utils.py +45 -0
- femagtools/windings.py +2 -1
- {femagtools-1.3.0.dist-info → femagtools-1.3.2.dist-info}/METADATA +1 -1
- {femagtools-1.3.0.dist-info → femagtools-1.3.2.dist-info}/RECORD +47 -41
- {femagtools-1.3.0.dist-info → femagtools-1.3.2.dist-info}/WHEEL +1 -1
- {femagtools-1.3.0.dist-info → femagtools-1.3.2.dist-info}/LICENSE +0 -0
- {femagtools-1.3.0.dist-info → femagtools-1.3.2.dist-info}/entry_points.txt +0 -0
- {femagtools-1.3.0.dist-info → femagtools-1.3.2.dist-info}/top_level.txt +0 -0
femagtools/hxy.py
ADDED
@@ -0,0 +1,126 @@
+# -*- coding: utf-8 -*-
+"""
+femagtools.hxy
+~~~~~~~~~~~~~~
+
+Reading HXY files (EXPERIMENTAL)
+
+"""
+
+import numpy as np
+from collections import defaultdict
+
+# K-means clustering
+class point():
+    def __init__(self, index, k, coord):
+        self.index = index
+        self.coord = coord
+        self.k = k
+
+def make_k_mapping(points):
+    region = defaultdict(list)
+    for p in points:
+        region[p.k] = region[p.k] + [p.coord]
+    return region
+
+def calc_k_means(region):
+    return [np.mean(region[k], axis=0) for k in region]
+
+def update_k(points, means):
+    for p in points:
+        dists = [np.linalg.norm(m - p.coord) for m in means]
+        p.k = np.argmin(dists)
+
+def fit(points, epochs=10):
+    for e in range(epochs):
+        region = make_k_mapping(points)
+        means = calc_k_means(region)
+        update_k(points, means)
+    return means, points
+
+def evaluate(points):
+    region = make_k_mapping(points)
+    means = calc_k_means(region)
+    dists = [np.linalg.norm(means[p.k]-p.coord) for p in points]
+    return np.mean(dists)
+
+def llf_(y, X, pr):
+    # return maximized log likelihood
+    nobs = float(X.shape[0])
+    nobs2 = nobs / 2.0
+    nobs = float(nobs)
+    resid = y - pr
+    ssr = np.sum((resid)**2)
+    llf = -nobs2*np.log(2*np.pi) - nobs2*np.log(ssr / nobs) - nobs2
+    return llf
+
+
+def aic(y, X, pr, p):
+    # return aic metric
+    llf = llf_(y, X, pr)
+    return -2*llf+2*p
+
+
+def readSections(f):
+    section = []
+    movepos = False
+    for line in f:
+        if line.startswith(' MOVE POSITION'):
+            movepos = True
+            if section:
+                # skip empty lines
+                i = 0
+                try:
+                    while not section[i]:
+                        i = i+1
+                except IndexError:
+                    i = i-1
+                yield section[i:]
+                section = []
+        if movepos:
+            section.append(line.strip())
+    yield section
+
+
+def read(filename, num_magnets):
+    """read hxy file and return values grouped to magnets"""
+    hxy = []
+    with open(filename, encoding='latin1', errors='ignore') as f:
+        for s in readSections(f):
+            pos = float(s[0].split()[-1])
+            num = np.array([[float(x) for x in l.split()] for l in s[5:] if l])
+            hxy.append({'pos': pos, 'e': num[:, :2], 'hxy': num[:, 2:4],
+                        'bxy': num[:, 4:6], 'mxy': num[:, 6:]})
+    K = num_magnets
+    points = [point(i, np.random.randint(0, K), xy)
+              for i, xy in enumerate(hxy[0]['e'])]
+    new_means, new_points = fit(points)
+    # move values to magnets:
+    magnets = [{'e': [p.coord for p in new_points if p.k == k],
+                'pos': [], 'hxy': [], 'bxy': [], 'mxy': []}
+               for k in range(K)]
+    hkeys = ['hxy', 'bxy', 'mxy']
+    for i, h in enumerate(hxy):  # all positions
+        for mag in magnets:
+            mag['pos'].append(h['pos'])
+        m = [{k: [] for k in hkeys}
+             for kk in range(K)]
+        for p in new_points:  # all elements
+            for k in hkeys:
+                m[p.k][k].append(h[k][p.k])
+        for mk, magk in zip(m, magnets):
+            for k in hkeys:
+                magk[k].append(mk[k])
+    for mag in magnets:
+        for k in ['e'] + hkeys:
+            mag[k] = np.array(mag[k])
+        mag['havg'] = []
+        mag['hmax'] = []
+        for hpos in mag['hxy']:
+            h = np.abs(np.linalg.norm(hpos, axis=1))
+            mag['havg'].append(np.mean(h))
+            mag['hmax'].append(np.max(h))
+
+    # Note dimension of hkeys is (positions x elements x 2)
+
+    return magnets
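The reader above groups the per-element H, B and M values of an HXY file to individual magnets via the small k-means helper. A minimal usage sketch (the file name and magnet count below are hypothetical; the returned keys follow the code above):

    import numpy as np
    import femagtools.hxy

    # hypothetical HXY file written by a FEMAG run on a machine with 4 magnets
    magnets = femagtools.hxy.read('pm_sim.hxy', num_magnets=4)
    for i, mag in enumerate(magnets):
        # mag['hxy'] has dimension (positions x elements x 2), see note above;
        # 'havg'/'hmax' hold the mean and peak |H| per move position
        print(f"magnet {i}: mean |H| {np.mean(mag['havg']):.4g}, "
              f"peak |H| {np.max(mag['hmax']):.4g}")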
femagtools/isa7.py
CHANGED
@@ -196,7 +196,9 @@ class Reader(object):
             self.el_fe_induction_1.append([[]])
             self.el_fe_induction_2.append([[]])
             for i in range(NUM_FE_EVAL_MOVE_STEP + 1):
-                self.
+                b = self.next_block("h")
+                logger.debug("el_fe_induction move step %d: %d", i, len(b))
+                self.el_fe_induction_1[0][0].append(b)
                 self.el_fe_induction_2[0][0].append(self.next_block("h"))

             FC_NUM_MOVE_CALC_LOAD_PMS, FC_NUM_FLX = self.next_block("i")[0:2]
@@ -270,7 +272,9 @@ class Reader(object):
             self.el_fe_induction_2.append([[]])
             self.eddy_cu_vpot.append([[]])
             for i in range(NUM_FE_EVAL_MOVE_STEP + 1):
-                self.
+                b = self.next_block("h")
+                logger.debug("el_fe_induction move losses step %d: %d", i, len(b))
+                self.el_fe_induction_1[1][0].append(b)
                 self.el_fe_induction_2[1][0].append(self.next_block("h"))
             for i in range(NUM_FE_EVAL_MOVE_STEP + 1):
                 self.eddy_cu_vpot[1][0].append(self.next_block("h"))
@@ -335,7 +339,10 @@ class Reader(object):
             self.el_fe_induction_2.append([[]])
             self.eddy_cu_vpot.append([[]])
             for i in range(NUM_FE_EVAL_MOVE_STEP + 1):
-                self.
+                b = self.next_block("h")
+                logger.debug("el_fe_induction move losses (2) step %d: %d",
+                             i, len(b))
+                self.el_fe_induction_1[2][0].append(b)
                 self.el_fe_induction_2[2][0].append(self.next_block("h"))
             for i in range(NUM_FE_EVAL_MOVE_STEP + 1):
                 self.eddy_cu_vpot[2][0].append(self.next_block("h"))
@@ -401,8 +408,8 @@ class Reader(object):
                 offset += chunksize
                 logger.debug("%s: %d %d", fmt_, blockSize, len(unpacked))
             except struct.error as e:
-                logger.warning("Invalid Blocksize %s",
-                               blockSize)
+                logger.warning("Invalid Blocksize %s at pos %i",
+                               blockSize, self.pos-4)

         self.pos += blockSize + 4

@@ -688,25 +695,31 @@ class Isa7(object):
             self.curr_loss = np.array([c/np.sqrt(2) for c in reader.curr_loss])
         except AttributeError:
             pass
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+        try:
+            el_fe_ind = [np.array(reader.el_fe_induction_1).T/1000,
+                         np.array(reader.el_fe_induction_2).T/1000]
+            eddy_cu_vpot = np.array(reader.eddy_cu_vpot).T/1000
+        except (ValueError, TypeError) as e:
+            # inhomogenous array
+            l = len(reader.el_fe_induction_1[0][0])
+            shape = []
+            for i in reader.el_fe_induction_1:
+                for j in i:
+                    n = 0
+                    for k in j:
+                        if len(k) < l:
+                            break
+                        n += 1
+                    if n > 0:
+                        shape.append(n)
+            el_fe_ind = [np.array([[reader.el_fe_induction_1[0][0][:shape[0]]]]).T/1000,
+                         np.array([[reader.el_fe_induction_2[0][0][:shape[0]]]]).T/1000]
+            eddy_cu_vpot = np.array([[reader.eddy_cu_vpot[0][0][:shape[0]]]]).T/1000
+
+        self.el_fe_induction_1 = el_fe_ind[0]
+        self.el_fe_induction_2 = el_fe_ind[1]
+        self.eddy_cu_vpot = eddy_cu_vpot

         self.iron_loss_coefficients = getattr(
             reader, 'iron_loss_coefficients', [])
femagtools/machine/__init__.py
CHANGED
@@ -35,6 +35,13 @@ def create_from_eecpars(temp, eecpars, lfe=1, wdg=1):
     PM, EESM or IM"""
     rlfe = lfe
     rwdg = wdg
+    opts = {k: eecpars[k] for k in ('zeta1', 'gam', 'kh', 'kpfe',
+                                    'kfric_b') if k in eecpars}
+    try:
+        opts['rotor_mass'] = rlfe*eecpars['rotor_mass']
+    except KeyError:
+        pass
+
     if 'ldq' in eecpars or 'psidq' in eecpars:  # this is a PM (or EESM)
         try:
             dqpars = eecpars['ldq']
@@ -48,13 +55,9 @@ def create_from_eecpars(temp, eecpars, lfe=1, wdg=1):
             smpars['tcu1'] = temp[0]
             smpars['tcu2'] = temp[1]
             if 'ldq' in smpars:
-                machine = SynchronousMachineLdq(smpars, lfe=rlfe, wdg=rwdg)
+                machine = SynchronousMachineLdq(smpars, lfe=rlfe, wdg=rwdg, **opts)
             else:
-                machine = SynchronousMachinePsidq(smpars, lfe=rlfe, wdg=rwdg)
-            try:
-                machine.rotor_mass = rlfe*eecpars['rotor_mass']
-            except KeyError:
-                pass
+                machine = SynchronousMachinePsidq(smpars, lfe=rlfe, wdg=rwdg, **opts)
             return machine

         if isinstance(dqpars, list) and len(dqpars) > 1:
@@ -84,7 +87,8 @@ def create_from_eecpars(temp, eecpars, lfe=1, wdg=1):
                 losses=losses,
                 id=np.array(dqp['id'])/rwdg,
                 iq=np.array(dqp['iq'])/rwdg,
-                tcu1=temp[0]
+                tcu1=temp[0],
+                **opts)
         else:
             beta = dqp['beta']
             i1 = np.array(dqp['i1'])/rwdg
@@ -97,11 +101,8 @@ def create_from_eecpars(temp, eecpars, lfe=1, wdg=1):
                 losses=losses,
                 beta=beta,
                 i1=i1,
-                tcu1=temp[0]
-
-            machine.rotor_mass = rlfe*eecpars['rotor_mass']
-        except KeyError:
-            pass
+                tcu1=temp[0],
+                **opts)
         return machine

     # must be an induction machine (TODO: check scaling)
@@ -111,7 +112,6 @@ def create_from_eecpars(temp, eecpars, lfe=1, wdg=1):
     pars['lsigma2'] = rlfe*pars['lsigma2']
     pars['psiref'] = rwdg*rlfe*pars['psiref']
     pars['u1ref'] = rwdg*rlfe*pars['u1ref']
-    pars['rotor_mass'] = rlfe*pars['rotor_mass']
     pars['r2'] = rlfe*pars['r2']
     pars['fec'] = rlfe*pars['fec']
     pars['fee'] = rlfe*pars['fee']
@@ -119,6 +119,7 @@ def create_from_eecpars(temp, eecpars, lfe=1, wdg=1):
     pars['psi'] = [psi*rwdg*rlfe for psi in pars['psi']]
     pars['tcu1'] = temp[0]
     pars['tcu2'] = temp[1]
+    pars.update(opts)
     return InductionMachine(pars)

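The refactoring in create_from_eecpars collects the optional loss and friction parameters once into an opts dict and forwards it to every machine constructor instead of patching rotor_mass afterwards. A small sketch of the extraction step with made-up parameter values (a real eecpars dict additionally carries the ldq/psidq or induction-machine data):

    eecpars = {'zeta1': 0.3, 'kh': 1.0, 'kfric_b': 1.0, 'rotor_mass': 6.4}
    rlfe = 1.2   # relative iron length scaling

    opts = {k: eecpars[k] for k in ('zeta1', 'gam', 'kh', 'kpfe',
                                    'kfric_b') if k in eecpars}
    try:
        opts['rotor_mass'] = rlfe*eecpars['rotor_mass']
    except KeyError:
        pass
    print(opts)
    # {'zeta1': 0.3, 'kh': 1.0, 'kfric_b': 1.0, 'rotor_mass': 7.68}

These keyword arguments then reach the machine objects unchanged via **opts (SynchronousMachineLdq/Psidq, the PM dq constructors) or via pars.update(opts) for InductionMachine.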
femagtools/machine/effloss.py
CHANGED
@@ -1,6 +1,7 @@
 import numpy as np
 import scipy.interpolate as ip
 import logging
+import multiprocessing
 from .utils import betai1
 from .pm import PmRelMachineLdq, PmRelMachinePsidq, PmRelMachine
 from .sm import SynchronousMachine, SynchronousMachineLdq, SynchronousMachinePsidq
@@ -9,6 +10,88 @@ from . import create_from_eecpars
 logger = logging.getLogger("femagtools.effloss")


+def iqd_tmech_umax(m, u1, with_mtpa, progress, speed_torque, iq, id, iex):
+    """calculate iq, id for each load (n, T) from speed_torque at voltage u1
+
+    Arguments:
+    m: PmRelMachine or SynchronousMachine
+    u1: (float) phase voltage (V)
+    speed_torque: list of (n, T) pairs
+    with_mtpa: (bool) use mtpa in const flux range if True
+    progress: logging pipe """
+    nsamples = len(speed_torque)
+    num_iv = round(nsamples/7)
+    try:
+        for i, nT in enumerate(speed_torque):
+            iqde = m.iqd_tmech_umax(
+                nT[1], 2*np.pi*nT[0]*m.p,
+                u1, with_mtpa=with_mtpa)[:-1]
+            iq[i] = iqde[0]
+            id[i] = iqde[1]
+            if len(iqde) > 2:
+                iex[i] = iqde[2]
+            if i % num_iv == 0:
+                progress.send(f"{100*i/nsamples:.1f}%")
+    except Exception as e:
+        progress.send(e)
+    finally:
+        progress.close()
+
+def iqd_tmech_umax_multi(num_proc, ntmesh, m, u1, with_mtpa):
+    """calculate iqd for sm and pm using multiproc
+    """
+    progress_readers = []
+    chunksize = int(np.ceil(ntmesh.shape[1]/num_proc))
+    procs = []
+    iq = []
+    id = []
+    iex = []
+    iexk = []
+    k = 0
+    for i in range(0, num_proc*chunksize, chunksize):
+        prog_reader, prog_writer = multiprocessing.Pipe(duplex=False)
+        progress_readers.append(prog_reader)
+        iq.append(multiprocessing.Array('d', chunksize))
+        id.append(multiprocessing.Array('d', chunksize))
+        if isinstance(m, SynchronousMachine):
+            iex.append(multiprocessing.Array('d', chunksize))
+            iexk = iex[k]
+        p = multiprocessing.Process(target=iqd_tmech_umax,
+                                    args=(m, u1, with_mtpa,
+                                          prog_writer,
+                                          ntmesh.T[i:i+chunksize],
+                                          iq[k], id[k], iexk))
+        k += 1
+        p.start()
+        procs.append(p)
+        prog_writer.close()
+
+    i = 0
+    collected_msg = []
+    while progress_readers:
+        for r in multiprocessing.connection.wait(progress_readers):
+            try:
+                collected_msg.append(r.recv())
+                i += 1
+            except EOFError:
+                progress_readers.remove(r)
+            else:
+                if i % len(progress_readers) == 0:
+                    logger.info("Losses/Eff Map: %s",
+                                ', '.join(collected_msg))
+                    collected_msg = []
+    for p in procs:
+        p.join()
+    siz = ntmesh.shape[1]
+    if iex:
+        return np.array([np.array(iq).flatten(),
+                         np.array(id).flatten(),
+                         np.array(iex).flatten()])[:, :siz]
+
+    return np.array([np.array(iq).flatten(),
+                     np.array(id).flatten()])[:, :siz]
+
+
 def _generate_mesh(n, T, nb, Tb, npoints):
     """return speed and torque list for driving/braking speed range

@@ -34,7 +117,7 @@ def _generate_mesh(n, T, nb, Tb, npoints):
         def tbip(x): return 0

     nxtx = []
-    for nx in np.linspace(1, nmax, npoints[0]):
+    for nx in np.linspace(n[1], nmax, npoints[0]):
         t0 = tbip(nx)
         t1 = tip(nx)
         npnts = max(round((t1-t0) / (tmax-tmin) * tnum), 2)
@@ -49,7 +132,10 @@
     return np.array(nxtx).T


-def efficiency_losses_map(eecpars, u1, T, temp, n, npoints=(60, 40)):
+def efficiency_losses_map(eecpars, u1, T, temp, n, npoints=(60, 40),
+                          with_mtpv=True, with_mtpa=True, with_pmconst=True,
+                          with_tmech=True, driving_only=False,
+                          num_proc=0, progress=None):
     """return speed, torque efficiency and losses

     arguments:
@@ -57,10 +143,16 @@ def efficiency_losses_map(eecpars, u1, T, temp, n, npoints=(60, 40)):
       dicts at different temperatures (or machine object)
     u1: (float) phase voltage (V rms)
     T: (float) starting torque (Nm)
-    temp: temperature (°C)
+    temp: temperature (°C) (ignored if eecpars is machine objectb)
     n: (float) maximum speed (1/s)
     npoints: (list) number of values of speed and torque
-
+    driving_only: (bool) do not calculate braking speed/torque samples if True
+    with_mtpv -- (optional) use mtpv if True (default)
+    with_pmconst -- (optional) use pmax if True (default)
+    with_mtpa -- (optional) use mtpa if True (default), disables mtpv if False
+    with_tmech -- (optional) use friction and windage losses (default)
+    num_proc -- (optional) number of parallel processes (default 0)
+    progress -- (optional) custom function for progress logging
     """
     if isinstance(eecpars, dict):
         if isinstance(temp, (list, tuple)):
@@ -77,39 +169,67 @@
         nmax = n
         nsamples = npoints[0]
         rb = {}
-        r = m.characteristics(T, nmax, u1, nsamples=nsamples
-
+        r = m.characteristics(T, nmax, u1, nsamples=nsamples,
+                              with_mtpv=with_mtpv, with_mtpa=with_mtpa,
+                              with_pmconst=with_pmconst, with_tmech=with_tmech)  # driving mode
+        if driving_only:
+            rb['n'] = None
+            rb['T'] = None
+        elif isinstance(m, (PmRelMachineLdq, SynchronousMachineLdq)):
             if min(m.betarange) >= -np.pi/2:  # driving mode only
                 rb['n'] = None
                 rb['T'] = None
-
+        elif isinstance(m, (PmRelMachinePsidq, SynchronousMachinePsidq)):
             if min(m.iqrange) >= 0:  # driving mode only
                 rb['n'] = None
                 rb['T'] = None
         if 'n' not in rb:
-            rb = m.characteristics(-T, max(r['n']), u1, nsamples=nsamples
+            rb = m.characteristics(-T, max(r['n']), u1, nsamples=nsamples,
+                                   with_mtpv=with_mtpv, with_mtpa=with_mtpa,
+                                   with_pmconst=with_pmconst, with_tmech=with_tmech)  # braking mode
         ntmesh = _generate_mesh(r['n'], r['T'],
                                 rb['n'], rb['T'], npoints)

-
-        def __init__(self, nsamples):
-            self.n = 0
-            self.nsamples = nsamples
-            self.num_iv = round(nsamples/15)
-        def __call__(self, iqd):
-            self.n += 1
-            if self.n % self.num_iv == 0:
-                logger.info("Losses/Eff Map: %d%%",
-                            round(100*self.n/self.nsamples))
-
+    logger.info("total speed,torque samples %d", ntmesh.shape[1])
     if isinstance(m, (PmRelMachine, SynchronousMachine)):
-
-
-
-
-
-
-
+        if num_proc > 1:
+            iqd = iqd_tmech_umax_multi(num_proc, ntmesh, m, u1, with_mtpa)
+        else:
+            class ProgressLogger:
+                def __init__(self, nsamples):
+                    self.n = 0
+                    self.nsamples = nsamples
+                    self.num_iv = round(nsamples/15)
+                def __call__(self, iqd):
+                    self.n += 1
+                    if self.n % self.num_iv == 0:
+                        logger.info("Losses/Eff Map: %d%%",
+                                    round(100*self.n/self.nsamples))
+            if progress is None:
+                progress = ProgressLogger(ntmesh.shape[1])
+            else:
+                try:
+                    progress.nsamples = ntmesh.shape[1]
+                    progress(0)  # To check conformity
+                    progress.n = 0
+                except:
+                    logger.warning("Invalid ProgressLogger given to efficiency_losses_map, using default one!")
+                    progress = ProgressLogger(ntmesh.shape[1])
+            if with_tmech:
+                iqd = np.array([
+                    m.iqd_tmech_umax(
+                        nt[1],
+                        2*np.pi*nt[0]*m.p,
+                        u1, log=progress, with_mtpa=with_mtpa)[:-1]
+                    for nt in ntmesh.T]).T
+            else:
+                iqd = np.array([
+                    m.iqd_torque_umax(
+                        nt[1],
+                        2*np.pi*nt[0]*m.p,
+                        u1, log=progress, with_mtpa=with_mtpa)[:-1]
+                    for nt in ntmesh.T]).T
+
         beta, i1 = betai1(iqd[0], iqd[1])
         uqd = [m.uqd(2*np.pi*n*m.p, *i)
                for n, i in zip(ntmesh[0], iqd.T)]
@@ -138,10 +258,7 @@ def efficiency_losses_map(eecpars, u1, T, temp, n, npoints=(60, 40)):
         plmag = m.iqd_plmag(iqd[0], iqd[1], f1)
         plcu1 = m.iqd_plcu1(iqd[0], iqd[1], 2*np.pi*f1)
         plcu2 = m.iqd_plcu2(*iqd)
-
-            tfric = m.kfric_b*m.rotor_mass*30e-3/np.pi
-        except AttributeError:
-            tfric = 0
+        tfric = m.tfric
     else:
         plfe1 = np.array(r['plfe1'])
         plfe2 = np.zeros(ntmesh.shape[1])
@@ -152,12 +269,16 @@
         u1 = np.array(r['u1'])
         i1 = np.array(r['i1'])
         try:
-
+            if isinstance(eecpars, dict):
+                tfric = eecpars['kfric_b']*eecpars['rotor_mass']*30e-3/np.pi
+            else:
+                tfric = m.tfric
         except KeyError:
            tfric = 0

    plfric = 2*np.pi*ntmesh[0]*tfric
-
+    if not with_tmech:
+        ntmesh[1] -= tfric
    pmech = np.array(
        [2*np.pi*nt[0]*nt[1]
         for nt in ntmesh.T])