pyactup 2.1__tar.gz → 2.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,11 +1,10 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: pyactup
3
- Version: 2.1
3
+ Version: 2.2
4
4
  Summary: A lightweight Python implementation of a subset of the ACT-R cognitive architecture’s Declarative Memory
5
5
  Home-page: https://bitbucket.org/dfmorrison/pyactup/
6
6
  Author: Don Morrison
7
7
  Author-email: dfm2@cmu.edu
8
- License: UNKNOWN
9
8
  Platform: any
10
9
  Classifier: Intended Audience :: Science/Research
11
10
  Classifier: License :: OSI Approved :: MIT License
@@ -56,5 +55,3 @@ PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIG
56
55
  HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
57
56
  CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
58
57
  OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
59
-
60
-
@@ -1,11 +1,10 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: pyactup
3
- Version: 2.1
3
+ Version: 2.2
4
4
  Summary: A lightweight Python implementation of a subset of the ACT-R cognitive architecture’s Declarative Memory
5
5
  Home-page: https://bitbucket.org/dfmorrison/pyactup/
6
6
  Author: Don Morrison
7
7
  Author-email: dfm2@cmu.edu
8
- License: UNKNOWN
9
8
  Platform: any
10
9
  Classifier: Intended Audience :: Science/Research
11
10
  Classifier: License :: OSI Approved :: MIT License
@@ -56,5 +55,3 @@ PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIG
56
55
  HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
57
56
  CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
58
57
  OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
59
-
60
-
@@ -37,7 +37,7 @@ may be strictly algorithmic, may interact with human subjects, or may be embedde
37
37
  sites.
38
38
  """
39
39
 
40
- __version__ = "2.1"
40
+ __version__ = "2.2"
41
41
 
42
42
  if "dev" in __version__:
43
43
  print("PyACTUp version", __version__)
@@ -47,7 +47,6 @@ import csv
47
47
  import io
48
48
  import math
49
49
  import numpy as np
50
- import numpy.ma as ma
51
50
  import operator
52
51
  import random
53
52
  import re
@@ -214,7 +213,7 @@ class Memory(dict):
214
213
 
215
214
  >>> m = Memory()
216
215
  >>> m.learn({"color": "red"})
217
- True
216
+ <Chunk 0000 {'color': 'red'} 1>
218
217
  >>> m.advance()
219
218
  1
220
219
  >>> m.activation_history = []
@@ -342,11 +341,11 @@ class Memory(dict):
342
341
 
343
342
  >>> m = Memory(temperature=1, noise=0)
344
343
  >>> m.learn({"size": 1})
345
- True
344
+ <Chunk 0000 {'size': 1} 1>
346
345
  >>> m.advance(10)
347
346
  10
348
347
  >>> m.learn({"size": 10})
349
- True
348
+ <Chunk 0001 {'size': 10} 1>
350
349
  >>> m.advance()
351
350
  11
352
351
  >>> m.blend("size")
@@ -653,11 +652,11 @@ class Memory(dict):
653
652
 
654
653
  >>> m = Memory()
655
654
  >>> m.learn({"color": "red", "size": 3})
656
- True
655
+ <Chunk 0005 {'color': 'red', 'size': 3} 1>
657
656
  >>> m.advance()
658
657
  1
659
658
  >>> m.learn({"color": "red", "size": 5})
660
- True
659
+ <Chunk 0006 {'color': 'red', 'size': 5} 1>
661
660
  >>> m.advance()
662
661
  2
663
662
  >>> m.activation_history = []
@@ -684,7 +683,6 @@ class Memory(dict):
684
683
  'activation_noise': 0.4191470689622754,
685
684
  'activation': 0.4191470689622754,
686
685
  'retrieval_probability': 0.905269525909957}]
687
-
688
686
  """
689
687
  return self._activation_history
690
688
 
@@ -778,16 +776,16 @@ class Memory(dict):
778
776
 
779
777
  >>> m = Memory()
780
778
  >>> m.learn({"color":"red", "size":4})
781
- True
779
+ <Chunk 0000 {'color': 'red', 'size': 4} 1>
782
780
  >>> m.advance()
783
781
  1
784
782
  >>> m.learn({"color":"blue", "size":4}, advance=1)
783
+ <Chunk 0001 {'color': 'blue', 'size': 4} 1>
784
+ >>> m.learn({"color":"red", "size":4}) is None
785
785
  True
786
- >>> m.learn({"color":"red", "size":4})
787
- False
788
786
  >>> m.advance()
789
787
  3
790
- >>> m.retrieve({"color": "red"})
788
+ >>> m.retrieve({"color": "red"})
791
789
  <Chunk 0000 {'color': 'red', 'size': 4} 2>
792
790
  """
793
791
  slots = self._ensure_slots(slots, True)
@@ -954,9 +952,9 @@ class Memory(dict):
954
952
  - self._decay * np.log(ages))
955
953
  else:
956
954
  result = np.empty(nchunks)
957
- counts = ma.masked_all(nchunks)
958
- ages = ma.masked_all(nchunks)
959
- middles = ma.masked_all(nchunks)
955
+ counts = np.ma.masked_all(nchunks)
956
+ ages = np.ma.masked_all(nchunks)
957
+ middles = np.ma.masked_all(nchunks)
960
958
  for c, i in zip(chunks, count()):
961
959
  if c._reference_count <= self._optimized_learning:
962
960
  result[i] = np.sum((self._time - c._references[0:c._reference_count])
@@ -1011,6 +1009,12 @@ class Memory(dict):
1011
1009
  penalties = np.empty((nchunks, len(partial_slots)))
1012
1010
  for c, row in zip(chunks, count()):
1013
1011
  penalties[row] = [s._similarity(c[n], v) for n, v, s in partial_slots]
1012
+ if self._activation_history is not None:
1013
+ offset = 0 if self.use_actr_similarity else 1
1014
+ for i, pens in zip(count(initial_history_length), penalties):
1015
+ similarities = {ps[0]: p + offset
1016
+ for ps, p in zip(partial_slots, pens)}
1017
+ self._activation_history[i]["similarities"] = similarities
1014
1018
  penalties = np.sum(penalties, 1) * self._mismatch
1015
1019
  result += penalties
1016
1020
  if self._activation_history is not None:
@@ -1034,9 +1038,9 @@ class Memory(dict):
1034
1038
  self._activation_history[i]["meets_threshold"] = (r >= self._threshold)
1035
1039
  raw_activations_count = len(result)
1036
1040
  if self._threshold is not None:
1037
- m = ma.masked_less(result, self._threshold)
1038
- if ma.is_masked(m):
1039
- chunks = ma.array(chunks, mask=ma.getmask(m)).compressed()
1041
+ m = np.ma.masked_less(result, self._threshold)
1042
+ if np.ma.is_masked(m):
1043
+ chunks = np.ma.array(chunks, mask=np.ma.getmask(m)).compressed()
1040
1044
  result = m.compressed()
1041
1045
  except FloatingPointError as e:
1042
1046
  raise RuntimeError(f"Error when computing activations, perhaps a chunk's "
@@ -1065,11 +1069,11 @@ class Memory(dict):
1065
1069
 
1066
1070
  >>> m = Memory()
1067
1071
  >>> m.learn({"widget":"thromdibulator", "color":"red", "size":2})
1068
- True
1072
+ <Chunk 0000 {'widget': 'thromdibulator', 'color': 'red', 'size': 2} 1>
1069
1073
  >>> m.advance()
1070
1074
  1
1071
1075
  >>> m.learn({"widget":"snackleizer", "color":"blue", "size":1})
1072
- True
1076
+ <Chunk 0001 {'widget': 'snackleizer', 'color': 'blue', 'size': 1} 1>
1073
1077
  >>> m.advance()
1074
1078
  2
1075
1079
  >>> m.retrieve({"color":"blue"})["widget"]
@@ -1094,60 +1098,123 @@ class Memory(dict):
1094
1098
  self._cite(result)
1095
1099
  return result
1096
1100
 
1097
- def _blend(self, outcome_attribute, slots):
1101
+ def _blend(self, outcome_attribute, slots, instance_salience, feature_salience):
1098
1102
  Memory._ensure_slot_name(outcome_attribute)
1099
1103
  activations, chunks, raw = self._activations(self._ensure_slots(slots),
1100
1104
  extra=outcome_attribute)
1101
1105
  if chunks is None:
1102
- return None, None
1106
+ return None, None, None, None
1103
1107
  with np.errstate(divide="raise", over="raise", under="ignore", invalid="raise"):
1104
1108
  wp = np.exp(activations / self._temperature)
1105
1109
  wp /= np.sum(wp)
1106
- if self._activation_history is not None:
1107
- h = self._activation_history
1108
- # this i malarkey is in case one or more candidates didn't clear the threshold
1109
- i = len(h) - raw
1110
- for p, c in zip(wp, chunks):
1111
- while h[i]["name"] != c._name:
1112
- i += 1
1113
- assert i < len(h)
1114
- h[i]["retrieval_probability"] = p
1115
- return wp, chunks
1116
-
1117
- def blend(self, outcome_attribute, slots={}):
1110
+ if self._activation_history is not None:
1111
+ h = self._activation_history
1112
+ # this i malarkey is in case one or more candidates didn't clear the threshold
1113
+ i = len(h) - raw
1114
+ for p, c in zip(wp, chunks):
1115
+ while h[i]["name"] != c._name:
1116
+ i += 1
1117
+ assert i < len(h)
1118
+ h[i]["retrieval_probability"] = p
1119
+ def normalize(v):
1120
+ v = np.array(v)
1121
+ norm = np.linalg.norm(v)
1122
+ return v / norm if norm > 0 else v
1123
+ isal = None
1124
+ if instance_salience:
1125
+ vals = np.array([c[outcome_attribute] for c in chunks])
1126
+ isal = normalize(wp * (vals - np.sum(wp * vals)) / self._temperature)
1127
+ fsal = None
1128
+ if feature_salience and self._mismatch is not None:
1129
+ pslots = [a for a in slots if self._similarities.get(a)]
1130
+ if self._mismatch != 0:
1131
+ def slot_salience(attr, attrval):
1132
+ deriv = self._similarities[attr]._derivative
1133
+ weight = self._similarities[attr]._weight
1134
+ if not deriv:
1135
+ raise RuntimeError(f"No derivative defined for {attr} similarities")
1136
+ dvals = np.array([weight * deriv(c[attr], attrval) for c in chunks])
1137
+ dsum = np.sum(wp * dvals)
1138
+ return np.sum(wp * (dvals - dsum) * np.array([c[outcome_attribute]
1139
+ for c in chunks]))
1140
+ # Doing the division up front could make for loss of precision
1141
+ # but this is unlikely to matter in any realistic use case.
1142
+ coef = self._mismatch / self._temperature
1143
+ fsal = [coef * slot_salience(a, slots[a]) for a in pslots]
1144
+ else:
1145
+ fsal = [0] * len(pslots)
1146
+ fsal = dict(zip(pslots, normalize(fsal)))
1147
+ return wp, chunks, isal, fsal
1148
+
1149
+ def blend(self, outcome_attribute, slots={}, instance_salience=False, feature_salience=False):
1118
1150
  """Returns a blended value for the given attribute of those chunks matching *slots*, and which contain *outcome_attribute*, and have activations greater than or equal to this Memory's threshold, if any.
1119
1151
  Returns ``None`` if there are no matching chunks that contain
1120
1152
  *outcome_attribute*. If any matching chunk has a value of *outcome_attribute*
1121
1153
  that is not a real number an :exc:`Exception` is raised.
1122
1154
 
1155
+ If neither ``instance_salience`` nor ``feature_salience`` is true, the sole return
1156
+ value is the blended value; otherwise a tuple of three values is returned. The
1157
+ first is the blended value. If ``instance_salience`` is true the second is a dict
1158
+ mapping descriptions of the slot values of each of the matched chunks that
1159
+ contributed to the blended value to the normalized instance salience value, a real
1160
+ number between -1 and 1, inclusive; otherwise the second value is ``None``. The
1161
+ slot representation of slot values in this dict is a tuple of tuples, the inner
1162
+ tuples being the slot name and value.
1163
+
1164
+ If ``feature_salience`` is true the third value is a dict mapping slot names,
1165
+ corresponding to those slots that were partially matched in this blending
1166
+ operation, to their normalized feature salience values, a real number between -1
1167
+ and 1, inclusive; otherwise the third value is ``None``. To compute feature
1168
+ salience a derivative of the similarity function must have been specified for
1169
+ every partially matched slot using :meth:`similarity`; if any are missing a
1170
+ :exc:`RuntimeError` is raised.
1171
+
1123
1172
  >>> m = Memory()
1124
1173
  >>> m.learn({"color":"red", "size":2})
1125
- True
1174
+ <Chunk 0000 {'color': 'red', 'size': 2} 1>
1126
1175
  >>> m.advance()
1127
1176
  1
1128
1177
  >>> m.learn({"color":"blue", "size":30})
1129
- True
1178
+ <Chunk 0001 {'color': 'blue', 'size': 30} 1>
1130
1179
  >>> m.advance()
1131
1180
  2
1132
1181
  >>> m.learn({"color":"red", "size":1})
1133
- True
1182
+ <Chunk 0002 {'color': 'red', 'size': 1} 1>
1134
1183
  >>> m.advance()
1135
1184
  3
1136
1185
  >>> m.blend("size", {"color":"red"})
1137
- 1.221272238515685
1186
+ 1.3660254037844388
1187
+ >>> m.blend("size", {"color":"red"}, instance_salience=True)
1188
+ (1.3660254037844388,
1189
+ {(('color', 'red'), ('size', 2)): 0.7071067811865472,
1190
+ (('color', 'red'), ('size', 1)): -0.7071067811865478},
1191
+ None)
1192
+
1138
1193
  """
1139
- probs, chunks = self._blend(outcome_attribute, slots)
1140
- if chunks is None:
1141
- return None
1142
- with np.errstate(divide="raise", over="raise", under="ignore", invalid="raise"):
1143
- try:
1144
- return np.average(np.array([c[outcome_attribute] for c in chunks],
1145
- dtype=np.float64),
1146
- weights=probs)
1147
- except Exception as e:
1148
- raise RuntimeError(f"Error computing blended value, is perhaps the value "
1149
- f"of the {outcome_attribute} slotis not numeric in "
1150
- f"one of the matching chunks? ({e})")
1194
+ probs, chunks, isal, fsal = self._blend(outcome_attribute, slots,
1195
+ instance_salience, feature_salience)
1196
+ if chunks is not None:
1197
+ with np.errstate(divide="raise", over="raise", under="ignore", invalid="raise"):
1198
+ try:
1199
+ result = np.average(np.array([c[outcome_attribute] for c in chunks],
1200
+ dtype=np.float64),
1201
+ weights=probs)
1202
+ except Exception as e:
1203
+ raise RuntimeError(f"Error computing blended value, is perhaps the value "
1204
+ f"of the {outcome_attribute} slotis not numeric in "
1205
+ f"one of the matching chunks? ({e})")
1206
+ else:
1207
+ result = None
1208
+ if not instance_salience and not feature_salience:
1209
+ return result
1210
+ if instance_salience:
1211
+ if isal is not None:
1212
+ isal = {tuple(c.items()): s for c, s in zip(chunks, isal)}
1213
+ else:
1214
+ isal = {}
1215
+ if feature_salience and fsal is None:
1216
+ fsal = {}
1217
+ return result, isal, fsal
1151
1218
 
1152
1219
  def best_blend(self, outcome_attribute, iterable, select_attribute=None, minimize=False):
1153
1220
  """Returns two values (as a 2-tuple), describing the extreme blended value of the *outcome_attribute* over the values provided by *iterable*.
@@ -1173,25 +1240,25 @@ class Memory(dict):
1173
1240
 
1174
1241
  >>> m = Memory()
1175
1242
  >>> m.learn({"color":"red", "utility":1})
1176
- True
1243
+ <Chunk 0000 {'color': 'red', 'utility': 1} 1>
1177
1244
  >>> m.advance()
1178
1245
  1
1179
1246
  >>> m.learn({"color":"blue", "utility":2})
1180
- True
1247
+ <Chunk 0001 {'color': 'blue', 'utility': 2} 1>
1181
1248
  >>> m.advance()
1182
1249
  2
1183
1250
  >>> m.learn({"color":"red", "utility":1.8})
1184
- True
1251
+ <Chunk 0002 {'color': 'red', 'utility': 1.8} 1>
1185
1252
  >>> m.advance()
1186
1253
  3
1187
1254
  >>> m.learn({"color":"blue", "utility":0.9})
1188
- True
1255
+ <Chunk 0003 {'color': 'blue', 'utility': 0.9} 1>
1189
1256
  >>> m.advance()
1190
1257
  4
1191
1258
  >>> m.best_blend("utility", ({"color": c} for c in ("red", "blue")))
1192
1259
  ({'color': 'blue'}, 1.5149259914576285)
1193
1260
  >>> m.learn({"color":"blue", "utility":-1})
1194
- True
1261
+ <Chunk 0004 {'color': 'blue', 'utility': -1} 1>
1195
1262
  >>> m.advance()
1196
1263
  5
1197
1264
  >>> m.best_blend("utility", ("red", "blue"), "color")
@@ -1231,25 +1298,24 @@ class Memory(dict):
1231
1298
 
1232
1299
  >>> m = Memory()
1233
1300
  >>> m.learn({"kind": "tilset", "age": "old"})
1234
- True
1301
+ <Chunk 0000 {'kind': 'tilset', 'age': 'old'} 1>
1235
1302
  >>> m.advance()
1236
1303
  1
1237
1304
  >>> m.learn({"kind": "limburger", "age": "old"})
1238
- True
1305
+ <Chunk 0001 {'kind': 'limburger', 'age': 'old'} 1>
1239
1306
  >>> m.advance()
1240
1307
  2
1241
1308
  >>> m.learn({"kind": "tilset", "age": "old"})
1242
- False
1243
1309
  >>> m.advance()
1244
1310
  3
1245
1311
  >>> m.learn({"kind": "tilset", "age": "new"})
1246
- True
1312
+ <Chunk 0002 {'kind': 'tilset', 'age': 'new'} 1>
1247
1313
  >>> m.advance()
1248
1314
  4
1249
1315
  >>> m.discrete_blend("kind", {"age": "old"})
1250
1316
  ('tilset', {'tilset': 0.9540373563209859, 'limburger': 0.04596264367901423})
1251
1317
  """
1252
- probs, chunks = self._blend(outcome_attribute, slots)
1318
+ probs, chunks, isal, fsal = self._blend(outcome_attribute, slots, False, False)
1253
1319
  if not chunks:
1254
1320
  return None, None
1255
1321
  candidates = defaultdict(list)
@@ -1268,7 +1334,7 @@ class Memory(dict):
1268
1334
  return (random.choice(best),
1269
1335
  dict(sorted(candidates.items(), key=lambda x: x[1], reverse=True)))
1270
1336
 
1271
- def similarity(self, attributes, function=None, weight=None):
1337
+ def similarity(self, attributes, function=None, weight=None, derivative=None):
1272
1338
  """Assigns a similarity function and/or corresponding weight to be used when comparing attribute values with the given *attributes*.
1273
1339
  The *attributes* should be an :class:`Iterable` of strings, attribute names.
1274
1340
  The *function* should take two arguments, and return a real number between 0 and 1,
@@ -1281,12 +1347,23 @@ class Memory(dict):
1281
1347
  will, in most cases, be meaningless if they are.
1282
1348
  If ``True`` is supplied as the *function* a default similarity function is used
1283
1349
  that returns one if its two arguments are ``==`` and zero otherwise.
1284
- If only one of *function* or *weight* is supplied, it is changed without
1285
- changing the other; the initial defaults are ``True`` for *function* and ``1``
1286
- for *weight*.
1287
- If neither *function* nor *weight* is supplied both are removed, and these
1288
- *attributes* will no longer have an associated similarity computation, and will
1289
- be matched only exactly.
1350
+
1351
+ If *derivative* is supplied it should be a callable, the first partial derivative
1352
+ of the similarity function with respect to its first argument, and will be used
1353
+ if the feature saliences are requested in :meth:`blend`. The *derivative* must
1354
+ be defined for all values that may occur for the relevant slots. It is common
1355
+ that the strict mathematical derivative may not exists for one or a small number
1356
+ of possibly values, most commonly when the similarly involves the absolute value
1357
+ of the difference between the two arguments of the similarly function. Even in
1358
+ these cases the argument to :meth:`similarity` should return a value; often zero
1359
+ is a good choice in these cases.
1360
+
1361
+ If only one or two of *function*, *weight* and *derivative* are supplied, they
1362
+ are changed without changing those not supplied; the initial defaults are ``True``
1363
+ for *function*, ``1`` for *weight*, and ``None`` for *derivative*. If none of
1364
+ *function*, *weight* nor *derivative* are supplied all are removed, and these
1365
+ *attributes* will no longer have an associated similarity computation, and will be
1366
+ matched only exactly.
1290
1367
 
1291
1368
  As a convenience, if none of the attribute names contains commas or spaces, a
1292
1369
  string may be used instead of a list as the first argument to ``similarity``, the
@@ -1308,14 +1385,15 @@ class Memory(dict):
1308
1385
  ... return f(y, x)
1309
1386
  ... return 1 - (y - x) / y
1310
1387
  >>> similarity(["length", "width"], f, weight=2)
1311
-
1312
1388
  """
1313
1389
  if function is not None and not (callable(function) or function is True):
1314
1390
  raise ValueError(f"Function {function} is neither callable nor True")
1391
+ if derivative is not None and not callable(derivative):
1392
+ raise(ValueError(f"Derivative {derivative} is not callable"))
1315
1393
  if weight is not None and weight <= 0:
1316
1394
  raise ValueError(f"Similarity weight, {weight}, is not a positive number")
1317
1395
  for a in Memory._ensure_slot_names(attributes):
1318
- if function is None and weight is None:
1396
+ if function is None and weight is None and derivative is None:
1319
1397
  if a in self._similarities:
1320
1398
  del self._similarities[a]
1321
1399
  else:
@@ -1323,12 +1401,19 @@ class Memory(dict):
1323
1401
  sim._memory = self
1324
1402
  if function is not None and function != sim._function:
1325
1403
  sim._function = function
1404
+ if derivative is not None and function != sim._derivative:
1405
+ sim._derivative = derivative
1326
1406
  if weight is not None and weight != sim._weight:
1327
1407
  sim._weight = weight
1328
1408
  sim._cache.clear()
1329
1409
 
1330
1410
 
1331
1411
  class Chunk(dict):
1412
+ """A learned item.
1413
+
1414
+ A chunk acts much like a dictionary, and its slots can be retrieved with the usual
1415
+ `[]` notation, or with `.get()`.
1416
+ """
1332
1417
 
1333
1418
  __slots__ = ["_name", "_memory", "_creation", "_references", "_reference_count" ]
1334
1419
 
@@ -1350,6 +1435,11 @@ class Chunk(dict):
1350
1435
  def __str__(self):
1351
1436
  return f"Chunk-{self._name}"
1352
1437
 
1438
+ @property
1439
+ def memory(self):
1440
+ """The :class:`Memory` object that contains this chunk."""
1441
+ return self._memory
1442
+
1353
1443
  @property
1354
1444
  def reference_count(self):
1355
1445
  """A non-negative integer, the number of times that this :class:`Chunk` has been reinforced.
@@ -1358,21 +1448,22 @@ class Chunk(dict):
1358
1448
 
1359
1449
  @property
1360
1450
  def references(self):
1361
- """A list of real numbers, the times at which that this :class:`Chunk` has been reinforced.
1451
+ """A tuple of real numbers, the times at which that this :class:`Chunk` has been reinforced.
1362
1452
  If :attr:`optimized_learning` is being used this may be just the most recent
1363
1453
  reinforcements, or an empty list, depending upon the value of
1364
- :attr:`optimized_learning`
1454
+ :attr:`optimized_learning`.
1365
1455
  """
1366
- return list(self._references[:(self._reference_count
1367
- if self._memory._optimized_learning is None
1368
- else min(self._reference_count,
1369
- self._memory._optimized_learning))])
1456
+ return tuple(self._references[:(self._reference_count
1457
+ if self._memory._optimized_learning is None
1458
+ else min(self._reference_count,
1459
+ self._memory._optimized_learning))])
1370
1460
 
1371
1461
 
1372
1462
  @dataclass
1373
1463
  class Similarity:
1374
1464
  _memory: Memory = None
1375
1465
  _function: callable = True
1466
+ _derivative: callable = None
1376
1467
  _weight: float = 1.0
1377
1468
  _cache: lrucache = field(default_factory=lambda: lrucache(SIMILARITY_CACHE_SIZE))
1378
1469
 
File without changes
File without changes
@@ -1,3 +1,3 @@
1
1
  numpy
2
- prettytable
3
2
  pylru
3
+ prettytable
File without changes
File without changes