pyactup 2.0__tar.gz → 2.2.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,4 +1,4 @@
1
- Copyright (c) 2018-2022 Carnegie Mellon University
1
+ Copyright (c) 2018-2024 Carnegie Mellon University
2
2
 
3
3
  Permission is hereby granted, free of charge, to any person obtaining a copy of this
4
4
  software and associated documentation files (the "Software"), to deal in the Software
@@ -1,11 +1,10 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: pyactup
3
- Version: 2.0
3
+ Version: 2.2.3
4
4
  Summary: A lightweight Python implementation of a subset of the ACT-R cognitive architecture’s Declarative Memory
5
- Home-page: https://bitbucket.org/dfmorrison/pyactup/
5
+ Home-page: https://dfmorrison.github.io/pyactup-documentation/
6
6
  Author: Don Morrison
7
7
  Author-email: dfm2@cmu.edu
8
- License: UNKNOWN
9
8
  Platform: any
10
9
  Classifier: Intended Audience :: Science/Research
11
10
  Classifier: License :: OSI Approved :: MIT License
@@ -25,20 +24,20 @@ ACT-R cognitive architecture’s Declarative Memory, suitable for
25
24
  incorporating into other Python models and applications. Its design
26
25
  is inspired by the ACT-UP cognitive modeling toolbox.
27
26
 
28
- There is [online documentation of PyACTUp](http://halle.psy.cmu.edu/pyactup/),
29
- and the [sources](https://bitbucket.org/dfmorrison/pyactup/) are on Bitbucket.
27
+ There is [online documentation of PyACTUp](http://koalemos.psy.cmu.edu/pyactup/),
28
+ and the [sources](https://github.com/dfmorrison/pyactup/) are on GitHub.
30
29
 
31
- The latest version of PyACTUp can be download and install from PyPi with ``pip``:
30
+ The latest version of PyACTUp can be download and installed from PyPi with pip:
32
31
 
33
- .. parsed-literal:: pip install pyactup
32
+ pip install pyactup
34
33
 
35
- Use of a virtual environment for Python, such as ``venv`` or Anaconda is recommended.
34
+ Use of a virtual environment for Python, such as venv or Anaconda is recommended.
36
35
 
37
36
  PyACTUp requires Python version 3.8 or later.
38
37
 
39
38
  PyACTUp is released under the following MIT style license:
40
39
 
41
- Copyright (c) 2018-2022 Carnegie Mellon University
40
+ Copyright (c) 2018-2024 Carnegie Mellon University
42
41
 
43
42
  Permission is hereby granted, free of charge, to any person obtaining a copy of this
44
43
  software and associated documentation files (the "Software"), to deal in the Software
@@ -56,5 +55,3 @@ PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIG
56
55
  HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
57
56
  CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
58
57
  OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
59
-
60
-
@@ -3,20 +3,20 @@ ACT-R cognitive architecture’s Declarative Memory, suitable for
3
3
  incorporating into other Python models and applications. Its design
4
4
  is inspired by the ACT-UP cognitive modeling toolbox.
5
5
 
6
- There is [online documentation of PyACTUp](http://halle.psy.cmu.edu/pyactup/),
7
- and the [sources](https://bitbucket.org/dfmorrison/pyactup/) are on Bitbucket.
6
+ There is [online documentation of PyACTUp](http://koalemos.psy.cmu.edu/pyactup/),
7
+ and the [sources](https://github.com/dfmorrison/pyactup/) are on GitHub.
8
8
 
9
- The latest version of PyACTUp can be download and install from PyPi with ``pip``:
9
+ The latest version of PyACTUp can be download and installed from PyPi with pip:
10
10
 
11
- .. parsed-literal:: pip install pyactup
11
+ pip install pyactup
12
12
 
13
- Use of a virtual environment for Python, such as ``venv`` or Anaconda is recommended.
13
+ Use of a virtual environment for Python, such as venv or Anaconda is recommended.
14
14
 
15
15
  PyACTUp requires Python version 3.8 or later.
16
16
 
17
17
  PyACTUp is released under the following MIT style license:
18
18
 
19
- Copyright (c) 2018-2022 Carnegie Mellon University
19
+ Copyright (c) 2018-2024 Carnegie Mellon University
20
20
 
21
21
  Permission is hereby granted, free of charge, to any person obtaining a copy of this
22
22
  software and associated documentation files (the "Software"), to deal in the Software
@@ -1,11 +1,10 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: pyactup
3
- Version: 2.0
3
+ Version: 2.2.3
4
4
  Summary: A lightweight Python implementation of a subset of the ACT-R cognitive architecture’s Declarative Memory
5
- Home-page: https://bitbucket.org/dfmorrison/pyactup/
5
+ Home-page: https://dfmorrison.github.io/pyactup-documentation/
6
6
  Author: Don Morrison
7
7
  Author-email: dfm2@cmu.edu
8
- License: UNKNOWN
9
8
  Platform: any
10
9
  Classifier: Intended Audience :: Science/Research
11
10
  Classifier: License :: OSI Approved :: MIT License
@@ -25,20 +24,20 @@ ACT-R cognitive architecture’s Declarative Memory, suitable for
25
24
  incorporating into other Python models and applications. Its design
26
25
  is inspired by the ACT-UP cognitive modeling toolbox.
27
26
 
28
- There is [online documentation of PyACTUp](http://halle.psy.cmu.edu/pyactup/),
29
- and the [sources](https://bitbucket.org/dfmorrison/pyactup/) are on Bitbucket.
27
+ There is [online documentation of PyACTUp](http://koalemos.psy.cmu.edu/pyactup/),
28
+ and the [sources](https://github.com/dfmorrison/pyactup/) are on GitHub.
30
29
 
31
- The latest version of PyACTUp can be download and install from PyPi with ``pip``:
30
+ The latest version of PyACTUp can be download and installed from PyPi with pip:
32
31
 
33
- .. parsed-literal:: pip install pyactup
32
+ pip install pyactup
34
33
 
35
- Use of a virtual environment for Python, such as ``venv`` or Anaconda is recommended.
34
+ Use of a virtual environment for Python, such as venv or Anaconda is recommended.
36
35
 
37
36
  PyACTUp requires Python version 3.8 or later.
38
37
 
39
38
  PyACTUp is released under the following MIT style license:
40
39
 
41
- Copyright (c) 2018-2022 Carnegie Mellon University
40
+ Copyright (c) 2018-2024 Carnegie Mellon University
42
41
 
43
42
  Permission is hereby granted, free of charge, to any person obtaining a copy of this
44
43
  software and associated documentation files (the "Software"), to deal in the Software
@@ -56,5 +55,3 @@ PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIG
56
55
  HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
57
56
  CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
58
57
  OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
59
-
60
-
@@ -1,3 +1,4 @@
1
1
  numpy
2
- prettytable
3
2
  pylru
3
+ prettytable
4
+ packaging
@@ -1,4 +1,4 @@
1
- # Copyright (c) 2018-2022 Carnegie Mellon University
1
+ # Copyright (c) 2018-2024 Carnegie Mellon University
2
2
  #
3
3
  # Permission is hereby granted, free of charge, to any person obtaining a copy of this
4
4
  # software and associated documentation files (the "Software"), to deal in the Software
@@ -37,7 +37,7 @@ may be strictly algorithmic, may interact with human subjects, or may be embedde
37
37
  sites.
38
38
  """
39
39
 
40
- __version__ = "2.0"
40
+ __version__ = "2.2.3"
41
41
 
42
42
  if "dev" in __version__:
43
43
  print("PyACTUp version", __version__)
@@ -47,9 +47,9 @@ import csv
47
47
  import io
48
48
  import math
49
49
  import numpy as np
50
- import numpy.ma as ma
51
50
  import operator
52
51
  import random
52
+ import re
53
53
  import sys
54
54
 
55
55
  from dataclasses import dataclass, field
@@ -61,7 +61,8 @@ from prettytable import PrettyTable
61
61
  from pylru import lrucache
62
62
  from warnings import warn
63
63
 
64
- __all__ = ["Memory"]
64
+ __all__ = ["__version__", "Memory"]
65
+
65
66
 
66
67
  DEFAULT_NOISE = 0.25
67
68
  DEFAULT_DECAY = 0.5
@@ -91,7 +92,7 @@ class Memory(dict):
91
92
  saved to and restored from persistent storage, so long as any similarity functions it
92
93
  contains are defined at the top level of a module using ``def``. Note that attempts to
93
94
  pickle a Memory object containing a similarity function defined as a lambda function,
94
- or as an inner function, will cause a :exc:`Exception` to be raised. And note further
95
+ or as an inner function, will cause an :exc:`Exception` to be raised. And note further
95
96
  that pickle only includes the function name in the pickled object, not its definition.
96
97
  Also, if the contents of a ``Memory`` object are sufficiently complicated it may be
97
98
  necessary to raise Python's recursion limit with
@@ -99,18 +100,18 @@ class Memory(dict):
99
100
 
100
101
  A common use case for PyACTUp involves all of the chunks in a ``Memory`` having the
101
102
  same attributes, and some of those attributes are always used, by matching exactly,
102
- not partially, some of those attributes. The ``index``keyword argument declares that
103
+ not partially, some of those attributes. The ``index`` keyword argument declares that
103
104
  such a set of attributes is present, and can result in significant performance
104
105
  improvements for models with a *very* large number of chunks. The value of this
105
106
  keyword argument should be a list of attribute names. As a convenience, if none of the
106
- attribute names contains commas or spaces, a string maybe used instead of a list, the
107
+ attribute names contain commas or spaces, a string maybe used instead of a list, the
107
108
  attribute names being separated by spaces or commas; either spaces or commas must be
108
109
  used, not a mixture. For example, both ``index="decision utility"`` and
109
110
  ``index="decision,utiliy"`` are equivalent to ``index=["decision", "utility"]``. A
110
111
  list of he attributes in a :class:`Memory`'s *index* can be retrieved with the
111
112
  :attr:`index` property. If the ``Memory`` is empty, containing no chunks, the *index*
112
113
  can be modified by setting that property, but otherwise the *index* cannot be changed
113
- after the ``Memory`` was created. `All chunks in a ``Memory`` with an *index* must
114
+ after the ``Memory`` was created. All chunks in a ``Memory`` with an *index* must
114
115
  contain values for all the attributes listed in the *index*; if any are omitted in the
115
116
  argument to :meth:`learn` they will be automatically added with a value of ``None``.
116
117
 
@@ -134,12 +135,14 @@ class Memory(dict):
134
135
  self._fixed_noise_time = None
135
136
  self._temperature_param = 1 # will be reset below, but is needed for noise assignment
136
137
  self._noise = None
138
+ self._noise_distribution = None
137
139
  self._decay = None
138
140
  self._optimized_learning = None
139
141
  self._use_actr_similarity = False
140
142
  self._minimum_similarity = 0
141
143
  self._maximum_similarity = 1
142
144
  self._similarities = defaultdict(Similarity)
145
+ self._extra_activation = None
143
146
  self.noise = noise
144
147
  self.decay = decay
145
148
  if temperature is None and not self._validate_temperature(None, noise):
@@ -168,11 +171,19 @@ class Memory(dict):
168
171
  """Deletes this :class:`Memory`'s chunks and resets its time to zero.
169
172
  If *preserve_prepopulated* is ``False`` it deletes all chunks; if it is ``True``
170
173
  it deletes all chunk references later than time zero, completely deleting those
171
- chunks that were created at a time other than zero. If *index* is supplied it
174
+ chunks that were created at a time greater than zero. If *index* is supplied it
172
175
  sets the :class:`Memory`'s index to that value.
173
176
  """
177
+ if preserve_prepopulated and self._optimized_learning is not None:
178
+ preserve_prepopulated = False
179
+ warn("The preserve_prepopulated argument to reset() cannot be used when "
180
+ "optimized_learning is on, and is being ignored")
174
181
  if preserve_prepopulated:
175
- preserved = {k: v for k, v in self.items() if v._creation == 0}
182
+ preserved = {k: c for k, c in self.items() if c._creation <= 0}
183
+ for c in preserved.values():
184
+ c._references = np.array([r for r in c._references[:c._reference_count]
185
+ if r <= 0])
186
+ c._reference_count = len(c._references)
176
187
  self.clear()
177
188
  self._slot_name_index.clear()
178
189
  self._index.clear()
@@ -182,11 +193,12 @@ class Memory(dict):
182
193
  if index is not None:
183
194
  self.index = index
184
195
  if preserve_prepopulated:
185
- for k, v in preserved.items():
186
- v._references = np.empty(1, dtype=np.int32) if self._optimized_learning else np.array([0])
187
- v._reference_count = 1
188
- self[k] = v
189
- self._slot_name_index[frozenset(v.keys())].append(v)
196
+ for k, c in preserved.items():
197
+ self[k] = c
198
+ self._slot_name_index[frozenset(c.keys())].append(c)
199
+ if self._indexed_attributes:
200
+ self._index[Memory._signature(c, "learn", self._indexed_attributes)
201
+ ].append(c)
190
202
 
191
203
  @property
192
204
  @contextmanager
@@ -201,8 +213,8 @@ class Memory(dict):
201
213
  practical.
202
214
 
203
215
  >>> m = Memory()
204
- >>> m.learn(color="red")
205
- True
216
+ >>> m.learn({"color": "red"})
217
+ <Chunk 0000 {'color': 'red'} 1>
206
218
  >>> m.advance()
207
219
  1
208
220
  >>> m.activation_history = []
@@ -266,13 +278,13 @@ class Memory(dict):
266
278
 
267
279
  @property
268
280
  def index(self):
269
- """A list of the attribute names in this ``Memory``'s index.
281
+ """A tuple of the attribute names in this ``Memory``'s index.
270
282
  If this :class:`Memory` is empty, containing no chunks, this can also be set,
271
283
  using the same syntax as in the :class:`Memory` constructor. However, if
272
284
  this ``Memory`` contains chunks an attempt to set the ``index`` will raise
273
285
  a :exc:`RuntimeError`.
274
286
  """
275
- return sorted(self._indexed_attributes)
287
+ return tuple(sorted(self._indexed_attributes))
276
288
 
277
289
  @index.setter
278
290
  def index(self, value):
@@ -284,18 +296,33 @@ class Memory(dict):
284
296
  assert not self._index and not self._slot_name_index
285
297
  self._indexed_attributes = indexed_attributes
286
298
 
299
+ @staticmethod
300
+ def is_real(x, name, non_negative=True, positive=False, none_allowed=True):
301
+ if none_allowed and x is None:
302
+ return
303
+ if (x is True or x is False
304
+ or (positive and x <= 0)
305
+ or (non_negative and x < 0)):
306
+ if positive:
307
+ mod = " positive"
308
+ elif non_negative:
309
+ mod = " non-negative"
310
+ else:
311
+ mod = ""
312
+ raise ValueError(f"The {name}, {x}, must be{' None or' if none_allowed else ''} a{mod} real number")
313
+
287
314
  @property
288
315
  def time(self):
289
316
  """This ``Memory``'s current time.
290
317
  Time in PyACTUp is a dimensionless quantity, the interpretation of which is at the
291
- discretion of the modeler.
318
+ discretion of the modeler. Attempting to set the ``time`` to anything but a real
319
+ number raises a :exc:`ValueError`.
292
320
  """
293
321
  return self._time
294
322
 
295
323
  @time.setter
296
324
  def time(self, value):
297
- if not isinstance(value, Real):
298
- raise ValueError(f"Time {value} is not a real number")
325
+ Memory.is_real(value, "time", False, False, False)
299
326
  self._time = value
300
327
  if value != self._time:
301
328
  self._clear_fixed_noise()
@@ -308,9 +335,10 @@ class Memory(dict):
308
335
  While *amount* can be negative, this is rarely appropriate. Backward time can
309
336
  easily result in biologically implausible models, and attempts to perform
310
337
  retrievals or similar operations at times preceding those at which relevant
311
- chunks were created will result in infinite or complex valued base-level
312
- activations and raise an :exc:`Exception`.
338
+ chunks were created or reinforced will result in infinite or complex valued
339
+ base-level activations and raise an :exc:`Exception`.
313
340
  """
341
+ Memory.is_real(amount, "time increment", False)
314
342
  if amount is not None:
315
343
  self.time += amount
316
344
  return self._time
@@ -328,20 +356,13 @@ class Memory(dict):
328
356
  chunks created or reinforced in the future results in failures of attempts to
329
357
  retrieve them.
330
358
 
331
- >>> m = Memory(temperature=1, noise=0)
332
- >>> m.learn(size=1)
333
- Traceback (most recent call last):
334
- File "<stdin>", line 1, in <module>
335
- TypeError: learn() got an unexpected keyword argument 'size'
336
- >>>
337
- >>>
338
359
  >>> m = Memory(temperature=1, noise=0)
339
360
  >>> m.learn({"size": 1})
340
- True
361
+ <Chunk 0000 {'size': 1} 1>
341
362
  >>> m.advance(10)
342
363
  10
343
364
  >>> m.learn({"size": 10})
344
- True
365
+ <Chunk 0001 {'size': 10} 1>
345
366
  >>> m.advance()
346
367
  11
347
368
  >>> m.blend("size")
@@ -377,8 +398,9 @@ class Memory(dict):
377
398
 
378
399
  @noise.setter
379
400
  def noise(self, value):
380
- if value < 0:
381
- raise ValueError(f"The noise, {value}, must not be negative")
401
+ Memory.is_real(value, "noise")
402
+ if value is None:
403
+ value = 0
382
404
  if self._temperature_param is None:
383
405
  t = Memory._validate_temperature(None, value)
384
406
  if not t:
@@ -387,38 +409,65 @@ class Memory(dict):
387
409
  else:
388
410
  self._temperature = t
389
411
  if value != self._noise:
390
- self._noise = value
412
+ self._noise = float(value)
391
413
  self._clear_fixed_noise()
392
414
 
415
+ @property
416
+ def noise_distribution(self):
417
+ """ Provide an alternative distribution from which noise is sampled.
418
+ If ``None`` the default logistic distribution is used. Otherwise the value of this
419
+ attribute should be a callable that takes no arguments and returns a real number.
420
+ It will be called once each time activation noise is required and the value,
421
+ scaled as usual by the :attr:`noise` parameter, will be used as the activation
422
+ noise. A :exc:`ValueError` is raised if an attempt is made to set this attribute
423
+ to anything other than a callable or ``None``.
424
+
425
+ .. warning::
426
+ It is rarely appropriate to use ``noise_distribution``. The default logistic
427
+ distribution is almost always a more appropriate choice. The ability to change
428
+ the distribution is provided only for esoteric purposes, and care should be
429
+ exercised lest biologically implausible models result.
430
+ """
431
+ return self._noise_distribution
432
+
433
+ @noise_distribution.setter
434
+ def noise_distribution(self, value):
435
+ if value is None or callable(value):
436
+ self._noise_distribution = value
437
+ else:
438
+ raise ValueError(f"The provided noise_distribution, {value}, is neither Callable nor None")
439
+
393
440
  @property
394
441
  def decay(self):
395
442
  """Controls the rate at which activation for chunks in memory decay with the passage of time.
396
443
  Time in PyACTUp is dimensionless.
397
444
  The :attr:`decay` is typically between about 0.1 and 2.0.
398
- The default value is 0.5. If zero memory does not decay.
445
+ The default value is 0.5. If set to zero then memory does not decay.
399
446
  If set to ``None`` no base-level activation is computed or used; note that this is
400
447
  significantly different than setting it to zero which causes base-level activation
401
448
  to still be computed and used, but with no decay.
402
449
  Attempting to set it to a negative number raises a :exc:`ValueError`.
403
- It must be less one 1 if this memory's :attr:`optimized_learning` parameter is set.
450
+ If this memory's :attr:`optimized_learning` parameter is true, then :attr:`decay`
451
+ must be less than one.
404
452
  """
405
453
  return self._decay
406
454
 
407
455
  @decay.setter
408
456
  def decay(self, value):
457
+ Memory.is_real(value, "decay")
409
458
  if value is not None:
410
- if value < 0:
411
- raise ValueError(f"The decay, {value}, must not be negative")
412
459
  if value >= 1 and self._optimized_learning is not None:
413
460
  raise ValueError(f"The decay, {value}, must be less than one if optimized_learning is used")
414
- self._decay = value
461
+ self._decay = float(value)
462
+ else:
463
+ self._decay = None
415
464
 
416
465
  @property
417
466
  def temperature(self):
418
467
  """The temperature parameter used for blending values.
419
468
  If ``None``, the default, the square root of 2 times the value of
420
469
  :attr:`noise` will be used. If the temperature is too close to zero, which
421
- can also happen if it is ``None`` and the :attr:`noise` is too low, or negative, a
470
+ can also happen if it is ``None`` and the :attr:`noise` is too low, a
422
471
  :exc:`ValueError` is raised.
423
472
  """
424
473
  return self._temperature_param
@@ -430,6 +479,7 @@ class Memory(dict):
430
479
  if value is None or value is False:
431
480
  value = None
432
481
  else:
482
+ Memory.is_real(value, "temperature", True, True)
433
483
  value = float(value)
434
484
  t = Memory._validate_temperature(value, self._noise)
435
485
  if not t:
@@ -469,7 +519,8 @@ class Memory(dict):
469
519
 
470
520
  @threshold.setter
471
521
  def threshold(self, value):
472
- if value is None or value is False:
522
+ Memory.is_real(value, "threshold", False)
523
+ if value is None:
473
524
  self._threshold = None
474
525
  else:
475
526
  self._threshold = float(value)
@@ -484,25 +535,26 @@ class Memory(dict):
484
535
  from the activation.
485
536
 
486
537
  Attributes for which no similarity function has been defined are always compared
487
- exactly, and chunks not matching on this attributes are not included at all in the
488
- corresponding partial retrievals or blending operations.
538
+ exactly, and chunks not matching on these attributes are not included at all in
539
+ the corresponding partial retrievals or blending operations.
489
540
 
490
541
  While for the likelihoods of retrieval the values of :attr:`time` are normally
491
542
  scale free, not depending upon the magnitudes of :attr:`time`, but rather the
492
543
  ratios of various times, the :attr:`mismatch` is sensitive to the actual
493
544
  magnitude. Suitable care should be exercised when adjusting it.
494
545
 
495
- Attempting to set this parameter to a value other than ``None`` or a real number
496
- raises a :exc:`ValueError`.
546
+ Attempting to set this parameter to a value other than ``None`` or a non-negative
547
+ real number raises a :exc:`ValueError`.
497
548
  """
498
549
  return self._mismatch
499
550
 
500
551
  @mismatch.setter
501
552
  def mismatch(self, value):
502
- if value is None or value is False:
553
+ if value is False:
554
+ value = None
555
+ Memory.is_real(value, "mismatch")
556
+ if value is None:
503
557
  self._mismatch = None
504
- elif value < 0:
505
- raise ValueError(f"The mismatch penalty, {value}, must not be negative")
506
558
  else:
507
559
  self._mismatch = float(value)
508
560
 
@@ -585,6 +637,49 @@ class Memory(dict):
585
637
  self._maximum_similarity = 1
586
638
  self._use_actr_similarity = bool(value)
587
639
 
640
+ @property
641
+ def extra_activation(self):
642
+ """A tuple of callables that are called to add additional terms to the activations of chunks.
643
+
644
+ For advanced purposes it is sometimes useful to add additional terms to chunks'
645
+ activation computations, for example for implementing a constant base level
646
+ offset for one or more chunks, or for implementing spreading activation.
647
+ This property can be set to None (or another falsey value) meaning no such
648
+ additional activation is added to any chunks; this is the default. Otherwise it
649
+ should be set to an iterable of callables, each of which should take a single
650
+ argument, a chunk, and returns a real number. For convenience it may also be set
651
+ to a single callable, which is equivalent to setting it to a tuple of length one
652
+ containing that callable.
653
+
654
+ Attempting to set a value that is not a callable, an iterable of callables or
655
+ falsey raises an :exc:`RuntimeError` will be raised when it is used in computing
656
+ activations.
657
+
658
+ .. warning::
659
+ The use of extra_activation requires care lest biologically implausible models
660
+ result. In addition to the ease with which artificial adjustments to the
661
+ activations can be made with this method, the appropriate magnitudes of
662
+ activation values depend upon the units in which time is measured.
663
+ """
664
+ return self._extra_activation
665
+
666
+ @extra_activation.setter
667
+ def extra_activation(self, value):
668
+ if not value:
669
+ self._extra_activation = None
670
+ elif callable(value):
671
+ self._extra_activation = (value,)
672
+ else:
673
+ try:
674
+ value = tuple(value)
675
+ for v in value:
676
+ if not callable(v):
677
+ raise ValueError()
678
+ except:
679
+ raise ValueError(
680
+ f"The extra_activation must be either a callable or an iterable of callables ({value})")
681
+ self._extra_activation = value
682
+
588
683
  @property
589
684
  def activation_history(self):
590
685
  """A :class:`MutableSequence`, typically a :class:`list`, into which details of the computations underlying PyACTUp operation are appended.
@@ -595,7 +690,7 @@ class Memory(dict):
595
690
  As a convenience setting :attr:`activation_history` to ``True`` assigns a fresh,
596
691
  empty list as its value.
597
692
 
598
- If PyACTUp is being using in a loop, the details collected will likely become
693
+ If PyACTUp is being used in a loop, the details collected will likely become
599
694
  voluminous. It is usually best to clear them frequently, such as on each
600
695
  iteration.
601
696
 
@@ -604,18 +699,19 @@ class Memory(dict):
604
699
 
605
700
  >>> m = Memory()
606
701
  >>> m.learn({"color": "red", "size": 3})
607
- True
702
+ <Chunk 0005 {'color': 'red', 'size': 3} 1>
608
703
  >>> m.advance()
609
704
  1
610
705
  >>> m.learn({"color": "red", "size": 5})
611
- True
706
+ <Chunk 0006 {'color': 'red', 'size': 5} 1>
612
707
  >>> m.advance()
613
708
  2
614
709
  >>> m.activation_history = []
615
710
  >>> m.blend("size", {"color": "red"})
616
711
  4.810539051819914
617
712
  >>> pprint(m.activation_history, sort_dicts=False)
618
- [{'name': '0005',
713
+ [{'time': 2,
714
+ 'name': '0005',
619
715
  'creation_time': 0,
620
716
  'attributes': (('color', 'red'), ('size', 3)),
621
717
  'reference_count': 1,
@@ -624,7 +720,8 @@ class Memory(dict):
624
720
  'activation_noise': -0.032318983984613185,
625
721
  'activation': -0.3788925742645858,
626
722
  'retrieval_probability': 0.09473047409004302},
627
- {'name': '0006',
723
+ {'time': 2,
724
+ 'name': '0006',
628
725
  'creation_time': 1,
629
726
  'attributes': (('color', 'red'), ('size', 5)),
630
727
  'reference_count': 1,
@@ -633,7 +730,6 @@ class Memory(dict):
633
730
  'activation_noise': 0.4191470689622754,
634
731
  'activation': 0.4191470689622754,
635
732
  'retrieval_probability': 0.905269525909957}]
636
-
637
733
  """
638
734
  return self._activation_history
639
735
 
@@ -679,7 +775,7 @@ class Memory(dict):
679
775
  "chunk contents": dict(k).__repr__()[1:-1],
680
776
  "chunk created at": c._creation,
681
777
  "chunk reference count": c._reference_count,
682
- "chunk references": Memory._elide_long_list(c._references)}
778
+ "chunk references": Memory._elide_long_list(c._references[:c._reference_count])}
683
779
  for k, c in self.items()]
684
780
  if pretty:
685
781
  tab = PrettyTable()
@@ -716,10 +812,10 @@ class Memory(dict):
716
812
  Note that after learning one or more chunks, before :meth:`retrieve`,
717
813
  :meth:`blend` or similar methods can be called :meth:`advance` must be called,
718
814
  lest the chunk(s) learned have infinite activation.
719
- Because it is so common to call :meth:`advance` immediately after :meth"`learn`
815
+ Because it is so common to call :meth:`advance` immediately after :meth:`learn`
720
816
  as a convenience if *advance* is not None just before :meth:`learn` returns
721
- :meth:`advance` with *advance* as its argument, without an argument if *advance*
722
- is ``True``.
817
+ it calls :meth:`advance` with *advance* as its argument, or without any argument
818
+ if *advance* is ``True``.
723
819
 
724
820
  Raises a :exc:`TypeError` if an attempt is made to learn an attribute value that
725
821
  is not :class:`Hashable`. Raises a :exc:`ValueError` if no *slots* are provided,
@@ -727,16 +823,16 @@ class Memory(dict):
727
823
 
728
824
  >>> m = Memory()
729
825
  >>> m.learn({"color":"red", "size":4})
730
- True
826
+ <Chunk 0000 {'color': 'red', 'size': 4} 1>
731
827
  >>> m.advance()
732
828
  1
733
829
  >>> m.learn({"color":"blue", "size":4}, advance=1)
830
+ <Chunk 0001 {'color': 'blue', 'size': 4} 1>
831
+ >>> m.learn({"color":"red", "size":4}) is None
734
832
  True
735
- >>> m.learn({"color":"red", "size":4})
736
- False
737
833
  >>> m.advance()
738
834
  3
739
- >>> m.retrieve({"color": "red"})
835
+ >>>
740
836
  <Chunk 0000 {'color': 'red', 'size': 4} 2>
741
837
  """
742
838
  slots = self._ensure_slots(slots, True)
@@ -767,10 +863,7 @@ class Memory(dict):
767
863
  if thing is None:
768
864
  return []
769
865
  if isinstance(thing, str):
770
- if "," in thing:
771
- names = [s.strip() for s in thing.split(",")]
772
- else:
773
- names = thing.split()
866
+ names = re.split(r"\s*(?:,|\s)\s*", thing.strip())
774
867
  else:
775
868
  names = list(thing)
776
869
  s = set()
@@ -779,7 +872,7 @@ class Memory(dict):
779
872
  if n in s:
780
873
  raise ValueError(f"Duplicate attribute name {n}")
781
874
  s.add(n)
782
- return names
875
+ return tuple(names)
783
876
 
784
877
  def _ensure_slots(self, slots, learn=False):
785
878
  slots = dict(slots)
@@ -862,7 +955,7 @@ class Memory(dict):
862
955
  slot_names = set(slot_names)
863
956
  slot_names.add(extra)
864
957
  partial_slots = []
865
- if partial and self._mismatch:
958
+ if partial and self._mismatch is not None:
866
959
  exact_slots =[]
867
960
  for n, v in conditions.items():
868
961
  if s := self._similarities.get(n):
@@ -906,9 +999,9 @@ class Memory(dict):
906
999
  - self._decay * np.log(ages))
907
1000
  else:
908
1001
  result = np.empty(nchunks)
909
- counts = ma.masked_all(nchunks)
910
- ages = ma.masked_all(nchunks)
911
- middles = ma.masked_all(nchunks)
1002
+ counts = np.ma.masked_all(nchunks)
1003
+ ages = np.ma.masked_all(nchunks)
1004
+ middles = np.ma.masked_all(nchunks)
912
1005
  for c, i in zip(chunks, count()):
913
1006
  if c._reference_count <= self._optimized_learning:
914
1007
  result[i] = np.sum((self._time - c._references[0:c._reference_count])
@@ -935,14 +1028,20 @@ class Memory(dict):
935
1028
  if self._activation_history is not None:
936
1029
  initial_history_length = len(self._activation_history)
937
1030
  for c, r in zip(chunks, result):
938
- self._activation_history.append({"name": c._name,
1031
+ self._activation_history.append({"time": self.time,
1032
+ "name": c._name,
939
1033
  "creation_time": c._creation,
940
1034
  "attributes": tuple(c.items()),
941
1035
  "reference_count": c.reference_count,
942
1036
  "references": c.references,
943
1037
  "base_level_activation": r})
944
1038
  if self._noise:
945
- noise = self._rng.logistic(scale=self._noise, size=nchunks)
1039
+ if self._noise_distribution is not None:
1040
+ noise = self._noise * np.array([self._noise_distribution()
1041
+ for i in range(nchunks)],
1042
+ dtype=np.float64)
1043
+ else:
1044
+ noise = self._rng.logistic(scale=self._noise, size=nchunks)
946
1045
  if self._fixed_noise is not None:
947
1046
  if self._fixed_noise_time != self._time:
948
1047
  self._clear_fixed_noise()
@@ -962,11 +1061,28 @@ class Memory(dict):
962
1061
  penalties = np.empty((nchunks, len(partial_slots)))
963
1062
  for c, row in zip(chunks, count()):
964
1063
  penalties[row] = [s._similarity(c[n], v) for n, v, s in partial_slots]
1064
+ if self._activation_history is not None:
1065
+ offset = 0 if self.use_actr_similarity else 1
1066
+ for i, pens in zip(count(initial_history_length), penalties):
1067
+ similarities = {ps[0]: p + offset
1068
+ for ps, p in zip(partial_slots, pens)}
1069
+ self._activation_history[i]["similarities"] = similarities
965
1070
  penalties = np.sum(penalties, 1) * self._mismatch
966
1071
  result += penalties
967
1072
  if self._activation_history is not None:
968
1073
  for i, p in zip(count(initial_history_length), penalties):
969
1074
  self._activation_history[i]["mismatch"] = p
1075
+ if self._extra_activation is not None:
1076
+ extra_activations = np.empty((nchunks))
1077
+ try:
1078
+ for c, row in zip(chunks, count()):
1079
+ extra_activations[row] = sum(f(c) for f in self._extra_activation)
1080
+ except:
1081
+ raise RuntimeError("Error attempting to compute extra activation values")
1082
+ result += extra_activations
1083
+ if self._activation_history is not None:
1084
+ for i, ea in zip(count(initial_history_length), extra_activations):
1085
+ self._activation_history[i]["extra_activation"] = ea
970
1086
  if self._activation_history is not None:
971
1087
  for i, r in zip(count(initial_history_length), result):
972
1088
  self._activation_history[i]["activation"] = r
@@ -974,9 +1090,9 @@ class Memory(dict):
974
1090
  self._activation_history[i]["meets_threshold"] = (r >= self._threshold)
975
1091
  raw_activations_count = len(result)
976
1092
  if self._threshold is not None:
977
- m = ma.masked_less(result, self._threshold)
978
- if ma.is_masked(m):
979
- chunks = ma.array(chunks, mask=ma.getmask(m)).compressed()
1093
+ m = np.ma.masked_less(result, self._threshold)
1094
+ if np.ma.is_masked(m):
1095
+ chunks = np.ma.array(chunks, mask=np.ma.getmask(m)).compressed()
980
1096
  result = m.compressed()
981
1097
  except FloatingPointError as e:
982
1098
  raise RuntimeError(f"Error when computing activations, perhaps a chunk's "
@@ -991,7 +1107,7 @@ class Memory(dict):
991
1107
  If there is no such matching chunk returns ``None``.
992
1108
  Normally only retrieves chunks exactly matching the *slots*; if *partial* is
993
1109
  ``True`` it also retrieves those only approximately matching, using similarity
994
- (see :meth:`similarity`) and :attr:`mismatch` to determine closeness
1110
+ (see :meth:`similarity`) and the value of :attr:`mismatch` to determine closeness
995
1111
  of match.
996
1112
 
997
1113
  If *rehearse* is supplied and true it also reinforces this chunk at the current
@@ -1005,11 +1121,11 @@ class Memory(dict):
1005
1121
 
1006
1122
  >>> m = Memory()
1007
1123
  >>> m.learn({"widget":"thromdibulator", "color":"red", "size":2})
1008
- True
1124
+ <Chunk 0000 {'widget': 'thromdibulator', 'color': 'red', 'size': 2} 1>
1009
1125
  >>> m.advance()
1010
1126
  1
1011
1127
  >>> m.learn({"widget":"snackleizer", "color":"blue", "size":1})
1012
- True
1128
+ <Chunk 0001 {'widget': 'snackleizer', 'color': 'blue', 'size': 1} 1>
1013
1129
  >>> m.advance()
1014
1130
  2
1015
1131
  >>> m.retrieve({"color":"blue"})["widget"]
@@ -1034,60 +1150,123 @@ class Memory(dict):
1034
1150
  self._cite(result)
1035
1151
  return result
1036
1152
 
1037
- def _blend(self, outcome_attribute, slots):
1153
+ def _blend(self, outcome_attribute, slots, instance_salience, feature_salience):
1038
1154
  Memory._ensure_slot_name(outcome_attribute)
1039
1155
  activations, chunks, raw = self._activations(self._ensure_slots(slots),
1040
1156
  extra=outcome_attribute)
1041
1157
  if chunks is None:
1042
- return None, None
1158
+ return None, None, None, None
1043
1159
  with np.errstate(divide="raise", over="raise", under="ignore", invalid="raise"):
1044
1160
  wp = np.exp(activations / self._temperature)
1045
1161
  wp /= np.sum(wp)
1046
- if self._activation_history is not None:
1047
- h = self._activation_history
1048
- # this i malarkey is in case one or more candidates didn't clear the threshold
1049
- i = len(h) - raw
1050
- for p, c in zip(wp, chunks):
1051
- while h[i]["name"] != c._name:
1052
- i += 1
1053
- assert i < len(h)
1054
- h[i]["retrieval_probability"] = p
1055
- return wp, chunks
1056
-
1057
- def blend(self, outcome_attribute, slots={}):
1162
+ if self._activation_history is not None:
1163
+ h = self._activation_history
1164
+ # this i malarkey is in case one or more candidates didn't clear the threshold
1165
+ i = len(h) - raw
1166
+ for p, c in zip(wp, chunks):
1167
+ while h[i]["name"] != c._name:
1168
+ i += 1
1169
+ assert i < len(h)
1170
+ h[i]["retrieval_probability"] = p
1171
+ def normalize(v):
1172
+ v = np.array(v)
1173
+ norm = np.linalg.norm(v)
1174
+ return v / norm if norm > 0 else v
1175
+ isal = None
1176
+ if instance_salience:
1177
+ vals = np.array([c[outcome_attribute] for c in chunks])
1178
+ isal = normalize(wp * (vals - np.sum(wp * vals)) / self._temperature)
1179
+ fsal = None
1180
+ if feature_salience and self._mismatch is not None:
1181
+ pslots = [a for a in slots if self._similarities.get(a)]
1182
+ if self._mismatch != 0:
1183
+ def slot_salience(attr, attrval):
1184
+ deriv = self._similarities[attr]._derivative
1185
+ weight = self._similarities[attr]._weight
1186
+ if not deriv:
1187
+ raise RuntimeError(f"No derivative defined for {attr} similarities")
1188
+ dvals = np.array([weight * deriv(c[attr], attrval) for c in chunks])
1189
+ dsum = np.sum(wp * dvals)
1190
+ return np.sum(wp * (dvals - dsum) * np.array([c[outcome_attribute]
1191
+ for c in chunks]))
1192
+ # Doing the division up front could make for loss of precision
1193
+ # but this is unlikely to matter in any realistic use case.
1194
+ coef = self._mismatch / self._temperature
1195
+ fsal = [coef * slot_salience(a, slots[a]) for a in pslots]
1196
+ else:
1197
+ fsal = [0] * len(pslots)
1198
+ fsal = dict(zip(pslots, normalize(fsal)))
1199
+ return wp, chunks, isal, fsal
1200
+
1201
+ def blend(self, outcome_attribute, slots={}, instance_salience=False, feature_salience=False):
1058
1202
  """Returns a blended value for the given attribute of those chunks matching *slots*, and which contain *outcome_attribute*, and have activations greater than or equal to this Memory's threshold, if any.
1059
1203
  Returns ``None`` if there are no matching chunks that contain
1060
1204
  *outcome_attribute*. If any matching chunk has a value of *outcome_attribute*
1061
1205
  that is not a real number an :exc:`Exception` is raised.
1062
1206
 
1207
+ If neither ``instance_salience`` nor ``feature_salience`` is true, the sole return
1208
+ value is the blended value; otherwise a tuple of three values is returned. The
1209
+ first the blended value. If ``instance_salience`` is true the second is a dict
1210
+ mapping a descriptions of the slot values of each of the matched chunks that
1211
+ contributed to the blended value to the normalized instance salience value, a real
1212
+ number between -1 and 1, inclusive; otherwise the second value is ``None``. The
1213
+ slot representation of slot values in this dict is a tuple of tuples, the inner
1214
+ tuples being the slot name and value.
1215
+
1216
+ If ``feature_salience`` is true the third value is a dict mapping slot names,
1217
+ corresponding to those slots that were partially matched in this blending
1218
+ operation, to their normalized feature salience values, a real number between -1
1219
+ and 1, inclusive; otherwise the third value is ``None``. To compute feature
1220
+ salience a derivative of the similarity function must have been specified for
1221
+ every partially match slot using :meth:`similarity`; if any are missing a
1222
+ :exc:`RuntimeError`` is raised.
1223
+
1063
1224
  >>> m = Memory()
1064
1225
  >>> m.learn({"color":"red", "size":2})
1065
- True
1226
+ <Chunk 0000 {'color': 'red', 'size': 2} 1>
1066
1227
  >>> m.advance()
1067
1228
  1
1068
1229
  >>> m.learn({"color":"blue", "size":30})
1069
- True
1230
+ <Chunk 0001 {'color': 'blue', 'size': 30} 1>
1070
1231
  >>> m.advance()
1071
1232
  2
1072
1233
  >>> m.learn({"color":"red", "size":1})
1073
- True
1234
+ <Chunk 0002 {'color': 'red', 'size': 1} 1>
1074
1235
  >>> m.advance()
1075
1236
  3
1076
1237
  >>> m.blend("size", {"color":"red"})
1077
- 1.221272238515685
1238
+ 1.3660254037844388
1239
+ >>> m.blend("size", {"color":"red"}, instance_salience=True)
1240
+ (1.3660254037844388,
1241
+ {(('color', 'red'), ('size', 2)): 0.7071067811865472,
1242
+ (('color', 'red'), ('size', 1)): -0.7071067811865478},
1243
+ None)
1244
+
1078
1245
  """
1079
- probs, chunks = self._blend(outcome_attribute, slots)
1080
- if chunks is None:
1081
- return None
1082
- with np.errstate(divide="raise", over="raise", under="ignore", invalid="raise"):
1083
- try:
1084
- return np.average(np.array([c[outcome_attribute] for c in chunks],
1085
- dtype=np.float64),
1086
- weights=probs)
1087
- except Exception as e:
1088
- raise RuntimeError(f"Error computing blended value, is perhaps the value "
1089
- f"of the {outcome_attribute} slotis not numeric in "
1090
- f"one of the matching chunks? ({e})")
1246
+ probs, chunks, isal, fsal = self._blend(outcome_attribute, slots,
1247
+ instance_salience, feature_salience)
1248
+ if chunks is not None:
1249
+ with np.errstate(divide="raise", over="raise", under="ignore", invalid="raise"):
1250
+ try:
1251
+ result = np.average(np.array([c[outcome_attribute] for c in chunks],
1252
+ dtype=np.float64),
1253
+ weights=probs)
1254
+ except Exception as e:
1255
+ raise RuntimeError(f"Error computing blended value, is perhaps the value "
1256
+ f"of the {outcome_attribute} slotis not numeric in "
1257
+ f"one of the matching chunks? ({e})")
1258
+ else:
1259
+ result = None
1260
+ if not instance_salience and not feature_salience:
1261
+ return result
1262
+ if instance_salience:
1263
+ if isal is not None:
1264
+ isal = {tuple(c.items()): s for c, s in zip(chunks, isal)}
1265
+ else:
1266
+ isal = {}
1267
+ if feature_salience and fsal is None:
1268
+ fsal = {}
1269
+ return result, isal, fsal
1091
1270
 
1092
1271
  def best_blend(self, outcome_attribute, iterable, select_attribute=None, minimize=False):
1093
1272
  """Returns two values (as a 2-tuple), describing the extreme blended value of the *outcome_attribute* over the values provided by *iterable*.
@@ -1101,7 +1280,8 @@ class Memory(dict):
1101
1280
  none of the values from *iterable* result in blended values of *outcome_attribute*
1102
1281
  then both return values are ``None``.
1103
1282
 
1104
- This operation is particularly useful for building Instance Based Learning models.
1283
+ This operation is particularly useful for building `Instance Based Learning models
1284
+ <https://www.sciencedirect.com/science/article/abs/pii/S0364021303000314>`_.
1105
1285
 
1106
1286
  For the common case where *iterable* iterates over only the values of a single
1107
1287
  slot the *select_attribute* parameter may be used to simplify the iteration. If
@@ -1112,25 +1292,25 @@ class Memory(dict):
1112
1292
 
1113
1293
  >>> m = Memory()
1114
1294
  >>> m.learn({"color":"red", "utility":1})
1115
- True
1295
+ <Chunk 0000 {'color': 'red', 'utility': 1} 1>
1116
1296
  >>> m.advance()
1117
1297
  1
1118
1298
  >>> m.learn({"color":"blue", "utility":2})
1119
- True
1299
+ <Chunk 0001 {'color': 'blue', 'utility': 2} 1>
1120
1300
  >>> m.advance()
1121
1301
  2
1122
1302
  >>> m.learn({"color":"red", "utility":1.8})
1123
- True
1303
+ <Chunk 0002 {'color': 'red', 'utility': 1.8} 1>
1124
1304
  >>> m.advance()
1125
1305
  3
1126
1306
  >>> m.learn({"color":"blue", "utility":0.9})
1127
- True
1307
+ <Chunk 0003 {'color': 'blue', 'utility': 0.9} 1>
1128
1308
  >>> m.advance()
1129
1309
  4
1130
1310
  >>> m.best_blend("utility", ({"color": c} for c in ("red", "blue")))
1131
1311
  ({'color': 'blue'}, 1.5149259914576285)
1132
1312
  >>> m.learn({"color":"blue", "utility":-1})
1133
- True
1313
+ <Chunk 0004 {'color': 'blue', 'utility': -1} 1>
1134
1314
  >>> m.advance()
1135
1315
  5
1136
1316
  >>> m.best_blend("utility", ("red", "blue"), "color")
@@ -1170,25 +1350,24 @@ class Memory(dict):
1170
1350
 
1171
1351
  >>> m = Memory()
1172
1352
  >>> m.learn({"kind": "tilset", "age": "old"})
1173
- True
1353
+ <Chunk 0000 {'kind': 'tilset', 'age': 'old'} 1>
1174
1354
  >>> m.advance()
1175
1355
  1
1176
1356
  >>> m.learn({"kind": "limburger", "age": "old"})
1177
- True
1357
+ <Chunk 0001 {'kind': 'limburger', 'age': 'old'} 1>
1178
1358
  >>> m.advance()
1179
1359
  2
1180
1360
  >>> m.learn({"kind": "tilset", "age": "old"})
1181
- False
1182
1361
  >>> m.advance()
1183
1362
  3
1184
1363
  >>> m.learn({"kind": "tilset", "age": "new"})
1185
- True
1364
+ <Chunk 0002 {'kind': 'tilset', 'age': 'new'} 1>
1186
1365
  >>> m.advance()
1187
1366
  4
1188
1367
  >>> m.discrete_blend("kind", {"age": "old"})
1189
1368
  ('tilset', {'tilset': 0.9540373563209859, 'limburger': 0.04596264367901423})
1190
1369
  """
1191
- probs, chunks = self._blend(outcome_attribute, slots)
1370
+ probs, chunks, isal, fsal = self._blend(outcome_attribute, slots, False, False)
1192
1371
  if not chunks:
1193
1372
  return None, None
1194
1373
  candidates = defaultdict(list)
@@ -1207,7 +1386,7 @@ class Memory(dict):
1207
1386
  return (random.choice(best),
1208
1387
  dict(sorted(candidates.items(), key=lambda x: x[1], reverse=True)))
1209
1388
 
1210
- def similarity(self, attributes, function=None, weight=None):
1389
+ def similarity(self, attributes, function=None, weight=None, derivative=None):
1211
1390
  """Assigns a similarity function and/or corresponding weight to be used when comparing attribute values with the given *attributes*.
1212
1391
  The *attributes* should be an :class:`Iterable` of strings, attribute names.
1213
1392
  The *function* should take two arguments, and return a real number between 0 and 1,
@@ -1220,15 +1399,26 @@ class Memory(dict):
1220
1399
  will, in most cases, be meaningless if they are.
1221
1400
  If ``True`` is supplied as the *function* a default similarity function is used
1222
1401
  that returns one if its two arguments are ``==`` and zero otherwise.
1223
- If only one of *function* or *weight* is supplied, it is changed without
1224
- changing the other; the initial defaults are ``Treu`` for *function* and ``1``
1225
- for *weight*.
1226
- If neither *function* nor *weight* is supplied both are removed, and these
1227
- *attributes* will no longer have an associated similarity computation, and will
1228
- be matched only exactly.
1402
+
1403
+ If *derivative* is supplied it should be a callable, the first partial derivative
1404
+ of the similarity function with respect to its first argument, and will be used
1405
+ if the feature saliences are requested in :meth:`blend`. The *derivative* must
1406
+ be defined for all values that may occur for the relevant slots. It is common
1407
+ that the strict mathematical derivative may not exist for one or a small number
1408
+ of possible values, most commonly when the similarity involves the absolute value
1409
+ of the difference between the two arguments of the similarity function. Even in
1410
+ these cases the *derivative* argument to :meth:`similarity` should return a value; often zero
1411
+ is a good choice in these cases.
1412
+
1413
+ If only one or two of *function*, *weight* and *derivative* are supplied, they are
1414
+ changed without changing those not supplied; the initial defaults are ``True`` for
1415
+ *function*, ``1`` for *weight*, and ``None`` for *derivative*. If none
1416
+ of *function*, *weight* nor *derivative* are supplied all are removed, and these
1417
+ *attributes* will no longer have an associated similarity computation, and will be
1418
+ matched only exactly.
1229
1419
 
1230
1420
  As a convenience, if none of the attribute names contains commas or spaces, a
1231
- string maybe used instead of a list as the first argument to ``similarity``, the
1421
+ string may be used instead of a list as the first argument to ``similarity``, the
1232
1422
  attribute names being separated by spaces or commas; either spaces or commas must
1233
1423
  be used, not a mixture. For example, both ``"decision utility"`` and
1234
1424
  ``"decision,utiliy"`` are equivalent to ``["decision", "utility"]``.
@@ -1247,14 +1437,15 @@ class Memory(dict):
1247
1437
  ... return f(y, x)
1248
1438
  ... return 1 - (y - x) / y
1249
1439
  >>> similarity(["length", "width"], f, weight=2)
1250
-
1251
1440
  """
1252
1441
  if function is not None and not (callable(function) or function is True):
1253
1442
  raise ValueError(f"Function {function} is neither callable nor True")
1443
+ if derivative is not None and not callable(derivative):
1444
+ raise(ValueError(f"Derivative {derivative} is not callable"))
1254
1445
  if weight is not None and weight <= 0:
1255
1446
  raise ValueError(f"Similarity weight, {weight}, is not a positive number")
1256
1447
  for a in Memory._ensure_slot_names(attributes):
1257
- if function is None and weight is None:
1448
+ if function is None and weight is None and derivative is None:
1258
1449
  if a in self._similarities:
1259
1450
  del self._similarities[a]
1260
1451
  else:
@@ -1262,12 +1453,19 @@ class Memory(dict):
1262
1453
  sim._memory = self
1263
1454
  if function is not None and function != sim._function:
1264
1455
  sim._function = function
1456
+ if derivative is not None and function != sim._derivative:
1457
+ sim._derivative = derivative
1265
1458
  if weight is not None and weight != sim._weight:
1266
1459
  sim._weight = weight
1267
1460
  sim._cache.clear()
1268
1461
 
1269
1462
 
1270
1463
  class Chunk(dict):
1464
+ """A learned item.
1465
+
1466
+ A chunk acts much like a dictionary, and its slots can be retrieved with the usual
1467
+ `[]` notation, or with `.get()`.
1468
+ """
1271
1469
 
1272
1470
  __slots__ = ["_name", "_memory", "_creation", "_references", "_reference_count" ]
1273
1471
 
@@ -1289,6 +1487,11 @@ class Chunk(dict):
1289
1487
  def __str__(self):
1290
1488
  return f"Chunk-{self._name}"
1291
1489
 
1490
+ @property
1491
+ def memory(self):
1492
+ """The :class:`Memory` object that contains this chunk."""
1493
+ return self._memory
1494
+
1292
1495
  @property
1293
1496
  def reference_count(self):
1294
1497
  """A non-negative integer, the number of times that this :class:`Chunk` has been reinforced.
@@ -1297,26 +1500,28 @@ class Chunk(dict):
1297
1500
 
1298
1501
  @property
1299
1502
  def references(self):
1300
- """A list of real numbers, the times at which that this :class:`Chunk` has been reinforced.
1503
+ """A tuple of real numbers, the times at which this :class:`Chunk` has been reinforced.
1301
1504
  If :attr:`optimized_learning` is being used this may be just the most recent
1302
1505
  reinforcements, or an empty list, depending upon the value of
1303
- :attr:`optimized_learning`
1506
+ :attr:`optimized_learning`.
1304
1507
  """
1305
- return list(self._references[:(self._reference_count
1306
- if self._memory._optimized_learning is None
1307
- else min(self._reference_count,
1308
- self._memory._optimized_learning))])
1508
+ return tuple(self._references[:(self._reference_count
1509
+ if self._memory._optimized_learning is None
1510
+ else min(self._reference_count,
1511
+ self._memory._optimized_learning))])
1309
1512
 
1310
1513
 
1311
1514
  @dataclass
1312
1515
  class Similarity:
1313
1516
  _memory: Memory = None
1314
1517
  _function: callable = True
1518
+ _derivative: callable = None
1315
1519
  _weight: float = 1.0
1316
1520
  _cache: lrucache = field(default_factory=lambda: lrucache(SIMILARITY_CACHE_SIZE))
1317
1521
 
1318
1522
  def _similarity(self, x, y):
1319
- # returns a non-positive number that has already been weighted on a per slot basis
1523
+ # returns the mismatch penalty, a non-positive number that has already been
1524
+ # weighted on a per slot basis
1320
1525
  if x == y:
1321
1526
  return 0
1322
1527
  if self._function is True:
@@ -1,4 +1,4 @@
1
- # Copyright 2018-2022 Carnegie Mellon University
1
+ # Copyright 2018-2024 Carnegie Mellon University
2
2
 
3
3
  from setuptools import setup
4
4
  from pyactup import __version__
@@ -10,7 +10,7 @@ setup(name="pyactup",
10
10
  description="A lightweight Python implementation of a subset of the ACT-R cognitive architecture’s Declarative Memory",
11
11
  author="Don Morrison",
12
12
  author_email="dfm2@cmu.edu",
13
- url="https://bitbucket.org/dfmorrison/pyactup/",
13
+ url="https://dfmorrison.github.io/pyactup-documentation/",
14
14
  platforms=["any"],
15
15
  long_description=DESCRIPTION,
16
16
  long_description_content_type="text/markdown",
@@ -18,7 +18,8 @@ setup(name="pyactup",
18
18
  install_requires=[
19
19
  "numpy",
20
20
  "pylru",
21
- "prettytable"],
21
+ "prettytable",
22
+ "packaging"],
22
23
  tests_require=["pytest"],
23
24
  python_requires=">=3.8",
24
25
  classifiers=["Intended Audience :: Science/Research",
File without changes