hindsight-client 0.2.1__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. hindsight_client/__init__.py +9 -8
  2. hindsight_client/hindsight_client.py +396 -59
  3. {hindsight_client-0.2.1.dist-info → hindsight_client-0.4.0.dist-info}/METADATA +1 -1
  4. hindsight_client-0.4.0.dist-info/RECORD +89 -0
  5. hindsight_client_api/__init__.py +27 -0
  6. hindsight_client_api/api/__init__.py +2 -0
  7. hindsight_client_api/api/banks_api.py +1012 -131
  8. hindsight_client_api/api/directives_api.py +1619 -0
  9. hindsight_client_api/api/entities_api.py +29 -9
  10. hindsight_client_api/api/memory_api.py +713 -73
  11. hindsight_client_api/api/mental_models_api.py +1897 -0
  12. hindsight_client_api/api/monitoring_api.py +246 -0
  13. hindsight_client_api/api/operations_api.py +350 -4
  14. hindsight_client_api/models/__init__.py +25 -0
  15. hindsight_client_api/models/add_background_request.py +2 -2
  16. hindsight_client_api/models/async_operation_submit_response.py +89 -0
  17. hindsight_client_api/models/background_response.py +10 -3
  18. hindsight_client_api/models/bank_list_item.py +6 -6
  19. hindsight_client_api/models/bank_profile_response.py +11 -4
  20. hindsight_client_api/models/bank_stats_response.py +15 -4
  21. hindsight_client_api/models/consolidation_response.py +89 -0
  22. hindsight_client_api/models/create_bank_request.py +8 -1
  23. hindsight_client_api/models/create_directive_request.py +95 -0
  24. hindsight_client_api/models/create_mental_model_request.py +100 -0
  25. hindsight_client_api/models/create_mental_model_response.py +87 -0
  26. hindsight_client_api/models/directive_list_response.py +95 -0
  27. hindsight_client_api/models/directive_response.py +113 -0
  28. hindsight_client_api/models/document_response.py +5 -3
  29. hindsight_client_api/models/entity_list_response.py +9 -3
  30. hindsight_client_api/models/features_info.py +91 -0
  31. hindsight_client_api/models/graph_data_response.py +4 -2
  32. hindsight_client_api/models/list_tags_response.py +101 -0
  33. hindsight_client_api/models/memory_item.py +9 -2
  34. hindsight_client_api/models/mental_model_list_response.py +95 -0
  35. hindsight_client_api/models/mental_model_response.py +126 -0
  36. hindsight_client_api/models/mental_model_trigger.py +87 -0
  37. hindsight_client_api/models/operation_response.py +1 -1
  38. hindsight_client_api/models/operation_status_response.py +131 -0
  39. hindsight_client_api/models/operations_list_response.py +8 -2
  40. hindsight_client_api/models/recall_request.py +22 -3
  41. hindsight_client_api/models/recall_result.py +9 -2
  42. hindsight_client_api/models/reflect_based_on.py +115 -0
  43. hindsight_client_api/models/reflect_directive.py +91 -0
  44. hindsight_client_api/models/reflect_include_options.py +13 -2
  45. hindsight_client_api/models/reflect_llm_call.py +89 -0
  46. hindsight_client_api/models/reflect_mental_model.py +96 -0
  47. hindsight_client_api/models/reflect_request.py +22 -3
  48. hindsight_client_api/models/reflect_response.py +34 -11
  49. hindsight_client_api/models/reflect_tool_call.py +100 -0
  50. hindsight_client_api/models/reflect_trace.py +105 -0
  51. hindsight_client_api/models/retain_request.py +10 -3
  52. hindsight_client_api/models/retain_response.py +21 -3
  53. hindsight_client_api/models/tag_item.py +89 -0
  54. hindsight_client_api/models/token_usage.py +91 -0
  55. hindsight_client_api/models/tool_calls_include_options.py +87 -0
  56. hindsight_client_api/models/update_directive_request.py +120 -0
  57. hindsight_client_api/models/update_mental_model_request.py +125 -0
  58. hindsight_client_api/models/version_response.py +93 -0
  59. hindsight_client-0.2.1.dist-info/RECORD +0 -62
  60. {hindsight_client-0.2.1.dist-info → hindsight_client-0.4.0.dist-info}/WHEEL +0 -0
@@ -6,23 +6,23 @@ easy-to-use interface on top of the auto-generated OpenAPI client.
 """
 
 import asyncio
-from typing import Optional, List, Dict, Any
 from datetime import datetime
+from typing import Any, Literal
 
 import hindsight_client_api
-from hindsight_client_api.api import memory_api, banks_api
+from hindsight_client_api.api import banks_api, directives_api, memory_api, mental_models_api
 from hindsight_client_api.models import (
-    recall_request,
-    retain_request,
     memory_item,
+    recall_request,
     reflect_request,
+    retain_request,
 )
-from hindsight_client_api.models.retain_response import RetainResponse
+from hindsight_client_api.models.bank_profile_response import BankProfileResponse
+from hindsight_client_api.models.list_memory_units_response import ListMemoryUnitsResponse
 from hindsight_client_api.models.recall_response import RecallResponse
 from hindsight_client_api.models.recall_result import RecallResult
 from hindsight_client_api.models.reflect_response import ReflectResponse
-from hindsight_client_api.models.list_memory_units_response import ListMemoryUnitsResponse
-from hindsight_client_api.models.bank_profile_response import BankProfileResponse
+from hindsight_client_api.models.retain_response import RetainResponse
 
 
 def _run_async(coro):
@@ -63,7 +63,7 @@ class Hindsight:
     ```
     """
 
-    def __init__(self, base_url: str, api_key: Optional[str] = None, timeout: float = 30.0):
+    def __init__(self, base_url: str, api_key: str | None = None, timeout: float = 30.0):
         """
         Initialize the Hindsight client.
 
@@ -74,8 +74,12 @@ class Hindsight:
         """
         config = hindsight_client_api.Configuration(host=base_url, access_token=api_key)
         self._api_client = hindsight_client_api.ApiClient(config)
+        if api_key:
+            self._api_client.set_default_header("Authorization", f"Bearer {api_key}")
         self._memory_api = memory_api.MemoryApi(self._api_client)
         self._banks_api = banks_api.BanksApi(self._api_client)
+        self._mental_models_api = mental_models_api.MentalModelsApi(self._api_client)
+        self._directives_api = directives_api.DirectivesApi(self._api_client)
 
     def __enter__(self):
         """Context manager entry."""
@@ -108,11 +112,12 @@ class Hindsight:
         self,
         bank_id: str,
         content: str,
-        timestamp: Optional[datetime] = None,
-        context: Optional[str] = None,
-        document_id: Optional[str] = None,
-        metadata: Optional[Dict[str, str]] = None,
-        entities: Optional[List[Dict[str, str]]] = None,
+        timestamp: datetime | None = None,
+        context: str | None = None,
+        document_id: str | None = None,
+        metadata: dict[str, str] | None = None,
+        entities: list[dict[str, str]] | None = None,
+        tags: list[str] | None = None,
     ) -> RetainResponse:
         """
         Store a single memory (simplified interface).
@@ -125,21 +130,32 @@ class Hindsight:
             document_id: Optional document ID for grouping
             metadata: Optional user-defined metadata
             entities: Optional list of entities [{"text": "...", "type": "..."}]
+            tags: Optional list of tags for filtering memories during recall/reflect
 
         Returns:
             RetainResponse with success status
         """
         return self.retain_batch(
             bank_id=bank_id,
-            items=[{"content": content, "timestamp": timestamp, "context": context, "metadata": metadata, "entities": entities}],
+            items=[
+                {
+                    "content": content,
+                    "timestamp": timestamp,
+                    "context": context,
+                    "metadata": metadata,
+                    "entities": entities,
+                    "tags": tags,
+                }
+            ],
             document_id=document_id,
         )
 
     def retain_batch(
         self,
         bank_id: str,
-        items: List[Dict[str, Any]],
-        document_id: Optional[str] = None,
+        items: list[dict[str, Any]],
+        document_id: str | None = None,
+        document_tags: list[str] | None = None,
         retain_async: bool = False,
     ) -> RetainResponse:
         """
@@ -147,8 +163,9 @@ class Hindsight:
 
         Args:
             bank_id: The memory bank ID
-            items: List of memory items with 'content' and optional 'timestamp', 'context', 'metadata', 'document_id', 'entities'
+            items: List of memory items with 'content' and optional 'timestamp', 'context', 'metadata', 'document_id', 'entities', 'tags'
             document_id: Optional document ID for grouping memories (applied to items that don't have their own)
+            document_tags: Optional list of tags applied to all items in this batch (merged with per-item tags)
             retain_async: If True, process asynchronously in background (default: False)
 
         Returns:
@@ -160,10 +177,7 @@ class Hindsight:
         for item in items:
             entities = None
             if item.get("entities"):
-                entities = [
-                    EntityInput(text=e["text"], type=e.get("type"))
-                    for e in item["entities"]
-                ]
+                entities = [EntityInput(text=e["text"], type=e.get("type")) for e in item["entities"]]
             memory_items.append(
                 memory_item.MemoryItem(
                     content=item["content"],
@@ -173,12 +187,14 @@ class Hindsight:
                     # Use item's document_id if provided, otherwise fall back to batch-level document_id
                     document_id=item.get("document_id") or document_id,
                     entities=entities,
+                    tags=item.get("tags"),
                 )
             )
 
         request_obj = retain_request.RetainRequest(
             items=memory_items,
             async_=retain_async,
+            document_tags=document_tags,
         )
 
         return _run_async(self._memory_api.retain_memories(bank_id, request_obj))
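Putting the new retain parameters together, a hedged usage sketch of per-item `tags` plus batch-level `document_tags`, continuing with the `client` constructed earlier; the bank ID, contents, and tag names are illustrative only:

```python
# Single memory with tags (forwarded to retain_batch as shown above).
client.retain(
    bank_id="demo-bank",
    content="Alice prefers morning meetings.",
    tags=["preferences"],
)

# Batch retain: document_tags apply to every item and are merged with per-item tags.
client.retain_batch(
    bank_id="demo-bank",
    items=[
        {"content": "Bob joined the platform team.", "tags": ["org"]},
        {"content": "Sprint 42 retrospective went well."},  # no per-item tags
    ],
    document_tags=["q3-notes"],
)
```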
@@ -187,15 +203,17 @@ class Hindsight:
         self,
         bank_id: str,
         query: str,
-        types: Optional[List[str]] = None,
+        types: list[str] | None = None,
         max_tokens: int = 4096,
         budget: str = "mid",
         trace: bool = False,
-        query_timestamp: Optional[str] = None,
+        query_timestamp: str | None = None,
         include_entities: bool = False,
         max_entity_tokens: int = 500,
         include_chunks: bool = False,
         max_chunk_tokens: int = 8192,
+        tags: list[str] | None = None,
+        tags_match: Literal["any", "all", "any_strict", "all_strict"] = "any",
     ) -> RecallResponse:
         """
         Recall memories using semantic similarity.
@@ -212,14 +230,19 @@ class Hindsight:
             max_entity_tokens: Maximum tokens for entity observations (default: 500)
             include_chunks: Include raw text chunks in results (default: False)
             max_chunk_tokens: Maximum tokens for chunks (default: 8192)
+            tags: Optional list of tags to filter memories by
+            tags_match: How to match tags - "any" (OR, includes untagged), "all" (AND, includes untagged),
+                "any_strict" (OR, excludes untagged), "all_strict" (AND, excludes untagged). Default: "any"
 
         Returns:
             RecallResponse with results, optional entities, optional chunks, and optional trace
         """
-        from hindsight_client_api.models import include_options, entity_include_options, chunk_include_options
+        from hindsight_client_api.models import chunk_include_options, entity_include_options, include_options
 
         include_opts = include_options.IncludeOptions(
-            entities=entity_include_options.EntityIncludeOptions(max_tokens=max_entity_tokens) if include_entities else None,
+            entities=entity_include_options.EntityIncludeOptions(max_tokens=max_entity_tokens)
+            if include_entities
+            else None,
             chunks=chunk_include_options.ChunkIncludeOptions(max_tokens=max_chunk_tokens) if include_chunks else None,
         )
 
@@ -231,6 +254,8 @@ class Hindsight:
             trace=trace,
             query_timestamp=query_timestamp,
             include=include_opts,
+            tags=tags,
+            tags_match=tags_match,
         )
 
         return _run_async(self._memory_api.recall_memories(bank_id, request_obj))
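The `tags_match` modes differ only in how untagged memories are treated: the plain modes still include them, the `_strict` modes drop them. A sketch against the new recall signature, with placeholder values:

```python
# Restrict recall to memories tagged "preferences" or "org"; the "_strict" mode
# also excludes memories that carry no tags at all.
response = client.recall(
    bank_id="demo-bank",
    query="What does Alice prefer?",
    tags=["preferences", "org"],
    tags_match="any_strict",
)
for result in response.results:
    print(result)
```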
@@ -240,9 +265,11 @@ class Hindsight:
         bank_id: str,
         query: str,
         budget: str = "low",
-        context: Optional[str] = None,
-        max_tokens: Optional[int] = None,
-        response_schema: Optional[Dict[str, Any]] = None,
+        context: str | None = None,
+        max_tokens: int | None = None,
+        response_schema: dict[str, Any] | None = None,
+        tags: list[str] | None = None,
+        tags_match: Literal["any", "all", "any_strict", "all_strict"] = "any",
     ) -> ReflectResponse:
         """
         Generate a contextual answer based on bank identity and memories.
@@ -256,6 +283,9 @@ class Hindsight:
             response_schema: Optional JSON Schema for structured output. When provided,
                 the response will include a 'structured_output' field with the LLM
                 response parsed according to this schema.
+            tags: Optional list of tags to filter memories by
+            tags_match: How to match tags - "any" (OR, includes untagged), "all" (AND, includes untagged),
+                "any_strict" (OR, excludes untagged), "all_strict" (AND, excludes untagged). Default: "any"
 
         Returns:
             ReflectResponse with answer text, optionally facts used, and optionally
@@ -267,6 +297,8 @@ class Hindsight:
             context=context,
             max_tokens=max_tokens,
             response_schema=response_schema,
+            tags=tags,
+            tags_match=tags_match,
         )
 
         return _run_async(self._memory_api.reflect(bank_id, request_obj))
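Since `reflect` now accepts both a JSON Schema and tag filters, a combined sketch follows; the schema and values are illustrative, and per the docstring above the parsed result lands in the response's 'structured_output' field:

```python
# Illustrative schema; reflect() parses the LLM answer against it.
schema = {
    "type": "object",
    "properties": {
        "summary": {"type": "string"},
        "confidence": {"type": "number"},
    },
    "required": ["summary"],
}

reflection = client.reflect(
    bank_id="demo-bank",
    query="Summarize what we know about Alice's working style.",
    response_schema=schema,
    tags=["preferences"],
    tags_match="any",
)
print(reflection.structured_output)  # populated because response_schema was provided
```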
@@ -274,28 +306,37 @@ class Hindsight:
     def list_memories(
         self,
         bank_id: str,
-        type: Optional[str] = None,
-        search_query: Optional[str] = None,
+        type: str | None = None,
+        search_query: str | None = None,
         limit: int = 100,
         offset: int = 0,
     ) -> ListMemoryUnitsResponse:
         """List memory units with pagination."""
-        return _run_async(self._memory_api.list_memories(
-            bank_id=bank_id,
-            type=type,
-            q=search_query,
-            limit=limit,
-            offset=offset,
-        ))
+        return _run_async(
+            self._memory_api.list_memories(
+                bank_id=bank_id,
+                type=type,
+                q=search_query,
+                limit=limit,
+                offset=offset,
+            )
+        )
 
     def create_bank(
         self,
         bank_id: str,
-        name: Optional[str] = None,
-        background: Optional[str] = None,
-        disposition: Optional[Dict[str, float]] = None,
+        name: str | None = None,
+        mission: str | None = None,
+        disposition: dict[str, float] | None = None,
     ) -> BankProfileResponse:
-        """Create or update a memory bank."""
+        """Create or update a memory bank.
+
+        Args:
+            bank_id: Unique identifier for the bank
+            name: Human-readable display name
+            mission: Instructions guiding what Hindsight should learn and remember (for mental models)
+            disposition: Optional disposition traits (skepticism, literalism, empathy)
+        """
         from hindsight_client_api.models import create_bank_request, disposition_traits
 
         disposition_obj = None
@@ -304,19 +345,40 @@ class Hindsight:
 
         request_obj = create_bank_request.CreateBankRequest(
             name=name,
-            background=background,
+            mission=mission,
             disposition=disposition_obj,
         )
 
         return _run_async(self._banks_api.create_or_update_bank(bank_id, request_obj))
 
+    def set_mission(
+        self,
+        bank_id: str,
+        mission: str,
+    ) -> BankProfileResponse:
+        """
+        Set or update the mission for a memory bank.
+
+        Args:
+            bank_id: The memory bank ID
+            mission: The mission text describing the agent's purpose
+
+        Returns:
+            BankProfileResponse with updated bank profile
+        """
+        from hindsight_client_api.models import create_bank_request
+
+        request_obj = create_bank_request.CreateBankRequest(mission=mission)
+        return _run_async(self._banks_api.create_or_update_bank(bank_id, request_obj))
+
     # Async methods (native async, no _run_async wrapper)
 
     async def aretain_batch(
         self,
         bank_id: str,
-        items: List[Dict[str, Any]],
-        document_id: Optional[str] = None,
+        items: list[dict[str, Any]],
+        document_id: str | None = None,
+        document_tags: list[str] | None = None,
         retain_async: bool = False,
     ) -> RetainResponse:
         """
@@ -324,8 +386,9 @@ class Hindsight:
 
         Args:
             bank_id: The memory bank ID
-            items: List of memory items with 'content' and optional 'timestamp', 'context', 'metadata', 'document_id', 'entities'
+            items: List of memory items with 'content' and optional 'timestamp', 'context', 'metadata', 'document_id', 'entities', 'tags'
             document_id: Optional document ID for grouping memories (applied to items that don't have their own)
+            document_tags: Optional list of tags applied to all items in this batch (merged with per-item tags)
             retain_async: If True, process asynchronously in background (default: False)
 
         Returns:
@@ -337,10 +400,7 @@ class Hindsight:
         for item in items:
             entities = None
             if item.get("entities"):
-                entities = [
-                    EntityInput(text=e["text"], type=e.get("type"))
-                    for e in item["entities"]
-                ]
+                entities = [EntityInput(text=e["text"], type=e.get("type")) for e in item["entities"]]
             memory_items.append(
                 memory_item.MemoryItem(
                     content=item["content"],
@@ -350,12 +410,14 @@ class Hindsight:
                     # Use item's document_id if provided, otherwise fall back to batch-level document_id
                     document_id=item.get("document_id") or document_id,
                     entities=entities,
+                    tags=item.get("tags"),
                 )
             )
 
         request_obj = retain_request.RetainRequest(
             items=memory_items,
             async_=retain_async,
+            document_tags=document_tags,
         )
 
         return await self._memory_api.retain_memories(bank_id, request_obj)
@@ -364,11 +426,12 @@ class Hindsight:
         self,
         bank_id: str,
         content: str,
-        timestamp: Optional[datetime] = None,
-        context: Optional[str] = None,
-        document_id: Optional[str] = None,
-        metadata: Optional[Dict[str, str]] = None,
-        entities: Optional[List[Dict[str, str]]] = None,
+        timestamp: datetime | None = None,
+        context: str | None = None,
+        document_id: str | None = None,
+        metadata: dict[str, str] | None = None,
+        entities: list[dict[str, str]] | None = None,
+        tags: list[str] | None = None,
     ) -> RetainResponse:
         """
         Store a single memory (async).
@@ -381,13 +444,23 @@ class Hindsight:
             document_id: Optional document ID for grouping
             metadata: Optional user-defined metadata
             entities: Optional list of entities [{"text": "...", "type": "..."}]
+            tags: Optional list of tags for filtering memories during recall/reflect
 
         Returns:
             RetainResponse with success status
         """
         return await self.aretain_batch(
             bank_id=bank_id,
-            items=[{"content": content, "timestamp": timestamp, "context": context, "metadata": metadata, "entities": entities}],
+            items=[
+                {
+                    "content": content,
+                    "timestamp": timestamp,
+                    "context": context,
+                    "metadata": metadata,
+                    "entities": entities,
+                    "tags": tags,
+                }
+            ],
             document_id=document_id,
         )
 
@@ -395,10 +468,12 @@ class Hindsight:
         self,
         bank_id: str,
         query: str,
-        types: Optional[List[str]] = None,
+        types: list[str] | None = None,
         max_tokens: int = 4096,
         budget: str = "mid",
-    ) -> List[RecallResult]:
+        tags: list[str] | None = None,
+        tags_match: Literal["any", "all", "any_strict", "all_strict"] = "any",
+    ) -> list[RecallResult]:
         """
         Recall memories using semantic similarity (async).
 
@@ -408,6 +483,9 @@ class Hindsight:
             types: Optional list of fact types to filter (world, experience, opinion, observation)
             max_tokens: Maximum tokens in results (default: 4096)
             budget: Budget level for recall - "low", "mid", or "high" (default: "mid")
+            tags: Optional list of tags to filter memories by
+            tags_match: How to match tags - "any" (OR, includes untagged), "all" (AND, includes untagged),
+                "any_strict" (OR, excludes untagged), "all_strict" (AND, excludes untagged). Default: "any"
 
         Returns:
             List of RecallResult objects
@@ -418,17 +496,21 @@ class Hindsight:
             budget=budget,
             max_tokens=max_tokens,
             trace=False,
+            tags=tags,
+            tags_match=tags_match,
         )
 
         response = await self._memory_api.recall_memories(bank_id, request_obj)
-        return response.results if hasattr(response, 'results') else []
+        return response.results if hasattr(response, "results") else []
 
     async def areflect(
         self,
         bank_id: str,
         query: str,
         budget: str = "low",
-        context: Optional[str] = None,
+        context: str | None = None,
+        tags: list[str] | None = None,
+        tags_match: Literal["any", "all", "any_strict", "all_strict"] = "any",
     ) -> ReflectResponse:
         """
         Generate a contextual answer based on bank identity and memories (async).
@@ -438,6 +520,9 @@ class Hindsight:
             query: The question or prompt
             budget: Budget level for reflection - "low", "mid", or "high" (default: "low")
             context: Optional additional context
+            tags: Optional list of tags to filter memories by
+            tags_match: How to match tags - "any" (OR, includes untagged), "all" (AND, includes untagged),
+                "any_strict" (OR, excludes untagged), "all_strict" (AND, excludes untagged). Default: "any"
 
         Returns:
             ReflectResponse with answer text and optionally facts used
@@ -446,6 +531,258 @@ class Hindsight:
             query=query,
             budget=budget,
             context=context,
+            tags=tags,
+            tags_match=tags_match,
         )
 
         return await self._memory_api.reflect(bank_id, request_obj)
+
+    # Mental Models methods
+
+    def create_mental_model(
+        self,
+        bank_id: str,
+        name: str,
+        source_query: str,
+        tags: list[str] | None = None,
+        max_tokens: int | None = None,
+        trigger: dict[str, Any] | None = None,
+    ):
+        """
+        Create a mental model (runs reflect in background).
+
+        Args:
+            bank_id: The memory bank ID
+            name: Human-readable name for the mental model
+            source_query: The query to run to generate content
+            tags: Optional tags for filtering during retrieval
+            max_tokens: Optional maximum tokens for the mental model content
+            trigger: Optional trigger settings (e.g., {"refresh_after_consolidation": True})
+
+        Returns:
+            CreateMentalModelResponse with operation_id
+        """
+        from hindsight_client_api.models import create_mental_model_request, mental_model_trigger
+
+        trigger_obj = None
+        if trigger:
+            trigger_obj = mental_model_trigger.MentalModelTrigger(**trigger)
+
+        request_obj = create_mental_model_request.CreateMentalModelRequest(
+            name=name,
+            source_query=source_query,
+            tags=tags,
+            max_tokens=max_tokens,
+            trigger=trigger_obj,
+        )
+
+        return _run_async(self._mental_models_api.create_mental_model(bank_id, request_obj))
+
+    def list_mental_models(self, bank_id: str, tags: list[str] | None = None):
+        """
+        List all mental models in a bank.
+
+        Args:
+            bank_id: The memory bank ID
+            tags: Optional tags to filter by
+
+        Returns:
+            ListMentalModelsResponse with items
+        """
+        return _run_async(self._mental_models_api.list_mental_models(bank_id, tags=tags))
+
+    def get_mental_model(self, bank_id: str, mental_model_id: str):
+        """
+        Get a specific mental model.
+
+        Args:
+            bank_id: The memory bank ID
+            mental_model_id: The mental model ID
+
+        Returns:
+            MentalModelResponse
+        """
+        return _run_async(self._mental_models_api.get_mental_model(bank_id, mental_model_id))
+
+    def refresh_mental_model(self, bank_id: str, mental_model_id: str):
+        """
+        Refresh a mental model to update with current knowledge.
+
+        Args:
+            bank_id: The memory bank ID
+            mental_model_id: The mental model ID
+
+        Returns:
+            RefreshMentalModelResponse with operation_id
+        """
+        return _run_async(self._mental_models_api.refresh_mental_model(bank_id, mental_model_id))
+
+    def update_mental_model(
+        self,
+        bank_id: str,
+        mental_model_id: str,
+        name: str | None = None,
+        source_query: str | None = None,
+        tags: list[str] | None = None,
+        max_tokens: int | None = None,
+        trigger: dict[str, Any] | None = None,
+    ):
+        """
+        Update a mental model's metadata.
+
+        Args:
+            bank_id: The memory bank ID
+            mental_model_id: The mental model ID
+            name: Optional new name
+            source_query: Optional new source query
+            tags: Optional new tags
+            max_tokens: Optional new max tokens
+            trigger: Optional trigger settings (e.g., {"refresh_after_consolidation": True})
+
+        Returns:
+            MentalModelResponse
+        """
+        from hindsight_client_api.models import mental_model_trigger, update_mental_model_request
+
+        trigger_obj = None
+        if trigger:
+            trigger_obj = mental_model_trigger.MentalModelTrigger(**trigger)
+
+        request_obj = update_mental_model_request.UpdateMentalModelRequest(
+            name=name,
+            source_query=source_query,
+            tags=tags,
+            max_tokens=max_tokens,
+            trigger=trigger_obj,
+        )
+
+        return _run_async(self._mental_models_api.update_mental_model(bank_id, mental_model_id, request_obj))
+
+    def delete_mental_model(self, bank_id: str, mental_model_id: str):
+        """
+        Delete a mental model.
+
+        Args:
+            bank_id: The memory bank ID
+            mental_model_id: The mental model ID
+        """
+        return _run_async(self._mental_models_api.delete_mental_model(bank_id, mental_model_id))
+
+    # Directives methods
+
+    def create_directive(
+        self,
+        bank_id: str,
+        name: str,
+        content: str,
+        priority: int = 0,
+        is_active: bool = True,
+        tags: list[str] | None = None,
+    ):
+        """
+        Create a directive (hard rule for reflect).
+
+        Args:
+            bank_id: The memory bank ID
+            name: Human-readable name for the directive
+            content: The directive content/rules
+            priority: Priority level (higher = injected first)
+            is_active: Whether the directive is active
+            tags: Optional tags for filtering
+
+        Returns:
+            DirectiveResponse
+        """
+        from hindsight_client_api.models import create_directive_request
+
+        request_obj = create_directive_request.CreateDirectiveRequest(
+            name=name,
+            content=content,
+            priority=priority,
+            is_active=is_active,
+            tags=tags,
+        )
+
+        return _run_async(self._directives_api.create_directive(bank_id, request_obj))
+
+    def list_directives(self, bank_id: str, tags: list[str] | None = None):
+        """
+        List all directives in a bank.
+
+        Args:
+            bank_id: The memory bank ID
+            tags: Optional tags to filter by
+
+        Returns:
+            ListDirectivesResponse with items
+        """
+        return _run_async(self._directives_api.list_directives(bank_id, tags=tags))
+
+    def get_directive(self, bank_id: str, directive_id: str):
+        """
+        Get a specific directive.
+
+        Args:
+            bank_id: The memory bank ID
+            directive_id: The directive ID
+
+        Returns:
+            DirectiveResponse
+        """
+        return _run_async(self._directives_api.get_directive(bank_id, directive_id))
+
+    def update_directive(
+        self,
+        bank_id: str,
+        directive_id: str,
+        name: str | None = None,
+        content: str | None = None,
+        priority: int | None = None,
+        is_active: bool | None = None,
+        tags: list[str] | None = None,
+    ):
+        """
+        Update a directive.
+
+        Args:
+            bank_id: The memory bank ID
+            directive_id: The directive ID
+            name: Optional new name
+            content: Optional new content
+            priority: Optional new priority
+            is_active: Optional new active status
+            tags: Optional new tags
+
+        Returns:
+            DirectiveResponse
+        """
+        from hindsight_client_api.models import update_directive_request
+
+        request_obj = update_directive_request.UpdateDirectiveRequest(
+            name=name,
+            content=content,
+            priority=priority,
+            is_active=is_active,
+            tags=tags,
+        )
+
+        return _run_async(self._directives_api.update_directive(bank_id, directive_id, request_obj))
+
+    def delete_directive(self, bank_id: str, directive_id: str):
+        """
+        Delete a directive.
+
+        Args:
+            bank_id: The memory bank ID
+            directive_id: The directive ID
+        """
+        return _run_async(self._directives_api.delete_directive(bank_id, directive_id))
+
+    def delete_bank(self, bank_id: str):
+        """
+        Delete a memory bank.
+
+        Args:
+            bank_id: The memory bank ID
+        """
+        return _run_async(self._banks_api.delete_bank(bank_id))
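Taken together, the new helpers wrap the generated MentalModelsApi and DirectivesApi behind the same synchronous facade as the memory methods. An end-to-end sketch with placeholder names; the `operation_id` and `items` attribute names are taken from the docstrings above, everything else is illustrative:

```python
# Mental model: a named reflect query that is materialized in the background.
op = client.create_mental_model(
    bank_id="demo-bank",
    name="Customer profile",
    source_query="What do we know about this customer's preferences?",
    tags=["preferences"],
    trigger={"refresh_after_consolidation": True},
)
print(op.operation_id)  # handle for the background operation

models = client.list_mental_models(bank_id="demo-bank", tags=["preferences"])
print(models.items)

# Directive: a hard rule injected into reflect, higher priority first.
client.create_directive(
    bank_id="demo-bank",
    name="No speculation",
    content="Only state facts supported by stored memories.",
    priority=10,
)
print(client.list_directives(bank_id="demo-bank").items)
```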