tactus-0.31.2-py3-none-any.whl

This diff shows the contents of a publicly available package version as released to a supported registry. It is provided for informational purposes only and reflects the package exactly as it appears in its public registry.
Files changed (160)
  1. tactus/__init__.py +49 -0
  2. tactus/adapters/__init__.py +9 -0
  3. tactus/adapters/broker_log.py +76 -0
  4. tactus/adapters/cli_hitl.py +189 -0
  5. tactus/adapters/cli_log.py +223 -0
  6. tactus/adapters/cost_collector_log.py +56 -0
  7. tactus/adapters/file_storage.py +367 -0
  8. tactus/adapters/http_callback_log.py +109 -0
  9. tactus/adapters/ide_log.py +71 -0
  10. tactus/adapters/lua_tools.py +336 -0
  11. tactus/adapters/mcp.py +289 -0
  12. tactus/adapters/mcp_manager.py +196 -0
  13. tactus/adapters/memory.py +53 -0
  14. tactus/adapters/plugins.py +419 -0
  15. tactus/backends/http_backend.py +58 -0
  16. tactus/backends/model_backend.py +35 -0
  17. tactus/backends/pytorch_backend.py +110 -0
  18. tactus/broker/__init__.py +12 -0
  19. tactus/broker/client.py +247 -0
  20. tactus/broker/protocol.py +183 -0
  21. tactus/broker/server.py +1123 -0
  22. tactus/broker/stdio.py +12 -0
  23. tactus/cli/__init__.py +7 -0
  24. tactus/cli/app.py +2245 -0
  25. tactus/cli/commands/__init__.py +0 -0
  26. tactus/core/__init__.py +32 -0
  27. tactus/core/config_manager.py +790 -0
  28. tactus/core/dependencies/__init__.py +14 -0
  29. tactus/core/dependencies/registry.py +180 -0
  30. tactus/core/dsl_stubs.py +2117 -0
  31. tactus/core/exceptions.py +66 -0
  32. tactus/core/execution_context.py +480 -0
  33. tactus/core/lua_sandbox.py +508 -0
  34. tactus/core/message_history_manager.py +236 -0
  35. tactus/core/mocking.py +286 -0
  36. tactus/core/output_validator.py +291 -0
  37. tactus/core/registry.py +499 -0
  38. tactus/core/runtime.py +2907 -0
  39. tactus/core/template_resolver.py +142 -0
  40. tactus/core/yaml_parser.py +301 -0
  41. tactus/docker/Dockerfile +61 -0
  42. tactus/docker/entrypoint.sh +69 -0
  43. tactus/dspy/__init__.py +39 -0
  44. tactus/dspy/agent.py +1144 -0
  45. tactus/dspy/broker_lm.py +181 -0
  46. tactus/dspy/config.py +212 -0
  47. tactus/dspy/history.py +196 -0
  48. tactus/dspy/module.py +405 -0
  49. tactus/dspy/prediction.py +318 -0
  50. tactus/dspy/signature.py +185 -0
  51. tactus/formatting/__init__.py +7 -0
  52. tactus/formatting/formatter.py +437 -0
  53. tactus/ide/__init__.py +9 -0
  54. tactus/ide/coding_assistant.py +343 -0
  55. tactus/ide/server.py +2223 -0
  56. tactus/primitives/__init__.py +49 -0
  57. tactus/primitives/control.py +168 -0
  58. tactus/primitives/file.py +229 -0
  59. tactus/primitives/handles.py +378 -0
  60. tactus/primitives/host.py +94 -0
  61. tactus/primitives/human.py +342 -0
  62. tactus/primitives/json.py +189 -0
  63. tactus/primitives/log.py +187 -0
  64. tactus/primitives/message_history.py +157 -0
  65. tactus/primitives/model.py +163 -0
  66. tactus/primitives/procedure.py +564 -0
  67. tactus/primitives/procedure_callable.py +318 -0
  68. tactus/primitives/retry.py +155 -0
  69. tactus/primitives/session.py +152 -0
  70. tactus/primitives/state.py +182 -0
  71. tactus/primitives/step.py +209 -0
  72. tactus/primitives/system.py +93 -0
  73. tactus/primitives/tool.py +375 -0
  74. tactus/primitives/tool_handle.py +279 -0
  75. tactus/primitives/toolset.py +229 -0
  76. tactus/protocols/__init__.py +38 -0
  77. tactus/protocols/chat_recorder.py +81 -0
  78. tactus/protocols/config.py +97 -0
  79. tactus/protocols/cost.py +31 -0
  80. tactus/protocols/hitl.py +71 -0
  81. tactus/protocols/log_handler.py +27 -0
  82. tactus/protocols/models.py +355 -0
  83. tactus/protocols/result.py +33 -0
  84. tactus/protocols/storage.py +90 -0
  85. tactus/providers/__init__.py +13 -0
  86. tactus/providers/base.py +92 -0
  87. tactus/providers/bedrock.py +117 -0
  88. tactus/providers/google.py +105 -0
  89. tactus/providers/openai.py +98 -0
  90. tactus/sandbox/__init__.py +63 -0
  91. tactus/sandbox/config.py +171 -0
  92. tactus/sandbox/container_runner.py +1099 -0
  93. tactus/sandbox/docker_manager.py +433 -0
  94. tactus/sandbox/entrypoint.py +227 -0
  95. tactus/sandbox/protocol.py +213 -0
  96. tactus/stdlib/__init__.py +10 -0
  97. tactus/stdlib/io/__init__.py +13 -0
  98. tactus/stdlib/io/csv.py +88 -0
  99. tactus/stdlib/io/excel.py +136 -0
  100. tactus/stdlib/io/file.py +90 -0
  101. tactus/stdlib/io/fs.py +154 -0
  102. tactus/stdlib/io/hdf5.py +121 -0
  103. tactus/stdlib/io/json.py +109 -0
  104. tactus/stdlib/io/parquet.py +83 -0
  105. tactus/stdlib/io/tsv.py +88 -0
  106. tactus/stdlib/loader.py +274 -0
  107. tactus/stdlib/tac/tactus/tools/done.tac +33 -0
  108. tactus/stdlib/tac/tactus/tools/log.tac +50 -0
  109. tactus/testing/README.md +273 -0
  110. tactus/testing/__init__.py +61 -0
  111. tactus/testing/behave_integration.py +380 -0
  112. tactus/testing/context.py +486 -0
  113. tactus/testing/eval_models.py +114 -0
  114. tactus/testing/evaluation_runner.py +222 -0
  115. tactus/testing/evaluators.py +634 -0
  116. tactus/testing/events.py +94 -0
  117. tactus/testing/gherkin_parser.py +134 -0
  118. tactus/testing/mock_agent.py +315 -0
  119. tactus/testing/mock_dependencies.py +234 -0
  120. tactus/testing/mock_hitl.py +171 -0
  121. tactus/testing/mock_registry.py +168 -0
  122. tactus/testing/mock_tools.py +133 -0
  123. tactus/testing/models.py +115 -0
  124. tactus/testing/pydantic_eval_runner.py +508 -0
  125. tactus/testing/steps/__init__.py +13 -0
  126. tactus/testing/steps/builtin.py +902 -0
  127. tactus/testing/steps/custom.py +69 -0
  128. tactus/testing/steps/registry.py +68 -0
  129. tactus/testing/test_runner.py +489 -0
  130. tactus/tracing/__init__.py +5 -0
  131. tactus/tracing/trace_manager.py +417 -0
  132. tactus/utils/__init__.py +1 -0
  133. tactus/utils/cost_calculator.py +72 -0
  134. tactus/utils/model_pricing.py +132 -0
  135. tactus/utils/safe_file_library.py +502 -0
  136. tactus/utils/safe_libraries.py +234 -0
  137. tactus/validation/LuaLexerBase.py +66 -0
  138. tactus/validation/LuaParserBase.py +23 -0
  139. tactus/validation/README.md +224 -0
  140. tactus/validation/__init__.py +7 -0
  141. tactus/validation/error_listener.py +21 -0
  142. tactus/validation/generated/LuaLexer.interp +231 -0
  143. tactus/validation/generated/LuaLexer.py +5548 -0
  144. tactus/validation/generated/LuaLexer.tokens +124 -0
  145. tactus/validation/generated/LuaLexerBase.py +66 -0
  146. tactus/validation/generated/LuaParser.interp +173 -0
  147. tactus/validation/generated/LuaParser.py +6439 -0
  148. tactus/validation/generated/LuaParser.tokens +124 -0
  149. tactus/validation/generated/LuaParserBase.py +23 -0
  150. tactus/validation/generated/LuaParserVisitor.py +118 -0
  151. tactus/validation/generated/__init__.py +7 -0
  152. tactus/validation/grammar/LuaLexer.g4 +123 -0
  153. tactus/validation/grammar/LuaParser.g4 +178 -0
  154. tactus/validation/semantic_visitor.py +817 -0
  155. tactus/validation/validator.py +157 -0
  156. tactus-0.31.2.dist-info/METADATA +1809 -0
  157. tactus-0.31.2.dist-info/RECORD +160 -0
  158. tactus-0.31.2.dist-info/WHEEL +4 -0
  159. tactus-0.31.2.dist-info/entry_points.txt +2 -0
  160. tactus-0.31.2.dist-info/licenses/LICENSE +21 -0
tactus/dspy/broker_lm.py ADDED
@@ -0,0 +1,181 @@
+"""
+DSPy LM implementation backed by the local Tactus broker.
+
+This allows the runtime container to be:
+- networkless (`--network none`)
+- secretless (no API keys in env/mounts/request payload)
+
+while still supporting streaming via DSPy's `streamify()` mechanism.
+"""
+
+from __future__ import annotations
+
+from typing import Any
+
+import dspy
+import litellm
+from asyncer import syncify
+from litellm import ModelResponse, ModelResponseStream
+
+from tactus.broker.client import BrokerClient
+
+
+def _split_provider_model(model: str) -> tuple[str, str]:
+    if "/" not in model:
+        raise ValueError(f"Invalid model format: {model}. Expected 'provider/model'.")
+    provider, model_id = model.split("/", 1)
+    return provider, model_id
+
+
+class BrokeredLM(dspy.BaseLM):
+    """
+    A DSPy-compatible LM that delegates completion calls to the broker.
+
+    The broker connection is configured via `TACTUS_BROKER_SOCKET`.
+    """
+
+    def __init__(
+        self,
+        model: str,
+        *,
+        model_type: str = "chat",
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        cache: bool | None = None,
+        socket_path: str | None = None,
+        **kwargs: Any,
+    ):
+        if model_type != "chat":
+            raise ValueError("BrokeredLM currently supports only model_type='chat'")
+
+        super().__init__(
+            model=model,
+            model_type=model_type,
+            temperature=temperature if temperature is not None else 0.7,
+            max_tokens=max_tokens if max_tokens is not None else 1000,
+            cache=False,
+            **kwargs,
+        )
+
+        if socket_path is not None:
+            self._client = BrokerClient(socket_path)
+            return
+
+        env_client = BrokerClient.from_environment()
+        if env_client is None:
+            raise RuntimeError("BrokerClient not configured (TACTUS_BROKER_SOCKET is missing)")
+        self._client = env_client
+
+    def forward(
+        self, prompt: str | None = None, messages: list[dict[str, Any]] | None = None, **kwargs: Any
+    ):
+        return syncify(self.aforward)(prompt=prompt, messages=messages, **kwargs)
+
+    async def aforward(
+        self, prompt: str | None = None, messages: list[dict[str, Any]] | None = None, **kwargs: Any
+    ):
+        provider, model_id = _split_provider_model(self.model)
+
+        if provider != "openai":
+            raise ValueError(
+                f"BrokeredLM only supports provider 'openai' for now (got {provider!r})"
+            )
+
+        if messages is None:
+            if prompt is None:
+                messages = []
+            else:
+                messages = [{"role": "user", "content": prompt}]
+
+        merged_kwargs = {**self.kwargs, **kwargs}
+        temperature = merged_kwargs.get("temperature")
+
+        # DSPy uses `max_tokens`, while some reasoning models use `max_completion_tokens`.
+        max_tokens = merged_kwargs.get("max_tokens")
+        if max_tokens is None and merged_kwargs.get("max_completion_tokens") is not None:
+            max_tokens = merged_kwargs.get("max_completion_tokens")
+
+        send_stream = dspy.settings.send_stream
+        caller_predict = dspy.settings.caller_predict
+        caller_predict_id = id(caller_predict) if caller_predict else None
+
+        if send_stream is not None:
+            chunks: list[ModelResponseStream] = []
+            async for event in self._client.llm_chat(
+                provider="openai",
+                model=model_id,
+                messages=messages,
+                temperature=temperature,
+                max_tokens=max_tokens,
+                stream=True,
+            ):
+                event_type = event.get("event")
+                if event_type == "delta":
+                    text = (event.get("data") or {}).get("text") or ""
+                    if not text:
+                        continue
+                    chunk = ModelResponseStream(
+                        model=model_id,
+                        choices=[{"index": 0, "delta": {"content": text}}],
+                    )
+                    if caller_predict_id is not None:
+                        chunk.predict_id = caller_predict_id  # type: ignore[attr-defined]
+                    chunks.append(chunk)
+                    await send_stream.send(chunk)
+                    continue
+
+                if event_type == "done":
+                    break
+
+                if event_type == "error":
+                    err = event.get("error") or {}
+                    raise RuntimeError(err.get("message") or "Broker LLM error")
+
+            if chunks:
+                return litellm.stream_chunk_builder(chunks)
+
+            # No streamed chunks; return an empty completion.
+            return ModelResponse(
+                model=model_id,
+                choices=[
+                    {
+                        "index": 0,
+                        "finish_reason": "stop",
+                        "message": {"role": "assistant", "content": ""},
+                    }
+                ],
+                usage={"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
+            )
+
+        # Non-streaming path
+        final_text = ""
+        usage = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
+        async for event in self._client.llm_chat(
+            provider="openai",
+            model=model_id,
+            messages=messages,
+            temperature=temperature,
+            max_tokens=max_tokens,
+            stream=False,
+        ):
+            event_type = event.get("event")
+            if event_type == "done":
+                data = event.get("data") or {}
+                final_text = data.get("text") or ""
+                usage = data.get("usage") or usage
+                break
+            if event_type == "error":
+                err = event.get("error") or {}
+                raise RuntimeError(err.get("message") or "Broker LLM error")
+
+        return ModelResponse(
+            model=model_id,
+            choices=[
+                {
+                    "index": 0,
+                    "finish_reason": "stop",
+                    "message": {"role": "assistant", "content": final_text},
+                }
+            ],
+            usage=usage,
+        )
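Taken together, BrokeredLM behaves like any other dspy.BaseLM: it can be constructed against an explicit Unix socket and scoped with dspy.context(). A minimal usage sketch, not part of the package; the socket path shown is hypothetical, and a Tactus broker is assumed to already be serving on it (inside the sandbox the path normally comes from TACTUS_BROKER_SOCKET instead):

import dspy

from tactus.dspy.broker_lm import BrokeredLM

# Hypothetical socket path; a broker must already be listening there.
lm = BrokeredLM("openai/gpt-4o-mini", socket_path="/run/tactus/broker.sock")

# Scope the LM to this block rather than mutating DSPy's global settings.
with dspy.context(lm=lm):
    qa = dspy.Predict("question -> answer")
    result = qa(question="What is 2+2?")
    print(result.answer)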
tactus/dspy/config.py ADDED
@@ -0,0 +1,212 @@
+"""
+DSPy configuration for Tactus.
+
+This module handles Language Model configuration using DSPy's LM abstraction,
+which uses LiteLLM under the hood for provider-agnostic LLM access.
+"""
+
+from typing import Optional, Any
+
+import dspy
+
+
+# Global reference to the current LM configuration
+_current_lm: Optional[dspy.BaseLM] = None
+
+
+def configure_lm(
+    model: str,
+    api_key: Optional[str] = None,
+    api_base: Optional[str] = None,
+    temperature: float = 0.7,
+    max_tokens: Optional[int] = None,
+    model_type: Optional[str] = None,
+    **kwargs: Any,
+) -> dspy.BaseLM:
+    """
+    Configure the default Language Model for DSPy operations.
+
+    This uses LiteLLM's model naming convention:
+    - OpenAI: "openai/gpt-4o", "openai/gpt-4o-mini"
+    - Anthropic: "anthropic/claude-3-5-sonnet-20241022"
+    - AWS Bedrock: "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0"
+    - Google: "gemini/gemini-pro"
+
+    Args:
+        model: Model identifier in LiteLLM format (e.g., "openai/gpt-4o")
+        api_key: API key (optional, can use environment variables)
+        api_base: Custom API base URL (optional)
+        temperature: Sampling temperature (default: 0.7)
+        max_tokens: Maximum tokens in response (optional)
+        model_type: Model type (e.g., "chat", "responses" for reasoning models)
+        **kwargs: Additional LiteLLM parameters
+
+    Returns:
+        Configured dspy.LM instance
+
+    Example:
+        >>> configure_lm("openai/gpt-4o", temperature=0.3)
+        >>> configure_lm("anthropic/claude-3-5-sonnet-20241022")
+        >>> configure_lm("openai/gpt-5-mini", model_type="responses")
+    """
+    global _current_lm
+
+    import os
+
+    # Validate model parameter
+    if model is None or not model:
+        raise ValueError("model is required for LM configuration")
+
+    if not isinstance(model, str) or not model.startswith(
+        ("openai/", "anthropic/", "bedrock/", "gemini/", "ollama/")
+    ):
+        # Check if it's at least formatted correctly
+        if "/" not in model:
+            raise ValueError(
+                f"Invalid model format: {model}. Expected format like 'provider/model-name'"
+            )
+
+    # Build configuration
+    lm_kwargs = {
+        "temperature": temperature,
+        # IMPORTANT: Disable caching to enable streaming. With cache=True (default),
+        # DSPy returns cached responses which breaks streamify()'s ability to stream.
+        "cache": False,
+        **kwargs,
+    }
+
+    if api_key:
+        lm_kwargs["api_key"] = api_key
+    if api_base:
+        lm_kwargs["api_base"] = api_base
+    if max_tokens:
+        lm_kwargs["max_tokens"] = max_tokens
+    if model_type:
+        lm_kwargs["model_type"] = model_type
+
+    # If running inside the secretless runtime container, use the brokered LM.
+    if os.environ.get("TACTUS_BROKER_SOCKET"):
+        from tactus.dspy.broker_lm import BrokeredLM
+
+        # Ensure we don't accidentally pass credentials into the runtime container process.
+        lm_kwargs.pop("api_key", None)
+        lm_kwargs.pop("api_base", None)
+
+        # BrokeredLM reads the socket path from TACTUS_BROKER_SOCKET.
+        lm = BrokeredLM(model, **lm_kwargs)
+    else:
+        # Create and configure the standard DSPy LM (LiteLLM-backed)
+        lm = dspy.LM(model, **lm_kwargs)
+
+    # Set as global default
+    dspy.configure(lm=lm)
+    _current_lm = lm
+
+    return lm
+
+
+def get_current_lm() -> Optional[dspy.BaseLM]:
+    """
+    Get the currently configured Language Model.
+
+    Returns:
+        The current dspy.BaseLM instance, or None if not configured.
+    """
+    return _current_lm
+
+
+def ensure_lm_configured() -> dspy.BaseLM:
+    """
+    Ensure a Language Model is configured, raising an error if not.
+
+    Returns:
+        The current dspy.BaseLM instance.
+
+    Raises:
+        RuntimeError: If no LM has been configured.
+    """
+    if _current_lm is None:
+        raise RuntimeError(
+            "No Language Model configured. "
+            "Call configure_lm() or use LM() primitive in your Tactus code."
+        )
+    return _current_lm
+
+
+def reset_lm_configuration() -> None:
+    """
+    Reset the LM configuration (primarily for testing).
+
+    This clears the global LM state, allowing tests to verify
+    error handling when no LM is configured.
+    """
+    global _current_lm
+    _current_lm = None
+    # Also reset DSPy's global configuration
+    dspy.configure(lm=None)
+
+
+def create_lm(
+    model: str,
+    api_key: Optional[str] = None,
+    api_base: Optional[str] = None,
+    temperature: float = 0.7,
+    max_tokens: Optional[int] = None,
+    model_type: Optional[str] = None,
+    **kwargs: Any,
+) -> dspy.LM:
+    """
+    Create a Language Model instance WITHOUT setting it as global default.
+
+    This is useful for creating LMs in async contexts where dspy.configure()
+    cannot be called (e.g., in different event loops or async tasks).
+
+    Use with dspy.context(lm=...) to set the LM for a specific scope:
+        lm = create_lm("openai/gpt-4o")
+        with dspy.context(lm=lm):
+            # Use DSPy operations here
+
+    Args:
+        model: Model identifier in LiteLLM format (e.g., "openai/gpt-4o")
+        api_key: API key (optional, can use environment variables)
+        api_base: Custom API base URL (optional)
+        temperature: Sampling temperature (default: 0.7)
+        max_tokens: Maximum tokens in response (optional)
+        model_type: Model type (e.g., "chat", "responses" for reasoning models)
+        **kwargs: Additional LiteLLM parameters
+
+    Returns:
+        dspy.LM instance (not configured globally)
+    """
+    # Validate model parameter
+    if model is None or not model:
+        raise ValueError("model is required for LM configuration")
+
+    if not isinstance(model, str) or not model.startswith(
+        ("openai/", "anthropic/", "bedrock/", "gemini/", "ollama/")
+    ):
+        # Check if it's at least formatted correctly
+        if "/" not in model:
+            raise ValueError(
+                f"Invalid model format: {model}. Expected format like 'provider/model-name'"
+            )
+
+    # Build configuration
+    lm_kwargs = {
+        "temperature": temperature,
+        # IMPORTANT: Disable caching to enable streaming
+        "cache": False,
+        **kwargs,
+    }
+
+    if api_key:
+        lm_kwargs["api_key"] = api_key
+    if api_base:
+        lm_kwargs["api_base"] = api_base
+    if max_tokens:
+        lm_kwargs["max_tokens"] = max_tokens
+    if model_type:
+        lm_kwargs["model_type"] = model_type
+
+    # Create LM without setting as global default
+    return dspy.LM(model, **lm_kwargs)
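The two entry points above differ only in scope: configure_lm() mutates DSPy's global settings (and transparently swaps in BrokeredLM when TACTUS_BROKER_SOCKET is set), while create_lm() returns an instance for use with dspy.context(). A short sketch, not part of the package; the model names are illustrative and credentials are assumed to come from environment variables:

import dspy

from tactus.dspy.config import configure_lm, create_lm, get_current_lm

# Process-wide default: calls dspy.configure(lm=...) and records the LM globally.
configure_lm("openai/gpt-4o-mini", temperature=0.3, max_tokens=512)
assert get_current_lm() is not None

# Scoped alternative for async contexts where dspy.configure() is unsafe.
lm = create_lm("anthropic/claude-3-5-sonnet-20241022")
with dspy.context(lm=lm):
    pass  # DSPy calls here use `lm` rather than the global default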
tactus/dspy/history.py ADDED
@@ -0,0 +1,196 @@
+"""
+DSPy History integration for Tactus.
+
+This module provides the History primitive that maps to DSPy History,
+enabling multi-turn conversation management in Tactus procedures.
+"""
+
+from typing import Any, Dict, List, Optional
+
+import dspy
+
+
+class TactusHistory:
+    """
+    A Tactus wrapper around DSPy History.
+
+    This class provides a convenient API for managing conversation history
+    that can be passed to DSPy Modules. It maintains a list of messages
+    and provides methods for adding, retrieving, and clearing messages.
+
+    Example usage in Lua:
+        -- Create a history
+        local history = History()
+
+        -- Add messages
+        history.add({ question = "What is 2+2?", answer = "4" })
+        history.add({ question = "And 3+3?", answer = "6" })
+
+        -- Get all messages
+        local messages = history.get()
+
+        -- Pass to a Module
+        local result = qa_module({ question = "What is 4+4?", history = history })
+
+        -- Clear history
+        history.clear()
+    """
+
+    def __init__(self, messages: Optional[List[Dict[str, Any]]] = None):
+        """
+        Initialize a TactusHistory.
+
+        Args:
+            messages: Optional initial list of messages
+        """
+        self._messages: List[Dict[str, Any]] = messages or []
+
+    def add(self, message: Dict[str, Any]) -> None:
+        """
+        Add a message to the history.
+
+        Args:
+            message: A dict with keys 'role' and 'content', or a TactusMessage object
+                e.g., {"role": "user", "content": "What is 2+2?"}
+                e.g., Message {role = "user", content = "What is 2+2?"}
+
+        Raises:
+            ValueError: If message lacks required keys or invalid role
+        """
+        # Check if it's a TactusMessage (has to_dict method)
+        if hasattr(message, "to_dict") and callable(message.to_dict):
+            message = message.to_dict()
+        # Convert Lua tables to dict if needed
+        elif hasattr(message, "items"):
+            # It's a Lua table or similar mapping
+            try:
+                message = dict(message.items())
+            except (AttributeError, TypeError):
+                pass
+
+        # Check for required keys
+        if not isinstance(message, dict):
+            raise ValueError("Message must be a dictionary or TactusMessage")
+
+        if "role" not in message:
+            raise ValueError("role is required")
+
+        if "content" not in message:
+            raise ValueError("Message must include 'content' key")
+
+        # Validate role
+        valid_roles = ["system", "user", "assistant"]
+        if message["role"] not in valid_roles:
+            raise ValueError(f"Invalid role. Must be one of {valid_roles}")
+
+        # Convert legacy formats if needed
+        if "question" in message and "answer" in message:
+            message = {
+                "role": "user",
+                "content": message.get("question", ""),
+            }
+        elif "answer" in message:
+            message = {
+                "role": "assistant",
+                "content": message.get("answer", ""),
+            }
+
+        self._messages.append(message)
+
+    def get(
+        self, context_window: Optional[int] = None, token_limit: Optional[int] = None
+    ) -> List[Dict[str, Any]]:
+        """
+        Get messages from history, optionally filtered by context window and token limit.
+
+        Args:
+            context_window: Maximum number of recent messages to retrieve
+            token_limit: Maximum number of tokens to include
+
+        Returns:
+            List of message dictionaries
+        """
+        messages = self._messages.copy()
+
+        # Apply context window
+        if context_window is not None:
+            messages = messages[-context_window:]
+
+        # Simple token estimation (approximation)
+        if token_limit is not None:
+            token_count = 0
+            filtered_messages = []
+            for msg in reversed(messages):
+                # Basic token estimation: 1 token per 4 characters
+                msg_tokens = len(msg.get("content", "")) // 4 + len(msg.get("role", "")) // 4 + 4
+
+                if token_count + msg_tokens <= token_limit:
+                    filtered_messages.insert(0, msg)
+                    token_count += msg_tokens
+                else:
+                    break
+
+            messages = filtered_messages
+
+        return messages
+
+    def clear(self) -> None:
+        """Clear all messages from the history."""
+        self._messages.clear()
+
+    def to_dspy(self) -> dspy.History:
+        """
+        Convert to a DSPy History object.
+
+        Returns:
+            A dspy.History instance suitable for passing to DSPy Modules
+        """
+        return dspy.History(messages=self._messages)
+
+    def count_tokens(self) -> int:
+        """
+        Estimate total tokens in the history.
+
+        Returns:
+            Estimated token count
+        """
+        return sum(
+            len(msg.get("content", "")) // 4 + len(msg.get("role", "")) // 4 + 4
+            for msg in self._messages
+        )
+
+    def __len__(self) -> int:
+        """Return the number of messages in history."""
+        return len(self._messages)
+
+    def __iter__(self):
+        """Iterate over messages in history."""
+        return iter(self._messages)
+
+    @classmethod
+    def from_dspy(cls, dspy_history: dspy.History) -> "TactusHistory":
+        """
+        Create a TactusHistory from a DSPy History.
+
+        Args:
+            dspy_history: A dspy.History instance
+
+        Returns:
+            A TactusHistory instance
+        """
+        return cls(messages=dspy_history.messages)
+
+
+def create_history(messages: Optional[List[Dict[str, Any]]] = None) -> TactusHistory:
+    """
+    Create a new TactusHistory.
+
+    This is the main entry point used by the DSL stubs.
+
+    Args:
+        messages: Optional initial list of messages
+
+    Returns:
+        A TactusHistory instance
+    """
+    return TactusHistory(messages=messages)
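As a rough illustration of the TactusHistory API from the Python side (a sketch, not part of the package): messages must carry role and content, retrieval can be bounded by a message window or by the 4-characters-per-token estimate, and to_dspy() hands the list to DSPy:

from tactus.dspy.history import create_history

history = create_history()
history.add({"role": "user", "content": "What is 2+2?"})
history.add({"role": "assistant", "content": "4"})
history.add({"role": "user", "content": "And 3+3?"})

print(len(history))                   # 3
print(history.get(context_window=2))  # last two messages only
print(history.count_tokens())         # estimate: ~1 token per 4 characters

dspy_history = history.to_dspy()      # dspy.History(messages=[...])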