tactus-0.31.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (160)
  1. tactus/__init__.py +49 -0
  2. tactus/adapters/__init__.py +9 -0
  3. tactus/adapters/broker_log.py +76 -0
  4. tactus/adapters/cli_hitl.py +189 -0
  5. tactus/adapters/cli_log.py +223 -0
  6. tactus/adapters/cost_collector_log.py +56 -0
  7. tactus/adapters/file_storage.py +367 -0
  8. tactus/adapters/http_callback_log.py +109 -0
  9. tactus/adapters/ide_log.py +71 -0
  10. tactus/adapters/lua_tools.py +336 -0
  11. tactus/adapters/mcp.py +289 -0
  12. tactus/adapters/mcp_manager.py +196 -0
  13. tactus/adapters/memory.py +53 -0
  14. tactus/adapters/plugins.py +419 -0
  15. tactus/backends/http_backend.py +58 -0
  16. tactus/backends/model_backend.py +35 -0
  17. tactus/backends/pytorch_backend.py +110 -0
  18. tactus/broker/__init__.py +12 -0
  19. tactus/broker/client.py +247 -0
  20. tactus/broker/protocol.py +183 -0
  21. tactus/broker/server.py +1123 -0
  22. tactus/broker/stdio.py +12 -0
  23. tactus/cli/__init__.py +7 -0
  24. tactus/cli/app.py +2245 -0
  25. tactus/cli/commands/__init__.py +0 -0
  26. tactus/core/__init__.py +32 -0
  27. tactus/core/config_manager.py +790 -0
  28. tactus/core/dependencies/__init__.py +14 -0
  29. tactus/core/dependencies/registry.py +180 -0
  30. tactus/core/dsl_stubs.py +2117 -0
  31. tactus/core/exceptions.py +66 -0
  32. tactus/core/execution_context.py +480 -0
  33. tactus/core/lua_sandbox.py +508 -0
  34. tactus/core/message_history_manager.py +236 -0
  35. tactus/core/mocking.py +286 -0
  36. tactus/core/output_validator.py +291 -0
  37. tactus/core/registry.py +499 -0
  38. tactus/core/runtime.py +2907 -0
  39. tactus/core/template_resolver.py +142 -0
  40. tactus/core/yaml_parser.py +301 -0
  41. tactus/docker/Dockerfile +61 -0
  42. tactus/docker/entrypoint.sh +69 -0
  43. tactus/dspy/__init__.py +39 -0
  44. tactus/dspy/agent.py +1144 -0
  45. tactus/dspy/broker_lm.py +181 -0
  46. tactus/dspy/config.py +212 -0
  47. tactus/dspy/history.py +196 -0
  48. tactus/dspy/module.py +405 -0
  49. tactus/dspy/prediction.py +318 -0
  50. tactus/dspy/signature.py +185 -0
  51. tactus/formatting/__init__.py +7 -0
  52. tactus/formatting/formatter.py +437 -0
  53. tactus/ide/__init__.py +9 -0
  54. tactus/ide/coding_assistant.py +343 -0
  55. tactus/ide/server.py +2223 -0
  56. tactus/primitives/__init__.py +49 -0
  57. tactus/primitives/control.py +168 -0
  58. tactus/primitives/file.py +229 -0
  59. tactus/primitives/handles.py +378 -0
  60. tactus/primitives/host.py +94 -0
  61. tactus/primitives/human.py +342 -0
  62. tactus/primitives/json.py +189 -0
  63. tactus/primitives/log.py +187 -0
  64. tactus/primitives/message_history.py +157 -0
  65. tactus/primitives/model.py +163 -0
  66. tactus/primitives/procedure.py +564 -0
  67. tactus/primitives/procedure_callable.py +318 -0
  68. tactus/primitives/retry.py +155 -0
  69. tactus/primitives/session.py +152 -0
  70. tactus/primitives/state.py +182 -0
  71. tactus/primitives/step.py +209 -0
  72. tactus/primitives/system.py +93 -0
  73. tactus/primitives/tool.py +375 -0
  74. tactus/primitives/tool_handle.py +279 -0
  75. tactus/primitives/toolset.py +229 -0
  76. tactus/protocols/__init__.py +38 -0
  77. tactus/protocols/chat_recorder.py +81 -0
  78. tactus/protocols/config.py +97 -0
  79. tactus/protocols/cost.py +31 -0
  80. tactus/protocols/hitl.py +71 -0
  81. tactus/protocols/log_handler.py +27 -0
  82. tactus/protocols/models.py +355 -0
  83. tactus/protocols/result.py +33 -0
  84. tactus/protocols/storage.py +90 -0
  85. tactus/providers/__init__.py +13 -0
  86. tactus/providers/base.py +92 -0
  87. tactus/providers/bedrock.py +117 -0
  88. tactus/providers/google.py +105 -0
  89. tactus/providers/openai.py +98 -0
  90. tactus/sandbox/__init__.py +63 -0
  91. tactus/sandbox/config.py +171 -0
  92. tactus/sandbox/container_runner.py +1099 -0
  93. tactus/sandbox/docker_manager.py +433 -0
  94. tactus/sandbox/entrypoint.py +227 -0
  95. tactus/sandbox/protocol.py +213 -0
  96. tactus/stdlib/__init__.py +10 -0
  97. tactus/stdlib/io/__init__.py +13 -0
  98. tactus/stdlib/io/csv.py +88 -0
  99. tactus/stdlib/io/excel.py +136 -0
  100. tactus/stdlib/io/file.py +90 -0
  101. tactus/stdlib/io/fs.py +154 -0
  102. tactus/stdlib/io/hdf5.py +121 -0
  103. tactus/stdlib/io/json.py +109 -0
  104. tactus/stdlib/io/parquet.py +83 -0
  105. tactus/stdlib/io/tsv.py +88 -0
  106. tactus/stdlib/loader.py +274 -0
  107. tactus/stdlib/tac/tactus/tools/done.tac +33 -0
  108. tactus/stdlib/tac/tactus/tools/log.tac +50 -0
  109. tactus/testing/README.md +273 -0
  110. tactus/testing/__init__.py +61 -0
  111. tactus/testing/behave_integration.py +380 -0
  112. tactus/testing/context.py +486 -0
  113. tactus/testing/eval_models.py +114 -0
  114. tactus/testing/evaluation_runner.py +222 -0
  115. tactus/testing/evaluators.py +634 -0
  116. tactus/testing/events.py +94 -0
  117. tactus/testing/gherkin_parser.py +134 -0
  118. tactus/testing/mock_agent.py +315 -0
  119. tactus/testing/mock_dependencies.py +234 -0
  120. tactus/testing/mock_hitl.py +171 -0
  121. tactus/testing/mock_registry.py +168 -0
  122. tactus/testing/mock_tools.py +133 -0
  123. tactus/testing/models.py +115 -0
  124. tactus/testing/pydantic_eval_runner.py +508 -0
  125. tactus/testing/steps/__init__.py +13 -0
  126. tactus/testing/steps/builtin.py +902 -0
  127. tactus/testing/steps/custom.py +69 -0
  128. tactus/testing/steps/registry.py +68 -0
  129. tactus/testing/test_runner.py +489 -0
  130. tactus/tracing/__init__.py +5 -0
  131. tactus/tracing/trace_manager.py +417 -0
  132. tactus/utils/__init__.py +1 -0
  133. tactus/utils/cost_calculator.py +72 -0
  134. tactus/utils/model_pricing.py +132 -0
  135. tactus/utils/safe_file_library.py +502 -0
  136. tactus/utils/safe_libraries.py +234 -0
  137. tactus/validation/LuaLexerBase.py +66 -0
  138. tactus/validation/LuaParserBase.py +23 -0
  139. tactus/validation/README.md +224 -0
  140. tactus/validation/__init__.py +7 -0
  141. tactus/validation/error_listener.py +21 -0
  142. tactus/validation/generated/LuaLexer.interp +231 -0
  143. tactus/validation/generated/LuaLexer.py +5548 -0
  144. tactus/validation/generated/LuaLexer.tokens +124 -0
  145. tactus/validation/generated/LuaLexerBase.py +66 -0
  146. tactus/validation/generated/LuaParser.interp +173 -0
  147. tactus/validation/generated/LuaParser.py +6439 -0
  148. tactus/validation/generated/LuaParser.tokens +124 -0
  149. tactus/validation/generated/LuaParserBase.py +23 -0
  150. tactus/validation/generated/LuaParserVisitor.py +118 -0
  151. tactus/validation/generated/__init__.py +7 -0
  152. tactus/validation/grammar/LuaLexer.g4 +123 -0
  153. tactus/validation/grammar/LuaParser.g4 +178 -0
  154. tactus/validation/semantic_visitor.py +817 -0
  155. tactus/validation/validator.py +157 -0
  156. tactus-0.31.0.dist-info/METADATA +1809 -0
  157. tactus-0.31.0.dist-info/RECORD +160 -0
  158. tactus-0.31.0.dist-info/WHEEL +4 -0
  159. tactus-0.31.0.dist-info/entry_points.txt +2 -0
  160. tactus-0.31.0.dist-info/licenses/LICENSE +21 -0
tactus/formatting/formatter.py ADDED
@@ -0,0 +1,437 @@
+ from __future__ import annotations
+
+ import re
+ from dataclasses import dataclass
+ from pathlib import Path
+ from typing import Dict, Iterable, Set
+
+ from antlr4 import CommonTokenStream, InputStream
+ from antlr4.Token import Token
+ from antlr4.TokenStreamRewriter import TokenStreamRewriter
+
+ from tactus.validation.error_listener import TactusErrorListener
+ from tactus.validation.generated.LuaLexer import LuaLexer
+ from tactus.validation.generated.LuaParser import LuaParser
+
+
+ class FormattingError(RuntimeError):
+     pass
+
+
+ @dataclass(frozen=True)
+ class FormatResult:
+     formatted: str
+     changed: bool
+
+
+ class TactusFormatter:
+     """
+     ANTLR-based formatter for Tactus Lua DSL files.
+
+     Current scope: semantic indentation (2-space soft tabs) while preserving
+     token text, comments, and multi-line string/comment contents.
+     """
+
+     def __init__(self, indent_width: int = 2):
+         if indent_width <= 0:
+             raise ValueError("indent_width must be positive")
+         self._indent_width = indent_width
+
+     def format_source(self, source: str) -> FormatResult:
+         original_source = source
+         token_stream, error_listener = self._parse_to_tokens(source)
+         if error_listener.errors:
+             first = error_listener.errors[0]
+             raise FormattingError(f"Cannot format invalid source: {first.message}")
+
+         tokens = list(token_stream.tokens)
+         source = _rewrite_token_text(source, tokens, token_stream)
+         protected_lines = self._protected_lines_from_multiline_tokens(tokens)
+         indent_by_line = self._indentation_by_line(tokens, num_lines=_count_lines(source))
+
+         formatted = _rewrite_leading_indentation(
+             source,
+             indent_by_line=indent_by_line,
+             indent_width=self._indent_width,
+             protected_lines=protected_lines,
+         )
+         return FormatResult(formatted=formatted, changed=(formatted != original_source))
+
+     def format_file(self, file_path: Path) -> FormatResult:
+         source = file_path.read_text()
+         return self.format_source(source)
+
+     def _parse_to_tokens(self, source: str) -> tuple[CommonTokenStream, TactusErrorListener]:
+         input_stream = InputStream(source)
+         lexer = LuaLexer(input_stream)
+         token_stream = CommonTokenStream(lexer)
+         token_stream.fill()
+
+         parser = LuaParser(token_stream)
+         error_listener = TactusErrorListener()
+         parser.removeErrorListeners()
+         parser.addErrorListener(error_listener)
+         parser.start_()
+         return token_stream, error_listener
+
+     def _protected_lines_from_multiline_tokens(self, tokens: Iterable[Token]) -> Set[int]:
+         protected: Set[int] = set()
+         for tok in tokens:
+             text = getattr(tok, "text", None)
+             if not text or "\n" not in text:
+                 continue
+
+             if tok.type in (LuaLexer.LONGSTRING, LuaLexer.COMMENT):
+                 start = int(getattr(tok, "line", 0) or 0)
+                 if start <= 0:
+                     continue
+                 end = start + text.count("\n")
+                 protected.update(range(start, end + 1))
+         return protected
+
+     def _indentation_by_line(self, tokens: Iterable[Token], *, num_lines: int) -> Dict[int, int]:
+         line_to_default_tokens: Dict[int, list[Token]] = {}
+         for tok in tokens:
+             if tok.type == Token.EOF:
+                 continue
+             if tok.channel != Token.DEFAULT_CHANNEL:
+                 continue
+             line_to_default_tokens.setdefault(int(tok.line), []).append(tok)
+
+         open_tokens = {
+             LuaLexer.THEN,
+             LuaLexer.DO,
+             LuaLexer.FUNCTION,
+             LuaLexer.REPEAT,
+             LuaLexer.OCU,  # {
+             LuaLexer.ELSE,
+             LuaLexer.ELSEIF,
+         }
+         close_tokens = {
+             LuaLexer.END,
+             LuaLexer.UNTIL,
+             LuaLexer.CCU,  # }
+         }
+         dedent_at_line_start = close_tokens | {LuaLexer.ELSE, LuaLexer.ELSEIF}
+
+         indent_level = 0
+         indent_by_line: Dict[int, int] = {}
+
+         for line_no in range(1, num_lines + 1):
+             tokens_on_line = line_to_default_tokens.get(line_no, [])
+             first = tokens_on_line[0] if tokens_on_line else None
+
+             if first is not None and first.type in dedent_at_line_start:
+                 indent_level = max(0, indent_level - 1)
+
+             indent_by_line[line_no] = indent_level
+
+             handled_first_dedent = first is not None and first.type in dedent_at_line_start
+             for idx, tok in enumerate(tokens_on_line):
+                 if tok.type in close_tokens:
+                     if idx == 0 and handled_first_dedent:
+                         continue
+                     indent_level = max(0, indent_level - 1)
+                 if tok.type in open_tokens:
+                     indent_level += 1
+
+         return indent_by_line
+
+
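For orientation, a minimal usage sketch of the class above. The import path is inferred from the file listing (entry 52, tactus/formatting/formatter.py); the input snippet and the expected output are illustrative assumptions, not output captured from this package:

    from tactus.formatting.formatter import FormattingError, TactusFormatter

    formatter = TactusFormatter(indent_width=2)  # 2-space soft tabs (the default)
    source = "if ready then\nrun()\nend\n"  # hypothetical Tactus Lua DSL snippet

    try:
        result = formatter.format_source(source)
        # The body line should gain one indent level:
        # "if ready then\n  run()\nend\n", with result.changed == True.
        print(result.formatted)
    except FormattingError:
        # Raised when the source fails to parse; only valid source is formatted.
        pass
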
+ def _count_lines(source: str) -> int:
+     if not source:
+         return 1
+     return source.count("\n") + 1
+
+
+ def _split_line_ending(line: str) -> tuple[str, str]:
+     if line.endswith("\r\n"):
+         return line[:-2], "\r\n"
+     if line.endswith("\n"):
+         return line[:-1], "\n"
+     if line.endswith("\r"):
+         return line[:-1], "\r"
+     return line, ""
+
+
+ def _rewrite_leading_indentation(
+     source: str,
+     *,
+     indent_by_line: Dict[int, int],
+     indent_width: int,
+     protected_lines: Set[int],
+ ) -> str:
+     lines = source.splitlines(keepends=True)
+     out: list[str] = []
+
+     for i, raw in enumerate(lines, start=1):
+         if i in protected_lines:
+             out.append(raw)
+             continue
+
+         body, ending = _split_line_ending(raw)
+         if body.strip() == "":
+             out.append(ending)
+             continue
+
+         desired = " " * (indent_width * max(0, int(indent_by_line.get(i, 0))))
+         stripped = body.lstrip(" \t")
+         out.append(desired + stripped + ending)
+
+     return "".join(out)
+
+
+ _LONGSTRING_OPEN_RE = re.compile(r"^\[(?P<eq>=*)\[(?P<body>.*)\](?P=eq)\]$", re.DOTALL)
+
+
+ def _rewrite_token_text(source: str, tokens: list[Token], token_stream: CommonTokenStream) -> str:
+     """
+     Token-based, semantic rewrites (idempotent):
+     - Indent embedded Specifications longstrings.
+     - Enforce spaces around '=', after commas, and around binary operators (within a single line).
+     - Remove trailing commas in multi-line table constructors.
+     """
+     rewriter = TokenStreamRewriter(token_stream)
+     default_tokens: list[Token] = [
+         t for t in tokens if t.type != Token.EOF and t.channel == Token.DEFAULT_CHANNEL
+     ]
+
+     _apply_specifications_longstring_rewrites(tokens, default_tokens, rewriter)
+     _apply_assignment_spacing(default_tokens, tokens, rewriter)
+     _apply_comma_spacing(default_tokens, tokens, rewriter)
+     _apply_binary_operator_spacing(default_tokens, tokens, rewriter)
+     _apply_multiline_table_trailing_comma_removal(default_tokens, tokens, rewriter)
+
+     rewritten = rewriter.getDefaultText()
+     return rewritten if rewritten != source else source
+
+
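Taken together, the passes dispatched above normalize intra-line spacing. A hypothetical before/after pair (assumed behavior, not captured output):

    before = "local total=1 +2 , 3\n"
    after = "local total = 1 + 2, 3\n"
    # '=' and the binary '+' each get one space per side; the space before the
    # comma is removed and exactly one space is kept after it.
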
+ def _apply_specifications_longstring_rewrites(
+     tokens: list[Token], default_tokens: list[Token], rewriter: TokenStreamRewriter
+ ) -> None:
+     longstring_token_indices: list[int] = []
+     for idx, tok in enumerate(default_tokens):
+         if tok.type != LuaLexer.LONGSTRING:
+             continue
+         if _is_specifications_call_longstring(default_tokens, idx):
+             longstring_token_indices.append(tok.tokenIndex)
+
+     for token_index in longstring_token_indices:
+         tok = tokens[token_index]
+         new_text = _format_specifications_longstring_text(tok.text or "")
+         if new_text != (tok.text or ""):
+             rewriter.replaceIndex(token_index, new_text)
+
+
+ def _apply_assignment_spacing(
+     default_tokens: list[Token], tokens: list[Token], rewriter: TokenStreamRewriter
+ ) -> None:
+     for i, tok in enumerate(default_tokens):
+         if tok.type != LuaLexer.EQ:
+             continue
+         if i == 0 or i + 1 >= len(default_tokens):
+             continue
+
+         prev = default_tokens[i - 1]
+         nxt = default_tokens[i + 1]
+         if prev.line != tok.line or nxt.line != tok.line:
+             continue
+
+         # Normalize the hidden token region between prev and '=' to a single space.
+         if prev.tokenIndex + 1 <= tok.tokenIndex - 1:
+             rewriter.replaceRange(prev.tokenIndex + 1, tok.tokenIndex - 1, " ")
+         else:
+             rewriter.insertBeforeIndex(tok.tokenIndex, " ")
+
+         # Normalize the hidden token region between '=' and next to a single space.
+         if tok.tokenIndex + 1 <= nxt.tokenIndex - 1:
+             rewriter.replaceRange(tok.tokenIndex + 1, nxt.tokenIndex - 1, " ")
+         else:
+             rewriter.insertAfterToken(tok, " ")
+
+
+ def _has_comment_or_newline_between(tokens: list[Token], left: Token, right: Token) -> bool:
+     if left.tokenIndex + 1 > right.tokenIndex - 1:
+         return False
+     for t in tokens[left.tokenIndex + 1 : right.tokenIndex]:
+         if t.type in (LuaLexer.NL, LuaLexer.COMMENT):
+             return True
+     return False
+
+
+ def _apply_comma_spacing(
+     default_tokens: list[Token], tokens: list[Token], rewriter: TokenStreamRewriter
+ ) -> None:
+     for i, tok in enumerate(default_tokens):
+         if tok.type != LuaLexer.COMMA:
+             continue
+         if i == 0 or i + 1 >= len(default_tokens):
+             continue
+         prev = default_tokens[i - 1]
+         nxt = default_tokens[i + 1]
+         if prev.line != tok.line or nxt.line != tok.line:
+             continue
+         if _has_comment_or_newline_between(tokens, prev, tok) or _has_comment_or_newline_between(
+             tokens, tok, nxt
+         ):
+             continue
+
+         if prev.tokenIndex + 1 <= tok.tokenIndex - 1:
+             rewriter.replaceRange(prev.tokenIndex + 1, tok.tokenIndex - 1, "")
+
+         if tok.tokenIndex + 1 <= nxt.tokenIndex - 1:
+             rewriter.replaceRange(tok.tokenIndex + 1, nxt.tokenIndex - 1, " ")
+         else:
+             rewriter.insertAfterToken(tok, " ")
+
+
+ def _apply_binary_operator_spacing(
+     default_tokens: list[Token], tokens: list[Token], rewriter: TokenStreamRewriter
+ ) -> None:
+     binary_ops = {
+         LuaLexer.PLUS,
+         LuaLexer.MINUS,
+         LuaLexer.STAR,
+         LuaLexer.SLASH,
+         LuaLexer.PER,
+         LuaLexer.SS,  # //
+         LuaLexer.DD,  # ..
+         LuaLexer.CARET,
+         LuaLexer.PIPE,
+         LuaLexer.AMP,
+         LuaLexer.LL,
+         LuaLexer.GG,
+         LuaLexer.LT,
+         LuaLexer.GT,
+         LuaLexer.LE,
+         LuaLexer.GE,
+         LuaLexer.EE,
+         LuaLexer.SQEQ,
+         LuaLexer.AND,
+         LuaLexer.OR,
+     }
+     unary_preceders = {
+         LuaLexer.EQ,
+         LuaLexer.COMMA,
+         LuaLexer.OP,
+         LuaLexer.OB,
+         LuaLexer.OCU,
+         LuaLexer.CC,  # '::'
+         LuaLexer.THEN,
+         LuaLexer.DO,
+         LuaLexer.ELSE,
+         LuaLexer.ELSEIF,
+         LuaLexer.RETURN,
+         LuaLexer.FOR,
+         LuaLexer.WHILE,
+         LuaLexer.IF,
+         LuaLexer.IN,
+         LuaLexer.AND,
+         LuaLexer.OR,
+     } | binary_ops
+
+     for i, tok in enumerate(default_tokens):
+         if tok.type not in binary_ops:
+             continue
+         if i == 0 or i + 1 >= len(default_tokens):
+             continue
+         prev = default_tokens[i - 1]
+         nxt = default_tokens[i + 1]
+         if prev.line != tok.line or nxt.line != tok.line:
+             continue
+         if _has_comment_or_newline_between(tokens, prev, tok) or _has_comment_or_newline_between(
+             tokens, tok, nxt
+         ):
+             continue
+
+         if tok.type in (LuaLexer.MINUS, LuaLexer.PLUS) and prev.type in unary_preceders:
+             continue
+
+         if prev.tokenIndex + 1 <= tok.tokenIndex - 1:
+             rewriter.replaceRange(prev.tokenIndex + 1, tok.tokenIndex - 1, " ")
+         else:
+             rewriter.insertBeforeIndex(tok.tokenIndex, " ")
+
+         if tok.tokenIndex + 1 <= nxt.tokenIndex - 1:
+             rewriter.replaceRange(tok.tokenIndex + 1, nxt.tokenIndex - 1, " ")
+         else:
+             rewriter.insertAfterToken(tok, " ")
+
+
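The unary_preceders set is what keeps sign operators untouched: a '-' or '+' is only spaced as binary when the preceding default-channel token could end an operand. Hypothetical cases (assumed behavior):

    # "y = -1"    -> unchanged: '-' follows '=', so it is a unary sign
    # "z = a-1"   -> "z = a - 1": '-' follows a NAME, so it is binary
    # "return -x" -> unchanged: '-' follows 'return'
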
+ def _apply_multiline_table_trailing_comma_removal(
+     default_tokens: list[Token], tokens: list[Token], rewriter: TokenStreamRewriter
+ ) -> None:
+     stack: list[int] = []
+     for idx, tok in enumerate(default_tokens):
+         if tok.type == LuaLexer.OCU:
+             stack.append(idx)
+             continue
+         if tok.type != LuaLexer.CCU:
+             continue
+         if not stack:
+             continue
+         open_idx = stack.pop()
+         open_tok = default_tokens[open_idx]
+         close_tok = tok
+         if open_tok.line == close_tok.line:
+             continue
+
+         j = idx - 1
+         if j <= open_idx:
+             continue
+         last = default_tokens[j]
+         if last.type == LuaLexer.COMMA:
+             rewriter.replaceIndex(last.tokenIndex, "")
+
+
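This pass only fires when the braces sit on different lines. A hypothetical pair (assumed behavior):

    before = "t = {\n  1,\n  2,\n}\n"  # multi-line: the comma after 2 is removed
    after = "t = {\n  1,\n  2\n}\n"
    # A single-line constructor such as "u = { 1, 2, }" keeps its trailing
    # comma, since '{' and '}' share a line.
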
+ def _is_specifications_call_longstring(default_tokens: list[Token], longstring_idx: int) -> bool:
+     # Accept: Specifications(LONGSTRING) where LONGSTRING is the first argument.
+     if longstring_idx < 1:
+         return False
+     prev = default_tokens[longstring_idx - 1]
+     if prev.type == LuaLexer.OP and longstring_idx >= 2:
+         name = default_tokens[longstring_idx - 2]
+         return name.type == LuaLexer.NAME and (name.text or "") == "Specifications"
+     name = prev
+     return name.type == LuaLexer.NAME and (name.text or "") == "Specifications"
+
+
+ def _format_specifications_longstring_text(text: str) -> str:
+     match = _LONGSTRING_OPEN_RE.match(text)
+     if not match:
+         return text
+
+     eq = match.group("eq")
+     body = match.group("body")
+     open_delim = f"[{eq}["
+     close_delim = f"]{eq}]"
+
+     formatted_body = _shift_longstring_body_indent_once(body, shift_spaces=2)
+     return f"{open_delim}{formatted_body}{close_delim}"
+
+
+ def _shift_longstring_body_indent_once(body: str, *, shift_spaces: int) -> str:
+     lines = body.splitlines(keepends=True)
+     already_formatted = False
+     for line in lines:
+         raw, _ending = _split_line_ending(line)
+         if raw.strip() == "":
+             continue
+         expanded = raw.replace("\t", " ")
+         already_formatted = expanded.startswith(" " * shift_spaces)
+         break
+
+     if already_formatted:
+         return body
+
+     out: list[str] = []
+     prefix = " " * shift_spaces
+     for line in lines:
+         raw, ending = _split_line_ending(line)
+         if raw.strip() == "":
+             out.append(raw + ending)
+             continue
+
+         expanded = raw.replace("\t", " ")
+         out.append(prefix + expanded + ending)
+     return "".join(out)
tactus/ide/__init__.py ADDED
@@ -0,0 +1,9 @@
+ """
+ Tactus IDE module.
+
+ Provides the IDE server and related utilities.
+ """
+
+ from tactus.ide.server import create_app
+
+ __all__ = ["create_app"]