foundry_mcp-0.8.22-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of foundry-mcp might be problematic.
- foundry_mcp/__init__.py +13 -0
- foundry_mcp/cli/__init__.py +67 -0
- foundry_mcp/cli/__main__.py +9 -0
- foundry_mcp/cli/agent.py +96 -0
- foundry_mcp/cli/commands/__init__.py +37 -0
- foundry_mcp/cli/commands/cache.py +137 -0
- foundry_mcp/cli/commands/dashboard.py +148 -0
- foundry_mcp/cli/commands/dev.py +446 -0
- foundry_mcp/cli/commands/journal.py +377 -0
- foundry_mcp/cli/commands/lifecycle.py +274 -0
- foundry_mcp/cli/commands/modify.py +824 -0
- foundry_mcp/cli/commands/plan.py +640 -0
- foundry_mcp/cli/commands/pr.py +393 -0
- foundry_mcp/cli/commands/review.py +667 -0
- foundry_mcp/cli/commands/session.py +472 -0
- foundry_mcp/cli/commands/specs.py +686 -0
- foundry_mcp/cli/commands/tasks.py +807 -0
- foundry_mcp/cli/commands/testing.py +676 -0
- foundry_mcp/cli/commands/validate.py +982 -0
- foundry_mcp/cli/config.py +98 -0
- foundry_mcp/cli/context.py +298 -0
- foundry_mcp/cli/logging.py +212 -0
- foundry_mcp/cli/main.py +44 -0
- foundry_mcp/cli/output.py +122 -0
- foundry_mcp/cli/registry.py +110 -0
- foundry_mcp/cli/resilience.py +178 -0
- foundry_mcp/cli/transcript.py +217 -0
- foundry_mcp/config.py +1454 -0
- foundry_mcp/core/__init__.py +144 -0
- foundry_mcp/core/ai_consultation.py +1773 -0
- foundry_mcp/core/batch_operations.py +1202 -0
- foundry_mcp/core/cache.py +195 -0
- foundry_mcp/core/capabilities.py +446 -0
- foundry_mcp/core/concurrency.py +898 -0
- foundry_mcp/core/context.py +540 -0
- foundry_mcp/core/discovery.py +1603 -0
- foundry_mcp/core/error_collection.py +728 -0
- foundry_mcp/core/error_store.py +592 -0
- foundry_mcp/core/health.py +749 -0
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/journal.py +700 -0
- foundry_mcp/core/lifecycle.py +412 -0
- foundry_mcp/core/llm_config.py +1376 -0
- foundry_mcp/core/llm_patterns.py +510 -0
- foundry_mcp/core/llm_provider.py +1569 -0
- foundry_mcp/core/logging_config.py +374 -0
- foundry_mcp/core/metrics_persistence.py +584 -0
- foundry_mcp/core/metrics_registry.py +327 -0
- foundry_mcp/core/metrics_store.py +641 -0
- foundry_mcp/core/modifications.py +224 -0
- foundry_mcp/core/naming.py +146 -0
- foundry_mcp/core/observability.py +1216 -0
- foundry_mcp/core/otel.py +452 -0
- foundry_mcp/core/otel_stubs.py +264 -0
- foundry_mcp/core/pagination.py +255 -0
- foundry_mcp/core/progress.py +387 -0
- foundry_mcp/core/prometheus.py +564 -0
- foundry_mcp/core/prompts/__init__.py +464 -0
- foundry_mcp/core/prompts/fidelity_review.py +691 -0
- foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
- foundry_mcp/core/prompts/plan_review.py +627 -0
- foundry_mcp/core/providers/__init__.py +237 -0
- foundry_mcp/core/providers/base.py +515 -0
- foundry_mcp/core/providers/claude.py +472 -0
- foundry_mcp/core/providers/codex.py +637 -0
- foundry_mcp/core/providers/cursor_agent.py +630 -0
- foundry_mcp/core/providers/detectors.py +515 -0
- foundry_mcp/core/providers/gemini.py +426 -0
- foundry_mcp/core/providers/opencode.py +718 -0
- foundry_mcp/core/providers/opencode_wrapper.js +308 -0
- foundry_mcp/core/providers/package-lock.json +24 -0
- foundry_mcp/core/providers/package.json +25 -0
- foundry_mcp/core/providers/registry.py +607 -0
- foundry_mcp/core/providers/test_provider.py +171 -0
- foundry_mcp/core/providers/validation.py +857 -0
- foundry_mcp/core/rate_limit.py +427 -0
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +528 -0
- foundry_mcp/core/research/models.py +1234 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +25 -0
- foundry_mcp/core/research/workflows/base.py +298 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +539 -0
- foundry_mcp/core/research/workflows/deep_research.py +4142 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/resilience.py +600 -0
- foundry_mcp/core/responses.py +1624 -0
- foundry_mcp/core/review.py +366 -0
- foundry_mcp/core/security.py +438 -0
- foundry_mcp/core/spec.py +4119 -0
- foundry_mcp/core/task.py +2463 -0
- foundry_mcp/core/testing.py +839 -0
- foundry_mcp/core/validation.py +2357 -0
- foundry_mcp/dashboard/__init__.py +32 -0
- foundry_mcp/dashboard/app.py +119 -0
- foundry_mcp/dashboard/components/__init__.py +17 -0
- foundry_mcp/dashboard/components/cards.py +88 -0
- foundry_mcp/dashboard/components/charts.py +177 -0
- foundry_mcp/dashboard/components/filters.py +136 -0
- foundry_mcp/dashboard/components/tables.py +195 -0
- foundry_mcp/dashboard/data/__init__.py +11 -0
- foundry_mcp/dashboard/data/stores.py +433 -0
- foundry_mcp/dashboard/launcher.py +300 -0
- foundry_mcp/dashboard/views/__init__.py +12 -0
- foundry_mcp/dashboard/views/errors.py +217 -0
- foundry_mcp/dashboard/views/metrics.py +164 -0
- foundry_mcp/dashboard/views/overview.py +96 -0
- foundry_mcp/dashboard/views/providers.py +83 -0
- foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
- foundry_mcp/dashboard/views/tool_usage.py +139 -0
- foundry_mcp/prompts/__init__.py +9 -0
- foundry_mcp/prompts/workflows.py +525 -0
- foundry_mcp/resources/__init__.py +9 -0
- foundry_mcp/resources/specs.py +591 -0
- foundry_mcp/schemas/__init__.py +38 -0
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +414 -0
- foundry_mcp/server.py +150 -0
- foundry_mcp/tools/__init__.py +10 -0
- foundry_mcp/tools/unified/__init__.py +92 -0
- foundry_mcp/tools/unified/authoring.py +3620 -0
- foundry_mcp/tools/unified/context_helpers.py +98 -0
- foundry_mcp/tools/unified/documentation_helpers.py +268 -0
- foundry_mcp/tools/unified/environment.py +1341 -0
- foundry_mcp/tools/unified/error.py +479 -0
- foundry_mcp/tools/unified/health.py +225 -0
- foundry_mcp/tools/unified/journal.py +841 -0
- foundry_mcp/tools/unified/lifecycle.py +640 -0
- foundry_mcp/tools/unified/metrics.py +777 -0
- foundry_mcp/tools/unified/plan.py +876 -0
- foundry_mcp/tools/unified/pr.py +294 -0
- foundry_mcp/tools/unified/provider.py +589 -0
- foundry_mcp/tools/unified/research.py +1283 -0
- foundry_mcp/tools/unified/review.py +1042 -0
- foundry_mcp/tools/unified/review_helpers.py +314 -0
- foundry_mcp/tools/unified/router.py +102 -0
- foundry_mcp/tools/unified/server.py +565 -0
- foundry_mcp/tools/unified/spec.py +1283 -0
- foundry_mcp/tools/unified/task.py +3846 -0
- foundry_mcp/tools/unified/test.py +431 -0
- foundry_mcp/tools/unified/verification.py +520 -0
- foundry_mcp-0.8.22.dist-info/METADATA +344 -0
- foundry_mcp-0.8.22.dist-info/RECORD +153 -0
- foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
- foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
- foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
foundry_mcp/core/llm_patterns.py
@@ -0,0 +1,510 @@
"""
LLM-friendly response patterns for foundry-mcp.

Provides helpers for structuring tool responses to optimize LLM consumption,
including progressive disclosure, batch operation formatting, and context-aware
output sizing.

See docs/mcp_best_practices/15-concurrency-patterns.md for guidance.

Example:
    from foundry_mcp.core.llm_patterns import (
        progressive_disclosure, DetailLevel, batch_response
    )

    # Progressive disclosure based on detail level
    data = {"id": "123", "name": "Item", "details": {...}, "metadata": {...}}
    result = progressive_disclosure(data, level=DetailLevel.SUMMARY)

    # Batch operation response
    response = batch_response(results, errors, total=100)
"""

import logging
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, Optional, TypeVar, Union

logger = logging.getLogger(__name__)

# Schema version for LLM patterns module
SCHEMA_VERSION = "1.0.0"

T = TypeVar("T")


class DetailLevel(str, Enum):
    """Detail levels for progressive disclosure.

    Controls how much information is included in responses:

    SUMMARY: Minimal info for quick overview (IDs, status, counts)
    STANDARD: Default level with common fields (adds descriptions, timestamps)
    FULL: Complete data including all optional/verbose fields

    Example:
        >>> level = DetailLevel.SUMMARY
        >>> if level == DetailLevel.FULL:
        ...     include_metadata = True
    """

    SUMMARY = "summary"
    STANDARD = "standard"
    FULL = "full"


@dataclass
class DisclosureConfig:
    """Configuration for progressive disclosure.

    Attributes:
        summary_fields: Fields to include at SUMMARY level
        standard_fields: Additional fields for STANDARD level
        full_fields: Additional fields for FULL level
        max_list_items: Max items in lists at each level {level: count}
        max_string_length: Max string length at each level {level: length}
        truncation_suffix: Suffix to add when truncating
    """

    summary_fields: List[str] = field(default_factory=lambda: ["id", "name", "status"])
    standard_fields: List[str] = field(
        default_factory=lambda: ["description", "created_at", "updated_at"]
    )
    full_fields: List[str] = field(
        default_factory=lambda: ["metadata", "details", "history"]
    )
    max_list_items: Dict[DetailLevel, int] = field(
        default_factory=lambda: {
            DetailLevel.SUMMARY: 5,
            DetailLevel.STANDARD: 20,
            DetailLevel.FULL: 100,
        }
    )
    max_string_length: Dict[DetailLevel, int] = field(
        default_factory=lambda: {
            DetailLevel.SUMMARY: 100,
            DetailLevel.STANDARD: 500,
            DetailLevel.FULL: 5000,
        }
    )
    truncation_suffix: str = "..."


# Default configuration
DEFAULT_DISCLOSURE_CONFIG = DisclosureConfig()


def progressive_disclosure(
    data: Union[Dict[str, Any], List[Any]],
    level: DetailLevel = DetailLevel.STANDARD,
    *,
    config: Optional[DisclosureConfig] = None,
    include_truncation_info: bool = True,
) -> Dict[str, Any]:
    """Apply progressive disclosure to data based on detail level.

    Filters and truncates data based on the requested detail level,
    making responses more manageable for LLM consumption.

    Args:
        data: Dictionary or list to process
        level: Detail level (SUMMARY, STANDARD, FULL)
        config: Custom disclosure configuration
        include_truncation_info: Add _truncated metadata when content is cut

    Returns:
        Processed data with appropriate fields and truncation

    Example:
        >>> data = {
        ...     "id": "123",
        ...     "name": "Task",
        ...     "status": "active",
        ...     "description": "A long description...",
        ...     "metadata": {"complex": "data"},
        ... }
        >>> result = progressive_disclosure(data, level=DetailLevel.SUMMARY)
        >>> print(result.keys())  # Only id, name, status
    """
    cfg = config or DEFAULT_DISCLOSURE_CONFIG

    if isinstance(data, list):
        return _disclose_list(data, level, cfg, include_truncation_info)

    return _disclose_dict(data, level, cfg, include_truncation_info)


def _disclose_dict(
    data: Dict[str, Any],
    level: DetailLevel,
    config: DisclosureConfig,
    include_truncation_info: bool,
) -> Dict[str, Any]:
    """Apply disclosure to a dictionary."""
    # Determine which fields to include
    allowed_fields = set(config.summary_fields)
    if level in (DetailLevel.STANDARD, DetailLevel.FULL):
        allowed_fields.update(config.standard_fields)
    if level == DetailLevel.FULL:
        allowed_fields.update(config.full_fields)

    result: Dict[str, Any] = {}
    truncated_fields: List[str] = []

    for key, value in data.items():
        # Always include if in allowed fields or if FULL level
        if key in allowed_fields or level == DetailLevel.FULL:
            processed_value, was_truncated = _process_value(value, level, config)
            result[key] = processed_value
            if was_truncated:
                truncated_fields.append(key)
        else:
            truncated_fields.append(key)

    if include_truncation_info and truncated_fields:
        result["_truncated"] = {
            "level": level.value,
            "omitted_fields": [f for f in truncated_fields if f not in result],
            "truncated_fields": [f for f in truncated_fields if f in result],
        }

    return result


def _disclose_list(
    data: List[Any],
    level: DetailLevel,
    config: DisclosureConfig,
    include_truncation_info: bool,
) -> Dict[str, Any]:
    """Apply disclosure to a list."""
    max_items = config.max_list_items.get(level, 20)
    total = len(data)
    truncated = total > max_items

    items = []
    for item in data[:max_items]:
        if isinstance(item, dict):
            items.append(_disclose_dict(item, level, config, include_truncation_info=False))
        else:
            processed, _ = _process_value(item, level, config)
            items.append(processed)

    result: Dict[str, Any] = {
        "items": items,
        "count": len(items),
        "total": total,
    }

    if include_truncation_info and truncated:
        result["_truncated"] = {
            "level": level.value,
            "shown": len(items),
            "total": total,
            "remaining": total - len(items),
        }

    return result


def _process_value(
    value: Any,
    level: DetailLevel,
    config: DisclosureConfig,
) -> tuple[Any, bool]:
    """Process a single value, truncating if necessary.

    Returns:
        Tuple of (processed_value, was_truncated)
    """
    max_length = config.max_string_length.get(level, 500)
    max_items = config.max_list_items.get(level, 20)

    if isinstance(value, str):
        if len(value) > max_length:
            return value[:max_length] + config.truncation_suffix, True
        return value, False

    if isinstance(value, list):
        if len(value) > max_items:
            return value[:max_items], True
        return value, False

    if isinstance(value, dict):
        # Recursively process nested dicts at non-FULL levels
        if level != DetailLevel.FULL and len(str(value)) > max_length:
            # Truncate by keeping only first few keys
            keys = list(value.keys())[:5]
            return {k: value[k] for k in keys}, True
        return value, False

    return value, False


def get_detail_level(
    requested: Optional[str] = None,
    default: DetailLevel = DetailLevel.STANDARD,
) -> DetailLevel:
    """Parse detail level from string parameter.

    Args:
        requested: Requested level as string (or None for default)
        default: Default level if not specified or invalid

    Returns:
        Parsed DetailLevel enum value

    Example:
        >>> level = get_detail_level("summary")
        >>> level == DetailLevel.SUMMARY
        True
    """
    if requested is None:
        return default

    try:
        return DetailLevel(requested.lower())
    except ValueError:
        logger.warning(f"Invalid detail level '{requested}', using default '{default.value}'")
        return default


# -----------------------------------------------------------------------------
# Batch Operation Patterns
# -----------------------------------------------------------------------------


@dataclass
class BatchItemResult:
    """Result for a single item in a batch operation.

    Attributes:
        item_id: Identifier for the item (index or key)
        success: Whether the operation succeeded
        result: Operation result if successful
        error: Error message if failed
        error_code: Machine-readable error code if failed
    """

    item_id: Union[int, str]
    success: bool
    result: Optional[Any] = None
    error: Optional[str] = None
    error_code: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for response."""
        d: Dict[str, Any] = {
            "item_id": self.item_id,
            "success": self.success,
        }
        if self.success:
            d["result"] = self.result
        else:
            d["error"] = self.error
            if self.error_code:
                d["error_code"] = self.error_code
        return d


@dataclass
class BatchResult:
    """Result of a batch operation with separate success/failure tracking.

    Designed for LLM consumption with clear summary and separated results.

    Attributes:
        total: Total items processed
        succeeded: Count of successful operations
        failed: Count of failed operations
        results: List of successful results
        errors: List of failed item details
        warnings: Any warnings generated during processing
    """

    total: int = 0
    succeeded: int = 0
    failed: int = 0
    results: List[BatchItemResult] = field(default_factory=list)
    errors: List[BatchItemResult] = field(default_factory=list)
    warnings: List[str] = field(default_factory=list)

    @property
    def all_succeeded(self) -> bool:
        """Check if all operations succeeded."""
        return self.failed == 0

    @property
    def success_rate(self) -> float:
        """Get success rate as percentage."""
        if self.total == 0:
            return 100.0
        return (self.succeeded / self.total) * 100

    def to_response(self, include_details: bool = True) -> Dict[str, Any]:
        """Convert to LLM-friendly response format.

        Args:
            include_details: Include individual results/errors

        Returns:
            Dictionary suitable for tool response
        """
        response: Dict[str, Any] = {
            "summary": f"Processed {self.succeeded}/{self.total} items successfully",
            "counts": {
                "total": self.total,
                "succeeded": self.succeeded,
                "failed": self.failed,
                "success_rate": round(self.success_rate, 1),
            },
        }

        if include_details:
            if self.results:
                response["results"] = [r.to_dict() for r in self.results]
            if self.errors:
                response["errors"] = [e.to_dict() for e in self.errors]

        if self.warnings:
            response["warnings"] = self.warnings

        return response


def batch_response(
    results: List[Any],
    errors: Optional[List[Dict[str, Any]]] = None,
    *,
    total: Optional[int] = None,
    warnings: Optional[List[str]] = None,
) -> Dict[str, Any]:
    """Create a batch operation response from results and errors.

    Convenience function for creating LLM-friendly batch responses.

    Args:
        results: List of successful results
        errors: List of error dicts with 'item_id', 'error', optional 'error_code'
        total: Total items (defaults to len(results) + len(errors))
        warnings: Optional warnings to include

    Returns:
        LLM-friendly response dictionary

    Example:
        >>> results = [{"id": "1", "data": "..."}, {"id": "2", "data": "..."}]
        >>> errors = [{"item_id": "3", "error": "Not found", "error_code": "NOT_FOUND"}]
        >>> response = batch_response(results, errors)
        >>> print(response["summary"])
        'Processed 2/3 items successfully'
    """
    error_list = errors or []
    actual_total = total or (len(results) + len(error_list))

    batch = BatchResult(
        total=actual_total,
        succeeded=len(results),
        failed=len(error_list),
        warnings=warnings or [],
    )

    # Convert results to BatchItemResult
    for i, result in enumerate(results):
        item_id = result.get("id", i) if isinstance(result, dict) else i
        batch.results.append(BatchItemResult(
            item_id=item_id,
            success=True,
            result=result,
        ))

    # Convert errors to BatchItemResult
    for err in error_list:
        batch.errors.append(BatchItemResult(
            item_id=err.get("item_id", "unknown"),
            success=False,
            error=err.get("error", "Unknown error"),
            error_code=err.get("error_code"),
        ))

    return batch.to_response()


def paginated_batch_response(
    results: List[Any],
    *,
    page_size: int = 50,
    offset: int = 0,
    total: int,
    errors: Optional[List[Dict[str, Any]]] = None,
    warnings: Optional[List[str]] = None,
) -> Dict[str, Any]:
    """Create a paginated batch response for large result sets.

    Includes pagination metadata for LLMs to understand result scope.

    Args:
        results: Results for current page
        page_size: Number of items per page
        offset: Current page offset
        total: Total items available
        errors: Errors for current page
        warnings: Optional warnings

    Returns:
        Response with pagination metadata

    Example:
        >>> response = paginated_batch_response(
        ...     results=items[:50],
        ...     page_size=50,
        ...     offset=0,
        ...     total=150,
        ... )
        >>> response["pagination"]["has_more"]
        True
    """
    response = batch_response(
        results=results,
        errors=errors,
        total=len(results) + len(errors or []),
        warnings=warnings,
    )

    has_more = offset + len(results) < total
    response["pagination"] = {
        "offset": offset,
        "page_size": page_size,
        "returned": len(results),
        "total": total,
        "has_more": has_more,
        "next_offset": offset + len(results) if has_more else None,
    }

    if has_more:
        remaining = total - (offset + len(results))
        response["warnings"] = response.get("warnings", [])
        response["warnings"].append(
            f"Showing {len(results)} of {total} items. "
            f"{remaining} more available with offset={offset + len(results)}"
        )

    return response


# Export all public symbols
__all__ = [
    # Schema
    "SCHEMA_VERSION",
    # Progressive disclosure
    "DetailLevel",
    "DisclosureConfig",
    "DEFAULT_DISCLOSURE_CONFIG",
    "progressive_disclosure",
    "get_detail_level",
    # Batch operations
    "BatchItemResult",
    "BatchResult",
    "batch_response",
    "paginated_batch_response",
]