claude-mpm 3.0.1__py3-none-any.whl → 3.1.0__py3-none-any.whl

This diff reflects the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
claude_mpm/schemas/workflow_validator.py ADDED
@@ -0,0 +1,411 @@
+ #!/usr/bin/env python3
+ """
+ Ticket Workflow Schema Validator
+ ================================
+
+ Utility module for validating and working with ticket workflow schemas.
+
+ WHY: This module exists to ensure workflow definitions are valid and consistent
+ before they are used in the ticketing system. By validating workflows at load
+ time, we prevent runtime errors and ensure all workflows follow the same structure.
+
+ DESIGN DECISIONS:
+ - Uses jsonschema for validation to leverage existing JSON Schema standards
+ - Provides both validation and helper functions for common workflow operations
+ - Returns detailed error messages to help users fix invalid workflows
+ - Validates business logic beyond just schema structure
+
+ Usage:
+     from claude_mpm.schemas.workflow_validator import WorkflowValidator
+
+     validator = WorkflowValidator()
+
+     # Validate a workflow
+     errors = validator.validate_workflow(workflow_dict)
+     if errors:
+         print(f"Validation failed: {errors}")
+
+     # Load and validate from file
+     workflow = validator.load_workflow_file("standard_workflow.json")
+ """
+
+ import json
+ import logging
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional, Set, Tuple
+
+ try:
+     import jsonschema
+     from jsonschema import Draft7Validator, validators
+     JSONSCHEMA_AVAILABLE = True
+ except ImportError:
+     JSONSCHEMA_AVAILABLE = False
+     logging.warning("jsonschema not available, workflow validation disabled")
+
+ logger = logging.getLogger(__name__)
+
+
+ class WorkflowValidator:
+     """
+     Validates ticket workflow definitions against the schema.
+
+     WHY: Ensures workflow definitions are valid before use, preventing
+     runtime errors and ensuring consistency across all workflows.
+     """
+
+     def __init__(self, schema_path: Optional[Path] = None):
+         """
+         Initialize the workflow validator.
+
+         Args:
+             schema_path: Path to the workflow schema file. If not provided,
+                 uses the default schema location.
+         """
+         self.schema_path = schema_path or self._get_default_schema_path()
+         self.schema = self._load_schema()
+         self._validator = None
+
+         if JSONSCHEMA_AVAILABLE and self.schema:
+             self._validator = Draft7Validator(self.schema)
+
+     def _get_default_schema_path(self) -> Path:
+         """Get the default path to the workflow schema."""
+         return Path(__file__).parent / "ticket_workflow_schema.json"
+
+     def _load_schema(self) -> Optional[Dict[str, Any]]:
+         """Load the workflow schema from file."""
+         try:
+             if self.schema_path.exists():
+                 with open(self.schema_path, 'r') as f:
+                     return json.load(f)
+             else:
+                 logger.error(f"Schema file not found: {self.schema_path}")
+                 return None
+         except Exception as e:
+             logger.error(f"Failed to load schema: {e}")
+             return None
+
+     def validate_workflow(self, workflow: Dict[str, Any]) -> List[str]:
+         """
+         Validate a workflow definition.
+
+         WHY: Validates both schema structure and business logic to ensure
+         the workflow is not only syntactically correct but also logically
+         consistent and usable.
+
+         Args:
+             workflow: Workflow definition dictionary
+
+         Returns:
+             List of validation error messages (empty if valid)
+         """
+         errors = []
+
+         # Schema validation
+         if JSONSCHEMA_AVAILABLE and self._validator:
+             for error in self._validator.iter_errors(workflow):
+                 errors.append(f"Schema error at {'.'.join(str(p) for p in error.path)}: {error.message}")
+
+         # Business logic validation
+         if not errors:  # Only validate logic if schema is valid
+             logic_errors = self._validate_business_logic(workflow)
+             errors.extend(logic_errors)
+
+         return errors
+
+     def _validate_business_logic(self, workflow: Dict[str, Any]) -> List[str]:
+         """
+         Validate workflow business logic beyond schema requirements.
+
+         WHY: Schema validation ensures structure, but we also need to validate
+         that the workflow makes logical sense (e.g., transitions reference
+         existing statuses, mappings are consistent, etc.)
+         """
+         errors = []
+
+         # Extract status and resolution IDs
+         status_ids = {s['id'] for s in workflow.get('status_states', {}).get('states', [])}
+         resolution_ids = {r['id'] for r in workflow.get('resolution_types', {}).get('types', [])}
+
+         # Validate default status exists
+         default_status = workflow.get('status_states', {}).get('default_status')
+         if default_status and default_status not in status_ids:
+             errors.append(f"Default status '{default_status}' not found in status definitions")
+
+         # Validate transitions reference existing statuses
+         for i, transition in enumerate(workflow.get('transitions', {}).get('rules', [])):
+             from_status = transition.get('from_status')
+             to_status = transition.get('to_status')
+
+             if from_status != '*' and from_status not in status_ids:
+                 errors.append(f"Transition {i}: from_status '{from_status}' not found")
+
+             if to_status not in status_ids:
+                 errors.append(f"Transition {i}: to_status '{to_status}' not found")
+
+         # Validate status-resolution mappings
+         for i, mapping in enumerate(workflow.get('status_resolution_mapping', {}).get('mappings', [])):
+             status_id = mapping.get('status_id')
+             if status_id not in status_ids:
+                 errors.append(f"Status-resolution mapping {i}: status '{status_id}' not found")
+
+             # Check resolution IDs (unless wildcard)
+             for resolution in mapping.get('allowed_resolutions', []):
+                 if resolution != '*' and resolution not in resolution_ids:
+                     errors.append(f"Status-resolution mapping {i}: resolution '{resolution}' not found")
+
+             # Check default resolution exists in allowed resolutions
+             default_res = mapping.get('default_resolution')
+             allowed_res = mapping.get('allowed_resolutions', [])
+             if default_res and '*' not in allowed_res and default_res not in allowed_res:
+                 errors.append(f"Status-resolution mapping {i}: default resolution '{default_res}' not in allowed resolutions")
+
+         # Validate there's at least one initial status
+         initial_statuses = [s for s in workflow.get('status_states', {}).get('states', [])
+                             if s.get('category') == 'initial']
+         if not initial_statuses:
+             errors.append("No initial status defined (category='initial')")
+
+         # Validate there's at least one terminal status
+         terminal_statuses = [s for s in workflow.get('status_states', {}).get('states', [])
+                              if s.get('category') == 'terminal']
+         if not terminal_statuses:
+             errors.append("No terminal status defined (category='terminal')")
+
+         # Validate escalation rules reference existing statuses
+         for i, rule in enumerate(workflow.get('business_rules', {}).get('escalation_rules', [])):
+             condition_status = rule.get('condition', {}).get('status')
+             if condition_status and condition_status not in status_ids:
+                 errors.append(f"Escalation rule {i}: condition status '{condition_status}' not found")
+
+             action_status = rule.get('action', {}).get('change_status')
+             if action_status and action_status not in status_ids:
+                 errors.append(f"Escalation rule {i}: action status '{action_status}' not found")
+
+         return errors
+
+     def load_workflow_file(self, filepath: Path) -> Optional[Dict[str, Any]]:
+         """
+         Load and validate a workflow from file.
+
+         Args:
+             filepath: Path to workflow JSON file
+
+         Returns:
+             Validated workflow dictionary or None if invalid
+         """
+         try:
+             with open(filepath, 'r') as f:
+                 workflow = json.load(f)
+
+             errors = self.validate_workflow(workflow)
+             if errors:
+                 logger.error(f"Workflow validation failed for {filepath}:")
+                 for error in errors:
+                     logger.error(f"  - {error}")
+                 return None
+
+             return workflow
+
+         except Exception as e:
+             logger.error(f"Failed to load workflow from {filepath}: {e}")
+             return None
+
+     def get_valid_transitions(self, workflow: Dict[str, Any], from_status: str) -> List[Dict[str, Any]]:
+         """
+         Get valid transitions from a given status.
+
+         WHY: Helper function to easily determine what transitions are available
+         from a specific status, supporting both specific and wildcard rules.
+
+         Args:
+             workflow: Workflow definition
+             from_status: Current status ID
+
+         Returns:
+             List of valid transition rules
+         """
+         transitions = []
+
+         for rule in workflow.get('transitions', {}).get('rules', []):
+             rule_from = rule.get('from_status')
+             if rule_from == from_status or rule_from == '*':
+                 transitions.append(rule)
+
+         return transitions
+
+     def get_allowed_resolutions(self, workflow: Dict[str, Any], status: str) -> Tuple[List[str], bool]:
+         """
+         Get allowed resolutions for a given status.
+
+         WHY: Helper function to determine which resolutions are valid for a
+         specific status, and whether a resolution is required.
+
+         Args:
+             workflow: Workflow definition
+             status: Status ID
+
+         Returns:
+             Tuple of (allowed_resolution_ids, is_required)
+         """
+         for mapping in workflow.get('status_resolution_mapping', {}).get('mappings', []):
+             if mapping.get('status_id') == status:
+                 allowed = mapping.get('allowed_resolutions', [])
+                 required = mapping.get('requires_resolution', False)
+
+                 # Handle wildcard
+                 if '*' in allowed:
+                     all_resolutions = [r['id'] for r in workflow.get('resolution_types', {}).get('types', [])]
+                     return all_resolutions, required
+
+                 return allowed, required
+
+         return [], False
+
+     def validate_transition(self, workflow: Dict[str, Any], from_status: str,
+                             to_status: str, data: Dict[str, Any]) -> List[str]:
+         """
+         Validate a specific status transition.
+
+         WHY: Ensures a proposed transition is valid according to the workflow
+         rules and that all required fields are provided.
+
+         Args:
+             workflow: Workflow definition
+             from_status: Current status
+             to_status: Target status
+             data: Transition data (should include required fields)
+
+         Returns:
+             List of validation errors (empty if valid)
+         """
+         errors = []
+
+         # Find applicable transition rules
+         valid_transitions = self.get_valid_transitions(workflow, from_status)
+         matching_rule = None
+
+         for rule in valid_transitions:
+             if rule.get('to_status') == to_status:
+                 matching_rule = rule
+                 break
+
+         if not matching_rule:
+             errors.append(f"No valid transition from '{from_status}' to '{to_status}'")
+             return errors
+
+         # Check required fields
+         required_fields = matching_rule.get('required_fields', [])
+         for field in required_fields:
+             if field not in data or data[field] is None:
+                 errors.append(f"Required field '{field}' missing for transition")
+
+         # If transitioning to a terminal status, check resolution requirements
+         target_status = next((s for s in workflow.get('status_states', {}).get('states', [])
+                               if s['id'] == to_status), None)
+
+         if target_status and target_status.get('category') == 'terminal':
+             allowed_resolutions, requires_resolution = self.get_allowed_resolutions(workflow, to_status)
+
+             if requires_resolution and 'resolution' not in data:
+                 errors.append(f"Resolution required for status '{to_status}'")
+             elif 'resolution' in data and data['resolution'] not in allowed_resolutions:
+                 errors.append(f"Resolution '{data['resolution']}' not allowed for status '{to_status}'")
+
+         return errors
+
+
+ def create_example_workflows():
+     """
+     Create example workflow files for testing and demonstration.
+
+     WHY: Provides ready-to-use workflow examples that demonstrate different
+     use cases and configuration options.
+     """
+     examples_dir = Path(__file__).parent / "examples"
+     examples_dir.mkdir(exist_ok=True)
+
+     # Bug tracking workflow
+     bug_workflow = {
+         "schema_version": "1.0.0",
+         "workflow_id": "bug_tracking",
+         "workflow_version": "1.0.0",
+         "metadata": {
+             "name": "Bug Tracking Workflow",
+             "description": "Workflow optimized for software bug tracking",
+             "workflow_type": "bug_tracking"
+         },
+         "status_states": {
+             "states": [
+                 {"id": "reported", "name": "Reported", "category": "initial"},
+                 {"id": "confirmed", "name": "Confirmed", "category": "active"},
+                 {"id": "in_progress", "name": "In Progress", "category": "active"},
+                 {"id": "fixed", "name": "Fixed", "category": "active"},
+                 {"id": "verified", "name": "Verified", "category": "terminal"},
+                 {"id": "closed", "name": "Closed", "category": "terminal"},
+                 {"id": "rejected", "name": "Rejected", "category": "terminal"}
+             ]
+         },
+         "resolution_types": {
+             "types": [
+                 {"id": "fixed", "name": "Fixed", "category": "successful"},
+                 {"id": "cannot_reproduce", "name": "Cannot Reproduce", "category": "invalid"},
+                 {"id": "duplicate", "name": "Duplicate", "category": "invalid"},
+                 {"id": "by_design", "name": "By Design", "category": "invalid"},
+                 {"id": "wont_fix", "name": "Won't Fix", "category": "unsuccessful"}
+             ]
+         },
+         "transitions": {
+             "rules": [
+                 {"from_status": "reported", "to_status": "confirmed", "name": "Confirm Bug"},
+                 {"from_status": "reported", "to_status": "rejected", "name": "Reject"},
+                 {"from_status": "confirmed", "to_status": "in_progress", "name": "Start Fix"},
+                 {"from_status": "in_progress", "to_status": "fixed", "name": "Mark Fixed"},
+                 {"from_status": "fixed", "to_status": "verified", "name": "Verify Fix"},
+                 {"from_status": "verified", "to_status": "closed", "name": "Close"}
+             ]
+         },
+         "status_resolution_mapping": {
+             "mappings": [
+                 {
+                     "status_id": "verified",
+                     "allowed_resolutions": ["fixed"],
+                     "requires_resolution": True
+                 },
+                 {
+                     "status_id": "closed",
+                     "allowed_resolutions": ["*"],
+                     "requires_resolution": True
+                 },
+                 {
+                     "status_id": "rejected",
+                     "allowed_resolutions": ["cannot_reproduce", "duplicate", "by_design", "wont_fix"],
+                     "requires_resolution": True
+                 }
+             ]
+         }
+     }
+
+     with open(examples_dir / "bug_tracking_workflow.json", 'w') as f:
+         json.dump(bug_workflow, f, indent=2)
+
+     logger.info("Created example workflow files")
+
+
+ if __name__ == "__main__":
+     # Example usage
+     validator = WorkflowValidator()
+
+     # Load and validate the standard workflow
+     example_path = Path(__file__).parent / "examples" / "standard_workflow.json"
+     if example_path.exists():
+         workflow = validator.load_workflow_file(example_path)
+         if workflow:
+             print(f"Successfully loaded workflow: {workflow['metadata']['name']}")
+
+             # Test some helper functions
+             transitions = validator.get_valid_transitions(workflow, "open")
+             print(f"\nValid transitions from 'open': {[t['name'] for t in transitions]}")
+
+             resolutions, required = validator.get_allowed_resolutions(workflow, "resolved")
+             print(f"\nResolutions for 'resolved' status: {resolutions} (required: {required})")
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: claude-mpm
- Version: 3.0.1
+ Version: 3.1.0
  Summary: Claude Multi-agent Project Manager - Clean orchestration with ticket management
  Home-page: https://github.com/bobmatnyc/claude-mpm
  Author: Claude MPM Team
@@ -220,6 +220,8 @@ Claude MPM provides a modular framework for extending Claude's capabilities:
  - **File System Protection**: Automatic sandboxing prevents file operations outside the working directory
  - **Path Traversal Prevention**: Blocks attempts to escape the project directory using `..` or symlinks
  - **Write Operation Control**: All write operations are validated while read operations remain unrestricted
+ - **Agent-Level Restrictions**: Each agent can have custom file access boundaries via `file_access` configuration
+ - **PM Agent Orchestration**: New PM (Project Manager) agent ensures all sub-agents operate within security boundaries
  - **Transparent Security**: Zero-configuration security that works automatically in the background
  - **Comprehensive Logging**: All security events are logged for audit purposes
 
@@ -1,7 +1,6 @@
- claude_mpm/__init__.py,sha256=sAbTZkHe3vWYAKDWdGyGVue5zwLD7nCOHZwZrLALM8A,395
+ claude_mpm/__init__.py,sha256=TRneXzyApGyF7cRerpTn7aCbYLu-AJhPHjQf-dMBElQ,666
  claude_mpm/__main__.py,sha256=smBw-5J3nf5s6GgQjj384GUr28YotIX-WNOxqpP0wnE,310
- claude_mpm/_version.py,sha256=4w4S1hWcUnRsA3wylRad272Nvb5pcv7L8gpn7UBpYN8,159
- claude_mpm/cli.py,sha256=_6tUSY0FqBN6cHR6awuXgqm2-Hlh-L508rB-RDgflPU,26036
+ claude_mpm/cli.py,sha256=5GeRclnY-VNDaGnv7XUXDOniHZfACJ8HUYVUUXEHh9g,26340
  claude_mpm/cli_enhancements.py,sha256=nwdOrbXITRqvcq_vrJtPKW1GDS7dLIG4UqjoUet2vR0,10890
  claude_mpm/cli_main.py,sha256=KCAe-ws73NrIg5qmFhPdZ1a4uoiaEZ-lldYzQ6KfnJg,306
  claude_mpm/constants.py,sha256=5AG5hgBxOC7gMNHDx0lAhS-FQ8gXhtGtqJ9Moj3S6ro,4044
@@ -10,7 +9,7 @@ claude_mpm/agents/BASE_AGENT_TEMPLATE.md,sha256=TYgSd9jNBMWp4mAOBUl9dconX4RcGbvm
  claude_mpm/agents/INSTRUCTIONS.md,sha256=X6bOhSII3pZVZh_Vw_zMYRdfLtyl5Mmf_jhrVYfNxFs,7200
  claude_mpm/agents/__init__.py,sha256=r-p7ervzjLPD7_8dm2tXX_fwvdTZy6KwKA03ofxN3sA,3275
  claude_mpm/agents/agent-template.yaml,sha256=koKJn8MCAJx0QNQMHouvIZrwvw5qjPV0U-aV-YVyk6s,2036
- claude_mpm/agents/agent_loader.py,sha256=UZf3Od23gSKe_k-EokatQdQKqS38lRQq_ifUYU5I0qU,44024
+ claude_mpm/agents/agent_loader.py,sha256=P4h3qupJHvZL9dfb6ntB582xenYv9JbkMOVav_kNkAo,44030
  claude_mpm/agents/agent_loader_integration.py,sha256=z_DXxAIeuUD71HBYdxxvcFKoQYQxITLo8oAdN_M4LTA,7610
  claude_mpm/agents/agents_metadata.py,sha256=Xju9Yim6XSv2u1J_Swre5VJySbdxxC-9TzpOfXG8ibg,5170
  claude_mpm/agents/base_agent.json,sha256=wvopTu58PEdvgvjBi45J5aBA6bxs5_v1KC84CbJeRzY,3820
@@ -96,8 +95,13 @@ claude_mpm/orchestration/archive/subprocess_orchestrator.py,sha256=TYTAHX6p4OpgB
  claude_mpm/orchestration/archive/system_prompt_orchestrator.py,sha256=R16sc-94kQVeGjJzTYmvKn0aYgj_9qxyzShDy1E5zpE,12853
  claude_mpm/orchestration/archive/wrapper_orchestrator.py,sha256=cvL0NJf9kCWf3QJl67ySwvtR1Hd9Rym28Ii8Rtsdi6Q,6806
  claude_mpm/schemas/README_SECURITY.md,sha256=6BiFZ9VuMllijQkYEx_lAT7wwXhMeO3FQtih7WMvsAI,3297
- claude_mpm/schemas/agent_schema.json,sha256=vEp9dFCqUmxLzkNLmB34uvCmGemNv1DiVYNcUXBDONc,15963
+ claude_mpm/schemas/agent_schema.json,sha256=PukBzcAs8SzLHU6tYn8hjdYcnH8xrdvWD9hwvcasy0U,12385
+ claude_mpm/schemas/agent_schema_documentation.md,sha256=pQTxXNhxDqDK_0mMohszPOx3NOBKz-YOJPAU5h_iSQg,6405
  claude_mpm/schemas/agent_schema_security_notes.md,sha256=gzx6Z4xL5x0QISiXVDcwRGNnfegRgVK7jx7jAcc5910,6449
+ claude_mpm/schemas/ticket_workflow_documentation.md,sha256=F51WiIkJg2Z9BLA7FqwkAPy6YOXKYF4--E-NAYRx4yM,12784
+ claude_mpm/schemas/ticket_workflow_schema.json,sha256=AVedOeNfOdJdERaxEz4ZJj4aVZaHzfungg3C5Y_02qg,19112
+ claude_mpm/schemas/workflow_validator.py,sha256=qRgGodJoIZQaLfZ8OzWz3Y9eVNz3ckrQwkJ2RvccxAs,17175
+ claude_mpm/schemas/examples/standard_workflow.json,sha256=POQdxPIoJRD2qe4-17a35pGqFX5hfmGttXWPfeZ8qRs,13835
  claude_mpm/scripts/__init__.py,sha256=M2n9fQeyfILC8gogXvJv6ixnu7hwpqLEqLWJRaUN0MU,37
  claude_mpm/scripts/ticket.py,sha256=GmFimtTJxc927cCzJvvJH3gvoxXQtAB-W-xnuclcvNs,9350
  claude_mpm/services/__init__.py,sha256=-EBm07Lh9mjcofiQHCqyCCQJMLi9akVArPlz8i_kEOo,226
@@ -167,9 +171,9 @@ claude_mpm/utils/path_operations.py,sha256=6pLMnAWBVzHkgp6JyQHmHbGD-dWn-nX21yV4E
  claude_mpm/utils/paths.py,sha256=Xv0SZWdZRkRjN9e6clBcA165ya00GNQxt7SwMz51tfA,10153
  claude_mpm/validation/__init__.py,sha256=bJ19g9lnk7yIjtxzN8XPegp87HTFBzCrGQOpFgRTf3g,155
  claude_mpm/validation/agent_validator.py,sha256=GCA2b2rKhKDeaNyUqWxTiWIs3sDdWjD9cgOFRp9K6ic,18227
- claude_mpm-3.0.1.dist-info/licenses/LICENSE,sha256=cSdDfXjoTVhstrERrqme4zgxAu4GubU22zVEHsiXGxs,1071
- claude_mpm-3.0.1.dist-info/METADATA,sha256=O4UprzdFs-f48tg4SYr4fTLe3ROPbhEd-q3kgXS0X20,13906
- claude_mpm-3.0.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- claude_mpm-3.0.1.dist-info/entry_points.txt,sha256=PknO31um7d8bt6GjOiVeYpdJpjND0_C1z-LQfY6UfiU,142
- claude_mpm-3.0.1.dist-info/top_level.txt,sha256=1nUg3FEaBySgm8t-s54jK5zoPnu3_eY6EP6IOlekyHA,11
- claude_mpm-3.0.1.dist-info/RECORD,,
+ claude_mpm-3.1.0.dist-info/licenses/LICENSE,sha256=cSdDfXjoTVhstrERrqme4zgxAu4GubU22zVEHsiXGxs,1071
+ claude_mpm-3.1.0.dist-info/METADATA,sha256=jtA8pwlqe3dPFBhH4xQwzsULplyAeqBY8RLsUia0zUg,14139
+ claude_mpm-3.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ claude_mpm-3.1.0.dist-info/entry_points.txt,sha256=PknO31um7d8bt6GjOiVeYpdJpjND0_C1z-LQfY6UfiU,142
+ claude_mpm-3.1.0.dist-info/top_level.txt,sha256=1nUg3FEaBySgm8t-s54jK5zoPnu3_eY6EP6IOlekyHA,11
+ claude_mpm-3.1.0.dist-info/RECORD,,
claude_mpm/_version.py DELETED
@@ -1,4 +0,0 @@
- # file generated by setuptools_scm
- # don't change, don't track in version control
- __version__ = version = '3.0.1'
- __version_tuple__ = version_tuple = (3, 0, 1)