hatch_xclam-0.7.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. hatch/__init__.py +21 -0
  2. hatch/cli_hatch.py +2748 -0
  3. hatch/environment_manager.py +1375 -0
  4. hatch/installers/__init__.py +25 -0
  5. hatch/installers/dependency_installation_orchestrator.py +636 -0
  6. hatch/installers/docker_installer.py +545 -0
  7. hatch/installers/hatch_installer.py +198 -0
  8. hatch/installers/installation_context.py +109 -0
  9. hatch/installers/installer_base.py +195 -0
  10. hatch/installers/python_installer.py +342 -0
  11. hatch/installers/registry.py +179 -0
  12. hatch/installers/system_installer.py +588 -0
  13. hatch/mcp_host_config/__init__.py +38 -0
  14. hatch/mcp_host_config/backup.py +458 -0
  15. hatch/mcp_host_config/host_management.py +572 -0
  16. hatch/mcp_host_config/models.py +602 -0
  17. hatch/mcp_host_config/reporting.py +181 -0
  18. hatch/mcp_host_config/strategies.py +513 -0
  19. hatch/package_loader.py +263 -0
  20. hatch/python_environment_manager.py +734 -0
  21. hatch/registry_explorer.py +171 -0
  22. hatch/registry_retriever.py +335 -0
  23. hatch/template_generator.py +179 -0
  24. hatch_xclam-0.7.0.dist-info/METADATA +150 -0
  25. hatch_xclam-0.7.0.dist-info/RECORD +93 -0
  26. hatch_xclam-0.7.0.dist-info/WHEEL +5 -0
  27. hatch_xclam-0.7.0.dist-info/entry_points.txt +2 -0
  28. hatch_xclam-0.7.0.dist-info/licenses/LICENSE +661 -0
  29. hatch_xclam-0.7.0.dist-info/top_level.txt +2 -0
  30. tests/__init__.py +1 -0
  31. tests/run_environment_tests.py +124 -0
  32. tests/test_cli_version.py +122 -0
  33. tests/test_data/packages/basic/base_pkg/hatch_mcp_server.py +18 -0
  34. tests/test_data/packages/basic/base_pkg/mcp_server.py +21 -0
  35. tests/test_data/packages/basic/base_pkg_v2/hatch_mcp_server.py +18 -0
  36. tests/test_data/packages/basic/base_pkg_v2/mcp_server.py +21 -0
  37. tests/test_data/packages/basic/utility_pkg/hatch_mcp_server.py +18 -0
  38. tests/test_data/packages/basic/utility_pkg/mcp_server.py +21 -0
  39. tests/test_data/packages/dependencies/complex_dep_pkg/hatch_mcp_server.py +18 -0
  40. tests/test_data/packages/dependencies/complex_dep_pkg/mcp_server.py +21 -0
  41. tests/test_data/packages/dependencies/docker_dep_pkg/hatch_mcp_server.py +18 -0
  42. tests/test_data/packages/dependencies/docker_dep_pkg/mcp_server.py +21 -0
  43. tests/test_data/packages/dependencies/mixed_dep_pkg/hatch_mcp_server.py +18 -0
  44. tests/test_data/packages/dependencies/mixed_dep_pkg/mcp_server.py +21 -0
  45. tests/test_data/packages/dependencies/python_dep_pkg/hatch_mcp_server.py +18 -0
  46. tests/test_data/packages/dependencies/python_dep_pkg/mcp_server.py +21 -0
  47. tests/test_data/packages/dependencies/simple_dep_pkg/hatch_mcp_server.py +18 -0
  48. tests/test_data/packages/dependencies/simple_dep_pkg/mcp_server.py +21 -0
  49. tests/test_data/packages/dependencies/system_dep_pkg/hatch_mcp_server.py +18 -0
  50. tests/test_data/packages/dependencies/system_dep_pkg/mcp_server.py +21 -0
  51. tests/test_data/packages/error_scenarios/circular_dep_pkg/hatch_mcp_server.py +18 -0
  52. tests/test_data/packages/error_scenarios/circular_dep_pkg/mcp_server.py +21 -0
  53. tests/test_data/packages/error_scenarios/circular_dep_pkg_b/hatch_mcp_server.py +18 -0
  54. tests/test_data/packages/error_scenarios/circular_dep_pkg_b/mcp_server.py +21 -0
  55. tests/test_data/packages/error_scenarios/invalid_dep_pkg/hatch_mcp_server.py +18 -0
  56. tests/test_data/packages/error_scenarios/invalid_dep_pkg/mcp_server.py +21 -0
  57. tests/test_data/packages/error_scenarios/version_conflict_pkg/hatch_mcp_server.py +18 -0
  58. tests/test_data/packages/error_scenarios/version_conflict_pkg/mcp_server.py +21 -0
  59. tests/test_data/packages/schema_versions/schema_v1_1_0_pkg/main.py +11 -0
  60. tests/test_data/packages/schema_versions/schema_v1_2_0_pkg/main.py +11 -0
  61. tests/test_data/packages/schema_versions/schema_v1_2_1_pkg/hatch_mcp_server.py +18 -0
  62. tests/test_data/packages/schema_versions/schema_v1_2_1_pkg/mcp_server.py +21 -0
  63. tests/test_data_utils.py +472 -0
  64. tests/test_dependency_orchestrator_consent.py +266 -0
  65. tests/test_docker_installer.py +524 -0
  66. tests/test_env_manip.py +991 -0
  67. tests/test_hatch_installer.py +179 -0
  68. tests/test_installer_base.py +221 -0
  69. tests/test_mcp_atomic_operations.py +276 -0
  70. tests/test_mcp_backup_integration.py +308 -0
  71. tests/test_mcp_cli_all_host_specific_args.py +303 -0
  72. tests/test_mcp_cli_backup_management.py +295 -0
  73. tests/test_mcp_cli_direct_management.py +453 -0
  74. tests/test_mcp_cli_discovery_listing.py +582 -0
  75. tests/test_mcp_cli_host_config_integration.py +823 -0
  76. tests/test_mcp_cli_package_management.py +360 -0
  77. tests/test_mcp_cli_partial_updates.py +859 -0
  78. tests/test_mcp_environment_integration.py +520 -0
  79. tests/test_mcp_host_config_backup.py +257 -0
  80. tests/test_mcp_host_configuration_manager.py +331 -0
  81. tests/test_mcp_host_registry_decorator.py +348 -0
  82. tests/test_mcp_pydantic_architecture_v4.py +603 -0
  83. tests/test_mcp_server_config_models.py +242 -0
  84. tests/test_mcp_server_config_type_field.py +221 -0
  85. tests/test_mcp_sync_functionality.py +316 -0
  86. tests/test_mcp_user_feedback_reporting.py +359 -0
  87. tests/test_non_tty_integration.py +281 -0
  88. tests/test_online_package_loader.py +202 -0
  89. tests/test_python_environment_manager.py +882 -0
  90. tests/test_python_installer.py +327 -0
  91. tests/test_registry.py +51 -0
  92. tests/test_registry_retriever.py +250 -0
  93. tests/test_system_installer.py +733 -0
tests/test_mcp_cli_host_config_integration.py
@@ -0,0 +1,823 @@
+ """
+ Test suite for MCP CLI host configuration integration.
+
+ This module tests the integration of the Pydantic model hierarchy (Phase 3B)
+ and user feedback reporting system (Phase 3C) into Hatch's CLI commands.
+
+ Tests focus on CLI-specific integration logic while leveraging existing test
+ infrastructure from Phases 3A-3C.
+ """
+
+ import unittest
+ import sys
+ from pathlib import Path
+ from unittest.mock import patch, MagicMock, call, ANY
+
+ # Add the parent directory to the path to import wobble
+ sys.path.insert(0, str(Path(__file__).parent.parent))
+
+ try:
+     from wobble.decorators import regression_test, integration_test
+ except ImportError:
+     # Fallback decorators if wobble is not available
+     def regression_test(func):
+         return func
+
+     def integration_test(scope="component"):
+         def decorator(func):
+             return func
+         return decorator
+
+ from hatch.cli_hatch import (
+     handle_mcp_configure,
+     parse_env_vars,
+     parse_header,
+     parse_host_list,
+ )
+ from hatch.mcp_host_config.models import (
+     MCPServerConfig,
+     MCPServerConfigOmni,
+     HOST_MODEL_REGISTRY,
+     MCPHostType,
+     MCPServerConfigGemini,
+     MCPServerConfigVSCode,
+     MCPServerConfigCursor,
+     MCPServerConfigClaude,
+ )
+ from hatch.mcp_host_config.reporting import (
+     generate_conversion_report,
+     display_report,
+     FieldOperation,
+     ConversionReport,
+ )
+
+
+ class TestCLIArgumentParsingToOmniCreation(unittest.TestCase):
+     """Test suite for CLI argument parsing to MCPServerConfigOmni creation."""
+
+     @regression_test
+     def test_configure_creates_omni_model_basic(self):
+         """Test that configure command creates MCPServerConfigOmni from CLI arguments."""
+         with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager:
+             with patch('hatch.cli_hatch.request_confirmation', return_value=False):
+                 # Call handle_mcp_configure with basic arguments
+                 result = handle_mcp_configure(
+                     host='claude-desktop',
+                     server_name='test-server',
+                     command='python',
+                     args=['server.py'],
+                     env=None,
+                     url=None,
+                     header=None,
+                     no_backup=True,
+                     dry_run=False,
+                     auto_approve=False
+                 )
+
+                 # Verify the function executed without errors
+                 self.assertEqual(result, 0)
+
+     @regression_test
+     def test_configure_creates_omni_with_env_vars(self):
+         """Test that environment variables are parsed correctly into Omni model."""
+         with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager:
+             with patch('hatch.cli_hatch.request_confirmation', return_value=False):
+                 # Call with environment variables
+                 result = handle_mcp_configure(
+                     host='claude-desktop',
+                     server_name='test-server',
+                     command='python',
+                     args=['server.py'],
+                     env=['API_KEY=secret', 'DEBUG=true'],
+                     url=None,
+                     header=None,
+                     no_backup=True,
+                     dry_run=False,
+                     auto_approve=False
+                 )
+
+                 # Verify the function executed without errors
+                 self.assertEqual(result, 0)
+
+     @regression_test
+     def test_configure_creates_omni_with_headers(self):
+         """Test that headers are parsed correctly into Omni model."""
+         with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager:
+             with patch('hatch.cli_hatch.request_confirmation', return_value=False):
+                 result = handle_mcp_configure(
+                     host='gemini', # Use gemini which supports remote servers
+                     server_name='test-server',
+                     command=None,
+                     args=None,
+                     env=None,
+                     url='https://api.example.com',
+                     header=['Authorization=Bearer token', 'Content-Type=application/json'],
+                     no_backup=True,
+                     dry_run=False,
+                     auto_approve=False
+                 )
+
+                 # Verify the function executed without errors (bug fixed in Phase 4)
+                 self.assertEqual(result, 0)
+
+     @regression_test
+     def test_configure_creates_omni_remote_server(self):
+         """Test that remote server arguments create correct Omni model."""
+         with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager:
+             with patch('hatch.cli_hatch.request_confirmation', return_value=False):
+                 result = handle_mcp_configure(
+                     host='gemini', # Use gemini which supports remote servers
+                     server_name='remote-server',
+                     command=None,
+                     args=None,
+                     env=None,
+                     url='https://api.example.com',
+                     header=['Auth=token'],
+                     no_backup=True,
+                     dry_run=False,
+                     auto_approve=False
+                 )
+
+                 # Verify the function executed without errors (bug fixed in Phase 4)
+                 self.assertEqual(result, 0)
+
+     @regression_test
+     def test_configure_omni_with_all_universal_fields(self):
+         """Test that all universal fields are supported in Omni creation."""
+         with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager:
+             with patch('hatch.cli_hatch.request_confirmation', return_value=False):
+                 # Call with all universal fields
+                 result = handle_mcp_configure(
+                     host='claude-desktop',
+                     server_name='full-server',
+                     command='python',
+                     args=['server.py', '--port', '8080'],
+                     env=['API_KEY=secret', 'DEBUG=true', 'LOG_LEVEL=info'],
+                     url=None,
+                     header=None,
+                     no_backup=True,
+                     dry_run=False,
+                     auto_approve=False
+                 )
+
+                 # Verify the function executed without errors
+                 self.assertEqual(result, 0)
+
+     @regression_test
+     def test_configure_omni_with_optional_fields_none(self):
+         """Test that optional fields are handled correctly (None values)."""
+         with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager:
+             with patch('hatch.cli_hatch.request_confirmation', return_value=False):
+                 # Call with only required fields
+                 result = handle_mcp_configure(
+                     host='claude-desktop',
+                     server_name='minimal-server',
+                     command='python',
+                     args=['server.py'],
+                     env=None,
+                     url=None,
+                     header=None,
+                     no_backup=True,
+                     dry_run=False,
+                     auto_approve=False
+                 )
+
+                 # Verify the function executed without errors
+                 self.assertEqual(result, 0)
+
+
+ class TestModelIntegration(unittest.TestCase):
+     """Test suite for model integration in CLI handlers."""
+
+     @regression_test
+     def test_configure_uses_host_model_registry(self):
+         """Test that configure command uses HOST_MODEL_REGISTRY for host selection."""
+         with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager:
+             with patch('hatch.cli_hatch.request_confirmation', return_value=False):
+                 # Test with Gemini host
+                 result = handle_mcp_configure(
+                     host='gemini',
+                     server_name='test-server',
+                     command='python',
+                     args=['server.py'],
+                     env=None,
+                     url=None,
+                     header=None,
+                     no_backup=True,
+                     dry_run=False,
+                     auto_approve=False
+                 )
+
+                 # Verify the function executed without errors
+                 self.assertEqual(result, 0)
+
+     @regression_test
+     def test_configure_calls_from_omni_conversion(self):
+         """Test that from_omni() is called to convert Omni to host-specific model."""
+         with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager:
+             with patch('hatch.cli_hatch.request_confirmation', return_value=False):
+                 # Call configure command
+                 result = handle_mcp_configure(
+                     host='claude-desktop',
+                     server_name='test-server',
+                     command='python',
+                     args=['server.py'],
+                     env=None,
+                     url=None,
+                     header=None,
+                     no_backup=True,
+                     dry_run=False,
+                     auto_approve=False
+                 )
+
+                 # Verify the function executed without errors
+                 self.assertEqual(result, 0)
+
+     @integration_test(scope="component")
+     def test_configure_passes_host_specific_model_to_manager(self):
+         """Test that host-specific model is passed to MCPHostConfigurationManager."""
+         with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager_class:
+             mock_manager = MagicMock()
+             mock_manager_class.return_value = mock_manager
+             mock_manager.configure_server.return_value = MagicMock(success=True, backup_path=None)
+
+             with patch('hatch.cli_hatch.request_confirmation', return_value=True):
+                 # Call configure command
+                 result = handle_mcp_configure(
+                     host='claude-desktop',
+                     server_name='test-server',
+                     command='python',
+                     args=['server.py'],
+                     env=None,
+                     url=None,
+                     header=None,
+                     no_backup=True,
+                     dry_run=False,
+                     auto_approve=False
+                 )
+
+                 # Verify configure_server was called
+                 self.assertEqual(result, 0)
+                 mock_manager.configure_server.assert_called_once()
+
+                 # Verify the server_config argument is a host-specific model instance
+                 # (MCPServerConfigClaude for claude-desktop host)
+                 call_args = mock_manager.configure_server.call_args
+                 server_config = call_args.kwargs['server_config']
+                 self.assertIsInstance(server_config, MCPServerConfigClaude)
+
+
+ class TestReportingIntegration(unittest.TestCase):
+     """Test suite for reporting integration in CLI commands."""
+
+     @regression_test
+     def test_configure_dry_run_displays_report_only(self):
+         """Test that dry-run mode displays report without configuration."""
+         with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager:
+             # Call with dry-run
+             result = handle_mcp_configure(
+                 host='claude-desktop',
+                 server_name='test-server',
+                 command='python',
+                 args=['server.py'],
+                 env=None,
+                 url=None,
+                 header=None,
+                 no_backup=True,
+                 dry_run=True,
+                 auto_approve=False
+             )
+
+             # Verify the function executed without errors
+             self.assertEqual(result, 0)
+
+             # Verify MCPHostConfigurationManager.create_server was NOT called (dry-run doesn't persist)
+             # Note: get_server_config is called to check if server exists, but create_server is not called
+             mock_manager.return_value.create_server.assert_not_called()
+
+
+ class TestHostSpecificArguments(unittest.TestCase):
+     """Test suite for host-specific CLI arguments (Phase 3 - Mandatory)."""
+
+     @regression_test
+     def test_configure_accepts_all_universal_fields(self):
+         """Test that all universal fields are accepted by CLI."""
+         with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager:
+             with patch('hatch.cli_hatch.request_confirmation', return_value=False):
+                 # Call with all universal fields
+                 result = handle_mcp_configure(
+                     host='claude-desktop',
+                     server_name='test-server',
+                     command='python',
+                     args=['server.py', '--port', '8080'],
+                     env=['API_KEY=secret', 'DEBUG=true'],
+                     url=None,
+                     header=None,
+                     no_backup=True,
+                     dry_run=False,
+                     auto_approve=False
+                 )
+
+                 # Verify success
+                 self.assertEqual(result, 0)
+
+     @regression_test
+     def test_configure_multiple_env_vars(self):
+         """Test that multiple environment variables are handled correctly."""
+         with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager:
+             with patch('hatch.cli_hatch.request_confirmation', return_value=False):
+                 # Call with multiple env vars
+                 result = handle_mcp_configure(
+                     host='gemini',
+                     server_name='test-server',
+                     command='python',
+                     args=['server.py'],
+                     env=['VAR1=value1', 'VAR2=value2', 'VAR3=value3'],
+                     url=None,
+                     header=None,
+                     no_backup=True,
+                     dry_run=False,
+                     auto_approve=False
+                 )
+
+                 # Verify success
+                 self.assertEqual(result, 0)
+
+     @regression_test
+     def test_configure_different_hosts(self):
+         """Test that different host types are handled correctly."""
+         hosts_to_test = ['claude-desktop', 'cursor', 'vscode', 'gemini']
+
+         for host in hosts_to_test:
+             with self.subTest(host=host):
+                 with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager:
+                     with patch('hatch.cli_hatch.request_confirmation', return_value=False):
+                         result = handle_mcp_configure(
+                             host=host,
+                             server_name='test-server',
+                             command='python',
+                             args=['server.py'],
+                             env=None,
+                             url=None,
+                             header=None,
+                             no_backup=True,
+                             dry_run=False,
+                             auto_approve=False
+                         )
+
+                         # Verify success for each host
+                         self.assertEqual(result, 0)
+
+
+ class TestErrorHandling(unittest.TestCase):
+     """Test suite for error handling in CLI commands."""
+
+     @regression_test
+     def test_configure_invalid_host_type_error(self):
+         """Test that clear error is shown for invalid host type."""
+         # Call with invalid host
+         result = handle_mcp_configure(
+             host='invalid-host',
+             server_name='test-server',
+             command='python',
+             args=['server.py'],
+             env=None,
+             url=None,
+             header=None,
+             no_backup=True,
+             dry_run=False,
+             auto_approve=False
+         )
+
+         # Verify error return code
+         self.assertEqual(result, 1)
+
+     @regression_test
+     def test_configure_invalid_field_value_error(self):
+         """Test that clear error is shown for invalid field values."""
+         # Test with invalid URL format - this will be caught by Pydantic validation
+         # when creating MCPServerConfig
+         result = handle_mcp_configure(
+             host='claude-desktop',
+             server_name='test-server',
+             command=None,
+             args=None, # Must be None for remote server
+             env=None,
+             url='not-a-url', # Invalid URL format
+             header=None,
+             no_backup=True,
+             dry_run=False,
+             auto_approve=False
+         )
+
+         # Verify error return code (validation error caught in exception handler)
+         self.assertEqual(result, 1)
+
+     @regression_test
+     def test_configure_pydantic_validation_error_handling(self):
+         """Test that Pydantic ValidationErrors are caught and handled."""
+         # Test with conflicting arguments (command with headers)
+         result = handle_mcp_configure(
+             host='claude-desktop',
+             server_name='test-server',
+             command='python',
+             args=['server.py'],
+             env=None,
+             url=None,
+             header=['Auth=token'], # Headers not allowed with command
+             no_backup=True,
+             dry_run=False,
+             auto_approve=False
+         )
+
+         # Verify error return code (caught by validation in handle_mcp_configure)
+         self.assertEqual(result, 1)
+
+     @regression_test
+     def test_configure_missing_command_url_error(self):
+         """Test error handling when neither command nor URL provided."""
+         # This test verifies the argparse validation (required=True for mutually exclusive group)
+         # In actual CLI usage, argparse would catch this before handle_mcp_configure is called
+         # For unit testing, we test that the function handles None values appropriately
+         result = handle_mcp_configure(
+             host='claude-desktop',
+             server_name='test-server',
+             command=None,
+             args=None,
+             env=None,
+             url=None,
+             header=None,
+             no_backup=True,
+             dry_run=False,
+             auto_approve=False
+         )
+
+         # Verify error return code (validation error)
+         self.assertEqual(result, 1)
+
+
+ class TestBackwardCompatibility(unittest.TestCase):
+     """Test suite for backward compatibility."""
+
+     @regression_test
+     def test_existing_configure_command_still_works(self):
+         """Test that existing configure command usage still works."""
+         with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager_class:
+             mock_manager = MagicMock()
+             mock_manager_class.return_value = mock_manager
+             mock_manager.configure_server.return_value = MagicMock(success=True, backup_path=None)
+
+             with patch('hatch.cli_hatch.request_confirmation', return_value=True):
+                 # Call with existing command pattern
+                 result = handle_mcp_configure(
+                     host='claude-desktop',
+                     server_name='my-server',
+                     command='python',
+                     args=['-m', 'my_package.server'],
+                     env=['API_KEY=secret'],
+                     url=None,
+                     header=None,
+                     no_backup=False,
+                     dry_run=False,
+                     auto_approve=False
+                 )
+
+                 # Verify success
+                 self.assertEqual(result, 0)
+                 mock_manager.configure_server.assert_called_once()
+
+
+ class TestParseUtilities(unittest.TestCase):
+     """Test suite for CLI parsing utilities."""
+
+     @regression_test
+     def test_parse_env_vars_basic(self):
+         """Test parsing environment variables from KEY=VALUE format."""
+         env_list = ['API_KEY=secret', 'DEBUG=true']
+         result = parse_env_vars(env_list)
+
+         expected = {'API_KEY': 'secret', 'DEBUG': 'true'}
+         self.assertEqual(result, expected)
+
+     @regression_test
+     def test_parse_env_vars_empty(self):
+         """Test parsing empty environment variables list."""
+         result = parse_env_vars(None)
+         self.assertEqual(result, {})
+
+         result = parse_env_vars([])
+         self.assertEqual(result, {})
+
+     @regression_test
+     def test_parse_header_basic(self):
+         """Test parsing headers from KEY=VALUE format."""
+         headers_list = ['Authorization=Bearer token', 'Content-Type=application/json']
+         result = parse_header(headers_list)
+
+         expected = {'Authorization': 'Bearer token', 'Content-Type': 'application/json'}
+         self.assertEqual(result, expected)
+
+     @regression_test
+     def test_parse_header_empty(self):
+         """Test parsing empty headers list."""
+         result = parse_header(None)
+         self.assertEqual(result, {})
+
+         result = parse_header([])
+         self.assertEqual(result, {})
+
+
+ class TestCLIIntegrationReadiness(unittest.TestCase):
+     """Test suite to verify readiness for Phase 4 CLI integration implementation."""
+
+     @regression_test
+     def test_host_model_registry_available(self):
+         """Test that HOST_MODEL_REGISTRY is available for CLI integration."""
+         from hatch.mcp_host_config.models import HOST_MODEL_REGISTRY, MCPHostType
+
+         # Verify registry contains all expected hosts
+         expected_hosts = [
+             MCPHostType.GEMINI,
+             MCPHostType.CLAUDE_DESKTOP,
+             MCPHostType.CLAUDE_CODE,
+             MCPHostType.VSCODE,
+             MCPHostType.CURSOR,
+             MCPHostType.LMSTUDIO,
+         ]
+
+         for host in expected_hosts:
+             self.assertIn(host, HOST_MODEL_REGISTRY)
+
+     @regression_test
+     def test_omni_model_available(self):
+         """Test that MCPServerConfigOmni is available for CLI integration."""
+         from hatch.mcp_host_config.models import MCPServerConfigOmni
+
+         # Create a basic Omni model
+         omni = MCPServerConfigOmni(
+             name='test-server',
+             command='python',
+             args=['server.py'],
+             env={'API_KEY': 'secret'},
+         )
+
+         # Verify model was created successfully
+         self.assertEqual(omni.name, 'test-server')
+         self.assertEqual(omni.command, 'python')
+         self.assertEqual(omni.args, ['server.py'])
+         self.assertEqual(omni.env, {'API_KEY': 'secret'})
+
+     @regression_test
+     def test_from_omni_conversion_available(self):
+         """Test that from_omni() conversion is available for all host models."""
+         from hatch.mcp_host_config.models import (
+             MCPServerConfigOmni,
+             MCPServerConfigGemini,
+             MCPServerConfigClaude,
+             MCPServerConfigVSCode,
+             MCPServerConfigCursor,
+         )
+
+         # Create Omni model
+         omni = MCPServerConfigOmni(
+             name='test-server',
+             command='python',
+             args=['server.py'],
+         )
+
+         # Test conversion to each host-specific model
+         gemini = MCPServerConfigGemini.from_omni(omni)
+         self.assertEqual(gemini.name, 'test-server')
+
+         claude = MCPServerConfigClaude.from_omni(omni)
+         self.assertEqual(claude.name, 'test-server')
+
+         vscode = MCPServerConfigVSCode.from_omni(omni)
+         self.assertEqual(vscode.name, 'test-server')
+
+         cursor = MCPServerConfigCursor.from_omni(omni)
+         self.assertEqual(cursor.name, 'test-server')
+
+     @regression_test
+     def test_reporting_functions_available(self):
+         """Test that reporting functions are available for CLI integration."""
+         from hatch.mcp_host_config.reporting import (
+             generate_conversion_report,
+             display_report,
+         )
+         from hatch.mcp_host_config.models import MCPServerConfigOmni, MCPHostType
+
+         # Create Omni model
+         omni = MCPServerConfigOmni(
+             name='test-server',
+             command='python',
+             args=['server.py'],
+         )
+
+         # Generate report
+         report = generate_conversion_report(
+             operation='create',
+             server_name='test-server',
+             target_host=MCPHostType.CLAUDE_DESKTOP,
+             omni=omni,
+             dry_run=True
+         )
+
+         # Verify report was created
+         self.assertIsNotNone(report)
+         self.assertEqual(report.operation, 'create')
+
+     @regression_test
+     def test_claude_desktop_rejects_url_configuration(self):
+         """Test Claude Desktop rejects remote server (--url) configurations (Issue 2)."""
+         with patch('hatch.cli_hatch.print') as mock_print:
+             result = handle_mcp_configure(
+                 host='claude-desktop',
+                 server_name='remote-server',
+                 command=None,
+                 args=None,
+                 env=None,
+                 url='http://localhost:8080', # Should be rejected
+                 header=None,
+                 no_backup=True,
+                 dry_run=False,
+                 auto_approve=True
+             )
+
+             # Validate: Should return error code 1
+             self.assertEqual(result, 1)
+
+             # Validate: Error message displayed
+             error_calls = [call for call in mock_print.call_args_list
+                            if 'Error' in str(call) or 'error' in str(call)]
+             self.assertTrue(len(error_calls) > 0, "Expected error message to be printed")
+
+     @regression_test
+     def test_claude_code_rejects_url_configuration(self):
+         """Test Claude Code (same family) also rejects remote servers (Issue 2)."""
+         with patch('hatch.cli_hatch.print') as mock_print:
+             result = handle_mcp_configure(
+                 host='claude-code',
+                 server_name='remote-server',
+                 command=None,
+                 args=None,
+                 env=None,
+                 url='http://localhost:8080',
+                 header=None,
+                 no_backup=True,
+                 dry_run=False,
+                 auto_approve=True
+             )
+
+             # Validate: Should return error code 1
+             self.assertEqual(result, 1)
+
+             # Validate: Error message displayed
+             error_calls = [call for call in mock_print.call_args_list
+                            if 'Error' in str(call) or 'error' in str(call)]
+             self.assertTrue(len(error_calls) > 0, "Expected error message to be printed")
+
+     @regression_test
+     def test_args_quoted_string_splitting(self):
+         """Test that quoted strings in --args are properly split (Issue 4)."""
+         with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager:
+             with patch('hatch.cli_hatch.request_confirmation', return_value=False):
+                 # Simulate user providing: --args "-r --name aName"
+                 # This arrives as a single string element in the args list
+                 result = handle_mcp_configure(
+                     host='claude-desktop',
+                     server_name='test-server',
+                     command='python',
+                     args=['-r --name aName'], # Single string with quoted content
+                     env=None,
+                     url=None,
+                     header=None,
+                     no_backup=True,
+                     dry_run=False,
+                     auto_approve=False
+                 )
+
+                 # Verify: Should succeed (return 0)
+                 self.assertEqual(result, 0)
+
+                 # Verify: MCPServerConfigOmni was created with split args
+                 call_args = mock_manager.return_value.create_server.call_args
+                 if call_args:
+                     omni_config = call_args[1]['omni']
+                     # Args should be split into 3 elements: ['-r', '--name', 'aName']
+                     self.assertEqual(omni_config.args, ['-r', '--name', 'aName'])
+
+     @regression_test
+     def test_args_multiple_quoted_strings(self):
+         """Test multiple quoted strings in --args are all split correctly (Issue 4)."""
+         with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager:
+             with patch('hatch.cli_hatch.request_confirmation', return_value=False):
+                 # Simulate: --args "-r" "--name aName"
+                 result = handle_mcp_configure(
+                     host='claude-desktop',
+                     server_name='test-server',
+                     command='python',
+                     args=['-r', '--name aName'], # Two separate args
+                     env=None,
+                     url=None,
+                     header=None,
+                     no_backup=True,
+                     dry_run=False,
+                     auto_approve=False
+                 )
+
+                 # Verify: Should succeed
+                 self.assertEqual(result, 0)
+
+                 # Verify: All args are properly split
+                 call_args = mock_manager.return_value.create_server.call_args
+                 if call_args:
+                     omni_config = call_args[1]['omni']
+                     # Should be split into: ['-r', '--name', 'aName']
+                     self.assertEqual(omni_config.args, ['-r', '--name', 'aName'])
+
+     @regression_test
+     def test_args_empty_string_handling(self):
+         """Test that empty strings in --args are filtered out (Issue 4)."""
+         with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager:
+             with patch('hatch.cli_hatch.request_confirmation', return_value=False):
+                 # Simulate: --args "" "server.py"
+                 result = handle_mcp_configure(
+                     host='claude-desktop',
+                     server_name='test-server',
+                     command='python',
+                     args=['', 'server.py'], # Empty string should be filtered
+                     env=None,
+                     url=None,
+                     header=None,
+                     no_backup=True,
+                     dry_run=False,
+                     auto_approve=False
+                 )
+
+                 # Verify: Should succeed
+                 self.assertEqual(result, 0)
+
+                 # Verify: Empty strings are filtered out
+                 call_args = mock_manager.return_value.create_server.call_args
+                 if call_args:
+                     omni_config = call_args[1]['omni']
+                     # Should only contain 'server.py'
+                     self.assertEqual(omni_config.args, ['server.py'])
+
+     @regression_test
+     def test_args_invalid_quote_handling(self):
+         """Test that invalid quotes in --args are handled gracefully (Issue 4)."""
+         with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager:
+             with patch('hatch.cli_hatch.request_confirmation', return_value=False):
+                 with patch('hatch.cli_hatch.print') as mock_print:
+                     # Simulate: --args 'unclosed "quote'
+                     result = handle_mcp_configure(
+                         host='claude-desktop',
+                         server_name='test-server',
+                         command='python',
+                         args=['unclosed "quote'], # Invalid quote
+                         env=None,
+                         url=None,
+                         header=None,
+                         no_backup=True,
+                         dry_run=False,
+                         auto_approve=False
+                     )
+
+                     # Verify: Should succeed (graceful fallback)
+                     self.assertEqual(result, 0)
+
+                     # Verify: Warning was printed
+                     warning_calls = [call for call in mock_print.call_args_list
+                                      if 'Warning' in str(call)]
+                     self.assertTrue(len(warning_calls) > 0, "Expected warning for invalid quote")
+
+                     # Verify: Original arg is used as fallback
+                     call_args = mock_manager.return_value.create_server.call_args
+                     if call_args:
+                         omni_config = call_args[1]['omni']
+                         self.assertIn('unclosed "quote', omni_config.args)
+
+     @regression_test
+     def test_cli_handler_signature_compatible(self):
+         """Test that handle_mcp_configure signature is compatible with integration."""
+         import inspect
+         from hatch.cli_hatch import handle_mcp_configure
+
+         # Get function signature
+         sig = inspect.signature(handle_mcp_configure)
+
+         # Verify expected parameters exist
+         expected_params = [
+             'host', 'server_name', 'command', 'args',
+             'env', 'url', 'header', 'no_backup', 'dry_run', 'auto_approve'
+         ]
+
+         for param in expected_params:
+             self.assertIn(param, sig.parameters)
+
+
+ if __name__ == '__main__':
+     unittest.main()
+
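For orientation, the snippet below is an illustrative sketch, not part of the published package. It strings together the Omni-to-host conversion flow that the test suite above exercises, using only names and arguments that appear verbatim in this diff (hatch.mcp_host_config.models and hatch.mcp_host_config.reporting); the signatures are assumed from the test code rather than verified against the package source.

# Illustrative only -- mirrors test_omni_model_available, test_from_omni_conversion_available,
# and test_reporting_functions_available from the diff above.
from hatch.mcp_host_config.models import (
    MCPServerConfigOmni,
    MCPServerConfigClaude,
    MCPHostType,
)
from hatch.mcp_host_config.reporting import generate_conversion_report

# Universal "Omni" description of a local MCP server (fields as used in the tests).
omni = MCPServerConfigOmni(
    name='example-server',
    command='python',
    args=['server.py'],
    env={'API_KEY': 'secret'},
)

# Convert to the Claude Desktop-specific model, as the tests do via from_omni().
claude = MCPServerConfigClaude.from_omni(omni)
assert claude.name == 'example-server'

# Build a conversion report for a dry run, with the same keyword arguments the tests pass.
report = generate_conversion_report(
    operation='create',
    server_name=omni.name,
    target_host=MCPHostType.CLAUDE_DESKTOP,
    omni=omni,
    dry_run=True,
)
assert report.operation == 'create'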