diff --git a/docs/articles/users/CLIReference.md b/docs/articles/users/CLIReference.md
index d45f6d6..d01f2b8 100644
--- a/docs/articles/users/CLIReference.md
+++ b/docs/articles/users/CLIReference.md
@@ -361,6 +361,78 @@ Syntax:
 | `--auto-approve` | flag | Skip confirmation prompts | false |
 | `--no-backup` | flag | Skip backup creation before configuration | false |
 
+**Behavior**:
+
+The command displays a **conversion report** showing exactly which fields will be configured on the target host. This provides transparency about which fields the host supports and what values will be set.
+
+The conversion report shows:
+- **UPDATED** fields: Fields being set with their new values (shown as `None --> value`)
+- **UNSUPPORTED** fields: Fields not supported by the target host (automatically filtered out)
+- **UNCHANGED** fields: Fields that already have the specified value (update operations only)
+
+**Example - Local Server Configuration**:
+
+```bash
+$ hatch mcp configure my-server --host claude-desktop --command python --args server.py --env-var API_KEY=secret
+
+Server 'my-server' created for host 'claude-desktop':
+  name: UPDATED None --> 'my-server'
+  command: UPDATED None --> 'python'
+  args: UPDATED None --> ['server.py']
+  env: UPDATED None --> {'API_KEY': 'secret'}
+  url: UPDATED None --> None
+
+Configure MCP server 'my-server' on host 'claude-desktop'? [y/N]: y
+[SUCCESS] Successfully configured MCP server 'my-server' on host 'claude-desktop'
+```
+
+**Example - Remote Server Configuration**:
+
+```bash
+$ hatch mcp configure api-server --host claude-desktop --url https://api.example.com --headers Auth=token
+
+Server 'api-server' created for host 'claude-desktop':
+  name: UPDATED None --> 'api-server'
+  command: UPDATED None --> None
+  args: UPDATED None --> None
+  env: UPDATED None --> {}
+  url: UPDATED None --> 'https://api.example.com'
+  headers: UPDATED None --> {'Auth': 'token'}
+
+Configure MCP server 'api-server' on host 'claude-desktop'? [y/N]: y
+[SUCCESS] Successfully configured MCP server 'api-server' on host 'claude-desktop'
+```
+
+**Example - Dry Run Mode**:
+
+```bash
+$ hatch mcp configure my-server --host gemini --command python --args server.py --dry-run
+
+[DRY RUN] Would configure MCP server 'my-server' on host 'gemini':
+[DRY RUN] Command: python
+[DRY RUN] Args: ['server.py']
+[DRY RUN] Backup: Enabled
+[DRY RUN] Preview of changes for server 'my-server':
+  name: UPDATED None --> 'my-server'
+  command: UPDATED None --> 'python'
+  args: UPDATED None --> ['server.py']
+  env: UPDATED None --> {}
+  url: UPDATED None --> None
+
+No changes were made.
+```
+
+**Host-Specific Field Support**:
+
+Different MCP hosts support different configuration fields. The conversion report automatically filters unsupported fields:
+
+- **Claude Desktop / Claude Code**: Supports universal fields only (command, args, env, url, headers, type)
+- **Cursor / LM Studio**: Supports universal fields + envFile
+- **VS Code**: Supports universal fields + envFile, inputs
+- **Gemini CLI**: Supports universal fields + 14 additional fields (cwd, timeout, trust, OAuth settings, etc.)
+
+When configuring a server with fields not supported by the target host, those fields are marked as UNSUPPORTED in the report and automatically excluded from the configuration.
+
 ### `hatch mcp sync`
 
 Synchronize MCP configurations across environments and hosts.
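> Editor's note: the host-specific filtering described in the documentation above is driven by the Pydantic models added later in this patch (`HOST_MODEL_REGISTRY` and `MCPServerConfigOmni` in `hatch/mcp_host_config/models.py`). The following is a minimal sketch of how the per-host field support could be inspected, assuming those models behave as defined below; `timeout` and `trust` are Gemini-specific fields in this patch.

```python
# Sketch: inspect which fields each host model would drop from an Omni config.
# Assumes the models introduced in hatch/mcp_host_config/models.py in this patch.
from hatch.mcp_host_config.models import (
    HOST_MODEL_REGISTRY,
    MCPHostType,
    MCPServerConfigOmni,
)

# An Omni config mixing universal fields with Gemini-only fields.
omni = MCPServerConfigOmni(
    name="my-server",
    command="python",
    args=["server.py"],
    timeout=30000,  # Gemini-only
    trust=True,     # Gemini-only
)

for host in (MCPHostType.CLAUDE_DESKTOP, MCPHostType.GEMINI):
    model_cls = HOST_MODEL_REGISTRY[host]
    supported = set(model_cls.model_fields.keys())
    set_fields = set(omni.model_dump(exclude_unset=True).keys())
    print(host.value, "drops:", sorted(set_fields - supported))
# Per the support table above, claude-desktop should drop 'timeout' and 'trust',
# while gemini should drop nothing.
```

This is the same `model_fields`-based filtering that each host model's `from_omni()` method uses further down in this patch.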
diff --git a/hatch/cli_hatch.py b/hatch/cli_hatch.py index 0146cb6..b51fd6f 100644 --- a/hatch/cli_hatch.py +++ b/hatch/cli_hatch.py @@ -20,6 +20,8 @@ from hatch_validator.package.package_service import PackageService from hatch.template_generator import create_package_template from hatch.mcp_host_config import MCPHostConfigurationManager, MCPHostType, MCPHostRegistry, MCPServerConfig +from hatch.mcp_host_config.models import MCPServerConfigOmni, HOST_MODEL_REGISTRY +from hatch.mcp_host_config.reporting import generate_conversion_report, display_report def get_hatch_version() -> str: @@ -580,11 +582,53 @@ def parse_headers(headers_list: Optional[list]) -> dict: return headers_dict +def parse_inputs(inputs_list: Optional[list]) -> Optional[list]: + """Parse VS Code input variable definitions from command line format. + + Format: type,id,description[,password=true] + Example: promptString,api-key,GitHub Personal Access Token,password=true + + Returns: + List of input variable definition dictionaries, or None if no inputs provided. + """ + if not inputs_list: + return None + + parsed_inputs = [] + for input_str in inputs_list: + parts = [p.strip() for p in input_str.split(',')] + if len(parts) < 3: + print(f"Warning: Invalid input format '{input_str}'. Expected: type,id,description[,password=true]") + continue + + input_def = { + 'type': parts[0], + 'id': parts[1], + 'description': parts[2] + } + + # Check for optional password flag + if len(parts) > 3 and parts[3].lower() == 'password=true': + input_def['password'] = True + + parsed_inputs.append(input_def) + + return parsed_inputs if parsed_inputs else None + def handle_mcp_configure(host: str, server_name: str, command: str, args: list, env: Optional[list] = None, url: Optional[str] = None, - headers: Optional[list] = None, no_backup: bool = False, + headers: Optional[list] = None, timeout: Optional[int] = None, + trust: bool = False, cwd: Optional[str] = None, + env_file: Optional[str] = None, http_url: Optional[str] = None, + include_tools: Optional[list] = None, exclude_tools: Optional[list] = None, + inputs: Optional[list] = None, no_backup: bool = False, dry_run: bool = False, auto_approve: bool = False): - """Handle 'hatch mcp configure' command.""" + """Handle 'hatch mcp configure' command with ALL host-specific arguments. + + Host-specific arguments are accepted for all hosts. The reporting system will + show unsupported fields as "UNSUPPORTED" in the conversion report rather than + rejecting them upfront. + """ try: # Validate host type try: @@ -602,25 +646,74 @@ def handle_mcp_configure(host: str, server_name: str, command: str, args: list, print("Error: --args can only be used with --command (local servers), not with --url (remote servers)") return 1 - # Parse environment variables and headers + # NOTE: We do NOT validate host-specific arguments here. + # The reporting system will show unsupported fields as "UNSUPPORTED" in the conversion report. + # This allows users to see which fields are not supported by their target host without blocking the operation. 
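> Editor's note: the `# NOTE` comment above ends the first half of this hunk. Host-specific flags are accepted for every host and surfaced through the conversion report instead of being rejected upfront. A rough sketch of what that reporting path produces, assuming the helpers added in `hatch/mcp_host_config/reporting.py` later in this patch:

```python
# Sketch of the "report, don't reject" behaviour: a Gemini-only field targeted
# at Claude Desktop is marked UNSUPPORTED rather than raising an error.
from hatch.mcp_host_config.models import MCPHostType, MCPServerConfigOmni
from hatch.mcp_host_config.reporting import generate_conversion_report, display_report

omni = MCPServerConfigOmni(name="my-server", command="python", timeout=30000)

report = generate_conversion_report(
    operation="create",
    server_name="my-server",
    target_host=MCPHostType.CLAUDE_DESKTOP,  # does not support 'timeout'
    omni=omni,
)
display_report(report)
# Prints something like:
#   Server 'my-server' created for host 'claude-desktop':
#     name: UPDATED None --> 'my-server'
#     command: UPDATED None --> 'python'
#     timeout: UNSUPPORTED
```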
+ + # Parse environment variables, headers, and inputs env_dict = parse_env_vars(env) headers_dict = parse_headers(headers) - - # Create server configuration (only include headers if URL is provided) - config_data = { - 'name': server_name, - 'command': command, - 'args': args or [], - 'env': env_dict, - 'url': url - } - - # Only add headers if URL is provided (per MCPServerConfig validation) + inputs_list = parse_inputs(inputs) + + # Create Omni configuration (universal model) + # Only include fields that have actual values to ensure model_dump(exclude_unset=True) works correctly + omni_config_data = {'name': server_name} + + if command is not None: + omni_config_data['command'] = command + if args is not None: + omni_config_data['args'] = args + if env_dict: + omni_config_data['env'] = env_dict + if url is not None: + omni_config_data['url'] = url if url and headers_dict: - config_data['headers'] = headers_dict + omni_config_data['headers'] = headers_dict + + # Host-specific fields (Gemini) + if timeout is not None: + omni_config_data['timeout'] = timeout + if trust: + omni_config_data['trust'] = trust + if cwd is not None: + omni_config_data['cwd'] = cwd + if http_url is not None: + omni_config_data['httpUrl'] = http_url + if include_tools is not None: + omni_config_data['includeTools'] = include_tools + if exclude_tools is not None: + omni_config_data['excludeTools'] = exclude_tools + + # Host-specific fields (Cursor/VS Code/LM Studio) + if env_file is not None: + omni_config_data['envFile'] = env_file + + # Host-specific fields (VS Code) + if inputs_list is not None: + omni_config_data['inputs'] = inputs_list + + # Create Omni model + omni_config = MCPServerConfigOmni(**omni_config_data) + + # Convert to host-specific model using HOST_MODEL_REGISTRY + host_model_class = HOST_MODEL_REGISTRY.get(host_type) + if not host_model_class: + print(f"Error: No model registered for host '{host}'") + return 1 + + # Convert Omni to host-specific model + server_config = host_model_class.from_omni(omni_config) - server_config = MCPServerConfig(**config_data) + # Generate conversion report + report = generate_conversion_report( + operation='create', + server_name=server_name, + target_host=host_type, + omni=omni_config, + dry_run=dry_run + ) + # Display conversion report if dry_run: print(f"[DRY RUN] Would configure MCP server '{server_name}' on host '{host}':") print(f"[DRY RUN] Command: {command}") @@ -633,8 +726,13 @@ def handle_mcp_configure(host: str, server_name: str, command: str, args: list, if headers_dict: print(f"[DRY RUN] Headers: {headers_dict}") print(f"[DRY RUN] Backup: {'Disabled' if no_backup else 'Enabled'}") + # Display report in dry-run mode + display_report(report) return 0 + # Display report before confirmation + display_report(report) + # Confirm operation unless auto-approved if not request_confirmation( f"Configure MCP server '{server_name}' on host '{host}'?", @@ -1109,12 +1207,27 @@ def main(): # Create mutually exclusive group for server type server_type_group = mcp_configure_parser.add_mutually_exclusive_group(required=True) - server_type_group.add_argument("--command", help="Command to execute the MCP server (for local servers)") + server_type_group.add_argument("--command", dest="server_command", help="Command to execute the MCP server (for local servers)") server_type_group.add_argument("--url", help="Server URL for remote MCP servers") mcp_configure_parser.add_argument("--args", nargs="*", help="Arguments for the MCP server command (only with --command)") 
mcp_configure_parser.add_argument("--env-var", action="append", help="Environment variables (format: KEY=VALUE)") mcp_configure_parser.add_argument("--headers", action="append", help="HTTP headers for remote servers (format: KEY=VALUE, only with --url)") + + # Host-specific arguments (Gemini) + mcp_configure_parser.add_argument("--timeout", type=int, help="Request timeout in milliseconds (Gemini)") + mcp_configure_parser.add_argument("--trust", action="store_true", help="Bypass tool call confirmations (Gemini)") + mcp_configure_parser.add_argument("--cwd", help="Working directory for stdio transport (Gemini)") + mcp_configure_parser.add_argument("--http-url", help="HTTP streaming endpoint URL (Gemini)") + mcp_configure_parser.add_argument("--include-tools", nargs="*", help="Tool allowlist - only these tools will be available (Gemini)") + mcp_configure_parser.add_argument("--exclude-tools", nargs="*", help="Tool blocklist - these tools will be excluded (Gemini)") + + # Host-specific arguments (Cursor/VS Code/LM Studio) + mcp_configure_parser.add_argument("--env-file", help="Path to environment file (Cursor, VS Code, LM Studio)") + + # Host-specific arguments (VS Code) + mcp_configure_parser.add_argument("--inputs", action="append", help="Input variable definitions in format: type,id,description[,password=true] (VS Code)") + mcp_configure_parser.add_argument("--no-backup", action="store_true", help="Skip backup creation before configuration") mcp_configure_parser.add_argument("--dry-run", action="store_true", help="Preview configuration without execution") mcp_configure_parser.add_argument("--auto-approve", action="store_true", help="Skip confirmation prompts") @@ -1542,45 +1655,87 @@ def main(): # Configure on each host success_count = 0 for host in hosts: # 'host', here, is a string - host_success_count = 0 - for i, server_config in enumerate(server_configs): - pkg_name = package_names[i] - try: - result = mcp_manager.configure_server( - hostname=host, - server_config=server_config, - no_backup=False # Always backup when adding packages - ) - - if result.success: - print(f"✓ Configured {server_config.name} ({pkg_name}) on {host}") - host_success_count += 1 - - # Update package metadata with host configuration tracking - try: - server_config_dict = { - "name": server_config.name, - "command": server_config.command, - "args": server_config.args - } + try: + # Convert string to MCPHostType enum + host_type = MCPHostType(host) + host_model_class = HOST_MODEL_REGISTRY.get(host_type) + if not host_model_class: + print(f"✗ Error: No model registered for host '{host}'") + continue + + host_success_count = 0 + for i, server_config in enumerate(server_configs): + pkg_name = package_names[i] + try: + # Convert MCPServerConfig to Omni model + # Only include fields that have actual values + omni_config_data = {'name': server_config.name} + if server_config.command is not None: + omni_config_data['command'] = server_config.command + if server_config.args is not None: + omni_config_data['args'] = server_config.args + if server_config.env: + omni_config_data['env'] = server_config.env + if server_config.url is not None: + omni_config_data['url'] = server_config.url + headers = getattr(server_config, 'headers', None) + if headers is not None: + omni_config_data['headers'] = headers + + omni_config = MCPServerConfigOmni(**omni_config_data) + + # Convert to host-specific model + host_config = host_model_class.from_omni(omni_config) + + # Generate and display conversion report + report = 
generate_conversion_report( + operation='create', + server_name=server_config.name, + target_host=host_type, + omni=omni_config, + dry_run=False + ) + display_report(report) + + result = mcp_manager.configure_server( + hostname=host, + server_config=host_config, + no_backup=False # Always backup when adding packages + ) + + if result.success: + print(f"✓ Configured {server_config.name} ({pkg_name}) on {host}") + host_success_count += 1 + + # Update package metadata with host configuration tracking + try: + server_config_dict = { + "name": server_config.name, + "command": server_config.command, + "args": server_config.args + } + + env_manager.update_package_host_configuration( + env_name=env_name, + package_name=pkg_name, + hostname=host, + server_config=server_config_dict + ) + except Exception as e: + # Log but don't fail the configuration operation + print(f"[WARNING] Failed to update package metadata for {pkg_name}: {e}") + else: + print(f"✗ Failed to configure {server_config.name} ({pkg_name}) on {host}: {result.error_message}") - env_manager.update_package_host_configuration( - env_name=env_name, - package_name=pkg_name, - hostname=host, - server_config=server_config_dict - ) - except Exception as e: - # Log but don't fail the configuration operation - print(f"[WARNING] Failed to update package metadata for {pkg_name}: {e}") - else: - print(f"✗ Failed to configure {server_config.name} ({pkg_name}) on {host}: {result.error_message}") + except Exception as e: + print(f"✗ Error configuring {server_config.name} ({pkg_name}) on {host}: {e}") - except Exception as e: - print(f"✗ Error configuring {server_config.name} ({pkg_name}) on {host}: {e}") + if host_success_count == len(server_configs): + success_count += 1 - if host_success_count == len(server_configs): - success_count += 1 + except ValueError as e: + print(f"✗ Invalid host '{host}': {e}") + continue if success_count > 0: print(f"MCP configuration completed: {success_count}/{len(hosts)} hosts configured") @@ -1676,6 +1831,45 @@ def main(): print(f"[DRY RUN] Would synchronize MCP servers for {len(server_configs)} package(s) to hosts: {[h for h in hosts]}") for pkg_name, config in server_configs: print(f"[DRY RUN] - {pkg_name}: {config.name} -> {' '.join(config.args)}") + + # Generate and display conversion reports for dry-run mode + for host in hosts: + try: + host_type = MCPHostType(host) + host_model_class = HOST_MODEL_REGISTRY.get(host_type) + if not host_model_class: + print(f"[DRY RUN] ✗ Error: No model registered for host '{host}'") + continue + + # Convert to Omni model + # Only include fields that have actual values + omni_config_data = {'name': config.name} + if config.command is not None: + omni_config_data['command'] = config.command + if config.args is not None: + omni_config_data['args'] = config.args + if config.env: + omni_config_data['env'] = config.env + if config.url is not None: + omni_config_data['url'] = config.url + headers = getattr(config, 'headers', None) + if headers is not None: + omni_config_data['headers'] = headers + + omni_config = MCPServerConfigOmni(**omni_config_data) + + # Generate report + report = generate_conversion_report( + operation='create', + server_name=config.name, + target_host=host_type, + omni=omni_config, + dry_run=True + ) + print(f"[DRY RUN] Preview for {pkg_name} on {host}:") + display_report(report) + except ValueError as e: + print(f"[DRY RUN] ✗ Invalid host '{host}': {e}") return 0 # Confirm operation unless auto-approved @@ -1692,40 +1886,82 @@ def main(): success_count = 0 for 
host in hosts: - for pkg_name, server_config in server_configs: - try: - result = mcp_manager.configure_server( - hostname=host, - server_config=server_config, - no_backup=args.no_backup - ) - - if result.success: - print(f"[SUCCESS] Successfully configured {server_config.name} ({pkg_name}) on {host}") - success_count += 1 - - # Update package metadata with host configuration tracking - try: - server_config_dict = { - "name": server_config.name, - "command": server_config.command, - "args": server_config.args - } - - env_manager.update_package_host_configuration( - env_name=env_name, - package_name=pkg_name, - hostname=host, - server_config=server_config_dict - ) - except Exception as e: - # Log but don't fail the sync operation - print(f"[WARNING] Failed to update package metadata for {pkg_name}: {e}") - else: - print(f"[ERROR] Failed to configure {server_config.name} ({pkg_name}) on {host}: {result.error_message}") - - except Exception as e: - print(f"[ERROR] Error configuring {server_config.name} ({pkg_name}) on {host}: {e}") + try: + # Convert string to MCPHostType enum + host_type = MCPHostType(host) + host_model_class = HOST_MODEL_REGISTRY.get(host_type) + if not host_model_class: + print(f"✗ Error: No model registered for host '{host}'") + continue + + for pkg_name, server_config in server_configs: + try: + # Convert MCPServerConfig to Omni model + # Only include fields that have actual values + omni_config_data = {'name': server_config.name} + if server_config.command is not None: + omni_config_data['command'] = server_config.command + if server_config.args is not None: + omni_config_data['args'] = server_config.args + if server_config.env: + omni_config_data['env'] = server_config.env + if server_config.url is not None: + omni_config_data['url'] = server_config.url + headers = getattr(server_config, 'headers', None) + if headers is not None: + omni_config_data['headers'] = headers + + omni_config = MCPServerConfigOmni(**omni_config_data) + + # Convert to host-specific model + host_config = host_model_class.from_omni(omni_config) + + # Generate and display conversion report + report = generate_conversion_report( + operation='create', + server_name=server_config.name, + target_host=host_type, + omni=omni_config, + dry_run=False + ) + display_report(report) + + result = mcp_manager.configure_server( + hostname=host, + server_config=host_config, + no_backup=args.no_backup + ) + + if result.success: + print(f"[SUCCESS] Successfully configured {server_config.name} ({pkg_name}) on {host}") + success_count += 1 + + # Update package metadata with host configuration tracking + try: + server_config_dict = { + "name": server_config.name, + "command": server_config.command, + "args": server_config.args + } + + env_manager.update_package_host_configuration( + env_name=env_name, + package_name=pkg_name, + hostname=host, + server_config=server_config_dict + ) + except Exception as e: + # Log but don't fail the sync operation + print(f"[WARNING] Failed to update package metadata for {pkg_name}: {e}") + else: + print(f"[ERROR] Failed to configure {server_config.name} ({pkg_name}) on {host}: {result.error_message}") + + except Exception as e: + print(f"[ERROR] Error configuring {server_config.name} ({pkg_name}) on {host}: {e}") + + except ValueError as e: + print(f"✗ Invalid host '{host}': {e}") + continue # Report results if success_count == total_operations: @@ -1785,9 +2021,13 @@ def main(): elif args.mcp_command == "configure": return handle_mcp_configure( - args.host, args.server_name, 
args.command, args.args, - getattr(args, 'env_var', None), args.url, args.headers, args.no_backup, - args.dry_run, args.auto_approve + args.host, args.server_name, args.server_command, args.args, + getattr(args, 'env_var', None), args.url, args.headers, + getattr(args, 'timeout', None), getattr(args, 'trust', False), + getattr(args, 'cwd', None), getattr(args, 'env_file', None), + getattr(args, 'http_url', None), getattr(args, 'include_tools', None), + getattr(args, 'exclude_tools', None), getattr(args, 'inputs', None), + args.no_backup, args.dry_run, args.auto_approve ) elif args.mcp_command == "remove": diff --git a/hatch/mcp_host_config/__init__.py b/hatch/mcp_host_config/__init__.py index d89833a..03c8178 100644 --- a/hatch/mcp_host_config/__init__.py +++ b/hatch/mcp_host_config/__init__.py @@ -8,11 +8,18 @@ from .backup import MCPHostConfigBackupManager from .models import ( MCPHostType, MCPServerConfig, HostConfiguration, EnvironmentData, - PackageHostConfiguration, EnvironmentPackageEntry, ConfigurationResult, SyncResult + PackageHostConfiguration, EnvironmentPackageEntry, ConfigurationResult, SyncResult, + # Host-specific configuration models + MCPServerConfigBase, MCPServerConfigGemini, MCPServerConfigVSCode, + MCPServerConfigCursor, MCPServerConfigClaude, MCPServerConfigOmni, + HOST_MODEL_REGISTRY ) from .host_management import ( MCPHostRegistry, MCPHostStrategy, MCPHostConfigurationManager, register_host_strategy ) +from .reporting import ( + FieldOperation, ConversionReport, generate_conversion_report, display_report +) # Import strategies to trigger decorator registration from . import strategies @@ -21,5 +28,11 @@ 'MCPHostConfigBackupManager', 'MCPHostType', 'MCPServerConfig', 'HostConfiguration', 'EnvironmentData', 'PackageHostConfiguration', 'EnvironmentPackageEntry', 'ConfigurationResult', 'SyncResult', + # Host-specific configuration models + 'MCPServerConfigBase', 'MCPServerConfigGemini', 'MCPServerConfigVSCode', + 'MCPServerConfigCursor', 'MCPServerConfigClaude', 'MCPServerConfigOmni', + 'HOST_MODEL_REGISTRY', + # User feedback reporting + 'FieldOperation', 'ConversionReport', 'generate_conversion_report', 'display_report', 'MCPHostRegistry', 'MCPHostStrategy', 'MCPHostConfigurationManager', 'register_host_strategy' ] diff --git a/hatch/mcp_host_config/models.py b/hatch/mcp_host_config/models.py index ae73450..a713ed8 100644 --- a/hatch/mcp_host_config/models.py +++ b/hatch/mcp_host_config/models.py @@ -7,7 +7,7 @@ """ from pydantic import BaseModel, Field, field_validator, model_validator, ConfigDict -from typing import Dict, List, Optional, Union +from typing import Dict, List, Optional, Union, Literal from datetime import datetime from pathlib import Path from enum import Enum @@ -34,12 +34,18 @@ class MCPServerConfig(BaseModel): # Server identification name: Optional[str] = Field(None, description="Server name for identification") - # Local server configuration (Pattern A: Command-Based) + # Transport type (PRIMARY DISCRIMINATOR) + type: Optional[Literal["stdio", "sse", "http"]] = Field( + None, + description="Transport type (stdio for local, sse/http for remote)" + ) + + # Local server configuration (Pattern A: Command-Based / stdio transport) command: Optional[str] = Field(None, description="Executable path/name for local servers") args: Optional[List[str]] = Field(None, description="Command arguments for local servers") - env: Optional[Dict[str, str]] = Field(None, description="Environment variables for local servers") + env: Optional[Dict[str, str]] = 
Field(None, description="Environment variables for all transports") - # Remote server configuration (Pattern B: URL-Based) + # Remote server configuration (Pattern B: URL-Based / sse/http transports) url: Optional[str] = Field(None, description="Server endpoint URL for remote servers") headers: Optional[Dict[str, str]] = Field(None, description="HTTP headers for remote servers") @@ -81,24 +87,46 @@ def validate_field_combinations(self): if self.args is not None and self.command is None: raise ValueError("'args' can only be specified with 'command' for local servers") - # Validate env is only provided with command - if self.env is not None and self.command is None: - raise ValueError("'env' can only be specified with 'command' for local servers") - # Validate headers are only provided with URL if self.headers is not None and self.url is None: raise ValueError("'headers' can only be specified with 'url' for remote servers") return self - + + @model_validator(mode='after') + def validate_type_field(self): + """Validate type field consistency with command/url fields.""" + # Only validate if type field is explicitly set + if self.type is not None: + if self.type == "stdio": + if not self.command: + raise ValueError("'type=stdio' requires 'command' field") + if self.url: + raise ValueError("'type=stdio' cannot be used with 'url' field") + elif self.type in ("sse", "http"): + if not self.url: + raise ValueError(f"'type={self.type}' requires 'url' field") + if self.command: + raise ValueError(f"'type={self.type}' cannot be used with 'command' field") + + return self + @property def is_local_server(self) -> bool: """Check if this is a local server configuration.""" + # Prioritize type field if present + if self.type is not None: + return self.type == "stdio" + # Fall back to command detection for backward compatibility return self.command is not None - + @property def is_remote_server(self) -> bool: """Check if this is a remote server configuration.""" + # Prioritize type field if present + if self.type is not None: + return self.type in ("sse", "http") + # Fall back to url detection for backward compatibility return self.url is not None @@ -294,3 +322,235 @@ def success_rate(self) -> float: return 0.0 successful = len([r for r in self.results if r.success]) return (successful / len(self.results)) * 100.0 + + +# ============================================================================ +# MCP Host-Specific Configuration Models +# ============================================================================ + + +class MCPServerConfigBase(BaseModel): + """Base class for MCP server configurations with universal fields. + + This model contains fields supported by ALL MCP hosts and provides + transport validation logic. Host-specific models inherit from this base. 
+ """ + + model_config = ConfigDict(extra="forbid") + + # Hatch-specific field + name: Optional[str] = Field(None, description="Server name for identification") + + # Transport type (PRIMARY DISCRIMINATOR) + type: Optional[Literal["stdio", "sse", "http"]] = Field( + None, + description="Transport type (stdio for local, sse/http for remote)" + ) + + # stdio transport fields + command: Optional[str] = Field(None, description="Server executable command") + args: Optional[List[str]] = Field(None, description="Command arguments") + + # All transports + env: Optional[Dict[str, str]] = Field(None, description="Environment variables") + + # Remote transport fields (sse/http) + url: Optional[str] = Field(None, description="Remote server endpoint") + headers: Optional[Dict[str, str]] = Field(None, description="HTTP headers") + + @model_validator(mode='after') + def validate_transport(self) -> 'MCPServerConfigBase': + """Validate transport configuration using type field.""" + # Check mutual exclusion - command and url cannot both be set + if self.command is not None and self.url is not None: + raise ValueError( + "Cannot specify both 'command' and 'url' - use 'type' field to specify transport" + ) + + # Validate based on type + if self.type == "stdio": + if not self.command: + raise ValueError("'command' is required for stdio transport") + elif self.type in ("sse", "http"): + if not self.url: + raise ValueError("'url' is required for sse/http transports") + elif self.type is None: + # Infer type from fields if not specified + if self.command: + self.type = "stdio" + elif self.url: + self.type = "sse" # default to sse for remote + else: + raise ValueError("Either 'command' or 'url' must be provided") + + return self + + +class MCPServerConfigGemini(MCPServerConfigBase): + """Gemini CLI-specific MCP server configuration. + + Extends base model with Gemini-specific fields including working directory, + timeout, trust mode, tool filtering, and OAuth configuration. 
+ """ + + # Gemini-specific fields + cwd: Optional[str] = Field(None, description="Working directory for stdio transport") + timeout: Optional[int] = Field(None, description="Request timeout in milliseconds") + trust: Optional[bool] = Field(None, description="Bypass tool call confirmations") + httpUrl: Optional[str] = Field(None, description="HTTP streaming endpoint URL") + includeTools: Optional[List[str]] = Field(None, description="Tools to include (allowlist)") + excludeTools: Optional[List[str]] = Field(None, description="Tools to exclude (blocklist)") + + # OAuth configuration (simplified - nested object would be better but keeping flat for now) + oauth_enabled: Optional[bool] = Field(None, description="Enable OAuth for this server") + oauth_clientId: Optional[str] = Field(None, description="OAuth client identifier") + oauth_clientSecret: Optional[str] = Field(None, description="OAuth client secret") + oauth_authorizationUrl: Optional[str] = Field(None, description="OAuth authorization endpoint") + oauth_tokenUrl: Optional[str] = Field(None, description="OAuth token endpoint") + oauth_scopes: Optional[List[str]] = Field(None, description="Required OAuth scopes") + oauth_redirectUri: Optional[str] = Field(None, description="Custom redirect URI") + oauth_tokenParamName: Optional[str] = Field(None, description="Query parameter name for tokens") + oauth_audiences: Optional[List[str]] = Field(None, description="OAuth audiences") + authProviderType: Optional[str] = Field(None, description="Authentication provider type") + + @classmethod + def from_omni(cls, omni: 'MCPServerConfigOmni') -> 'MCPServerConfigGemini': + """Convert Omni model to Gemini-specific model using Pydantic APIs.""" + # Get supported fields dynamically from model definition + supported_fields = set(cls.model_fields.keys()) + + # Use Pydantic's model_dump with include and exclude_unset + gemini_data = omni.model_dump(include=supported_fields, exclude_unset=True) + + # Use Pydantic's model_validate for type-safe creation + return cls.model_validate(gemini_data) + + +class MCPServerConfigVSCode(MCPServerConfigBase): + """VS Code-specific MCP server configuration. + + Extends base model with VS Code-specific fields including environment file + path and input variable definitions. + """ + + # VS Code-specific fields + envFile: Optional[str] = Field(None, description="Path to environment file") + inputs: Optional[List[Dict]] = Field(None, description="Input variable definitions") + + @classmethod + def from_omni(cls, omni: 'MCPServerConfigOmni') -> 'MCPServerConfigVSCode': + """Convert Omni model to VS Code-specific model.""" + # Get supported fields dynamically + supported_fields = set(cls.model_fields.keys()) + + # Single-call field filtering + vscode_data = omni.model_dump(include=supported_fields, exclude_unset=True) + + return cls.model_validate(vscode_data) + + +class MCPServerConfigCursor(MCPServerConfigBase): + """Cursor/LM Studio-specific MCP server configuration. + + Extends base model with Cursor-specific fields including environment file path. + Cursor handles config interpolation (${env:NAME}, ${userHome}, etc.) at runtime. 
+ """ + + # Cursor-specific fields + envFile: Optional[str] = Field(None, description="Path to environment file") + + @classmethod + def from_omni(cls, omni: 'MCPServerConfigOmni') -> 'MCPServerConfigCursor': + """Convert Omni model to Cursor-specific model.""" + # Get supported fields dynamically + supported_fields = set(cls.model_fields.keys()) + + # Single-call field filtering + cursor_data = omni.model_dump(include=supported_fields, exclude_unset=True) + + return cls.model_validate(cursor_data) + + +class MCPServerConfigClaude(MCPServerConfigBase): + """Claude Desktop/Code-specific MCP server configuration. + + Uses only universal fields from base model. Supports all transport types + (stdio, sse, http). Claude handles environment variable expansion at runtime. + """ + + # No host-specific fields - uses universal fields only + + @classmethod + def from_omni(cls, omni: 'MCPServerConfigOmni') -> 'MCPServerConfigClaude': + """Convert Omni model to Claude-specific model.""" + # Get supported fields dynamically + supported_fields = set(cls.model_fields.keys()) + + # Single-call field filtering + claude_data = omni.model_dump(include=supported_fields, exclude_unset=True) + + return cls.model_validate(claude_data) + + +class MCPServerConfigOmni(BaseModel): + """Omni configuration supporting all host-specific fields. + + This is the primary API interface for MCP server configuration. It contains + all possible fields from all hosts. Use host-specific models' from_omni() + methods to convert to host-specific configurations. + """ + + model_config = ConfigDict(extra="forbid") + + # Hatch-specific + name: Optional[str] = None + + # Universal fields (all hosts) + type: Optional[Literal["stdio", "sse", "http"]] = None + command: Optional[str] = None + args: Optional[List[str]] = None + env: Optional[Dict[str, str]] = None + url: Optional[str] = None + headers: Optional[Dict[str, str]] = None + + # Gemini CLI specific + cwd: Optional[str] = None + timeout: Optional[int] = None + trust: Optional[bool] = None + httpUrl: Optional[str] = None + includeTools: Optional[List[str]] = None + excludeTools: Optional[List[str]] = None + oauth_enabled: Optional[bool] = None + oauth_clientId: Optional[str] = None + oauth_clientSecret: Optional[str] = None + oauth_authorizationUrl: Optional[str] = None + oauth_tokenUrl: Optional[str] = None + oauth_scopes: Optional[List[str]] = None + oauth_redirectUri: Optional[str] = None + oauth_tokenParamName: Optional[str] = None + oauth_audiences: Optional[List[str]] = None + authProviderType: Optional[str] = None + + # VS Code specific + envFile: Optional[str] = None + inputs: Optional[List[Dict]] = None + + @field_validator('url') + @classmethod + def validate_url_format(cls, v): + """Validate URL format when provided.""" + if v is not None: + if not v.startswith(('http://', 'https://')): + raise ValueError("URL must start with http:// or https://") + return v + + +# HOST_MODEL_REGISTRY: Dictionary dispatch for host-specific models +HOST_MODEL_REGISTRY: Dict[MCPHostType, type[MCPServerConfigBase]] = { + MCPHostType.GEMINI: MCPServerConfigGemini, + MCPHostType.CLAUDE_DESKTOP: MCPServerConfigClaude, + MCPHostType.CLAUDE_CODE: MCPServerConfigClaude, # Same as CLAUDE_DESKTOP + MCPHostType.VSCODE: MCPServerConfigVSCode, + MCPHostType.CURSOR: MCPServerConfigCursor, + MCPHostType.LMSTUDIO: MCPServerConfigCursor, # Same as CURSOR +} diff --git a/hatch/mcp_host_config/reporting.py b/hatch/mcp_host_config/reporting.py new file mode 100644 index 0000000..2710a05 --- /dev/null +++ 
b/hatch/mcp_host_config/reporting.py @@ -0,0 +1,181 @@ +""" +User feedback reporting system for MCP configuration operations. + +This module provides models and functions for generating and displaying +user-friendly reports about MCP configuration changes, including field-level +operations and conversion summaries. +""" + +from typing import Literal, Optional, Any, List +from pydantic import BaseModel, ConfigDict + +from .models import MCPServerConfigOmni, MCPHostType, HOST_MODEL_REGISTRY + + +class FieldOperation(BaseModel): + """Single field operation in a conversion. + + Represents a single field-level change during MCP configuration conversion, + including the operation type (UPDATED, UNSUPPORTED, UNCHANGED) and values. + """ + + field_name: str + operation: Literal["UPDATED", "UNSUPPORTED", "UNCHANGED"] + old_value: Optional[Any] = None + new_value: Optional[Any] = None + + def __str__(self) -> str: + """Return formatted string representation for console output. + + Uses ASCII arrow (-->) for terminal compatibility instead of Unicode. + """ + if self.operation == "UPDATED": + return f"{self.field_name}: UPDATED {repr(self.old_value)} --> {repr(self.new_value)}" + elif self.operation == "UNSUPPORTED": + return f"{self.field_name}: UNSUPPORTED" + elif self.operation == "UNCHANGED": + return f"{self.field_name}: UNCHANGED {repr(self.new_value)}" + return f"{self.field_name}: {self.operation}" + + +class ConversionReport(BaseModel): + """Complete conversion report for a configuration operation. + + Contains metadata about the operation (create, update, delete, migrate) + and a list of field-level operations that occurred during conversion. + """ + + model_config = ConfigDict(validate_assignment=False) + + operation: Literal["create", "update", "delete", "migrate"] + server_name: str + source_host: Optional[MCPHostType] = None + target_host: MCPHostType + success: bool = True + error_message: Optional[str] = None + field_operations: List[FieldOperation] = [] + dry_run: bool = False + + +def generate_conversion_report( + operation: Literal["create", "update", "delete", "migrate"], + server_name: str, + target_host: MCPHostType, + omni: MCPServerConfigOmni, + source_host: Optional[MCPHostType] = None, + old_config: Optional[MCPServerConfigOmni] = None, + dry_run: bool = False +) -> ConversionReport: + """Generate conversion report for a configuration operation. + + Analyzes the conversion from Omni model to host-specific configuration, + identifying which fields were updated, which are unsupported, and which + remained unchanged. 
+ + Args: + operation: Type of operation being performed + server_name: Name of the server being configured + target_host: Target host for the configuration (MCPHostType enum) + omni: New/updated configuration (Omni model) + source_host: Source host (for migrate operation, MCPHostType enum) + old_config: Existing configuration (for update operation) + dry_run: Whether this is a dry-run preview + + Returns: + ConversionReport with field-level operations + """ + # Derive supported fields dynamically from model class + model_class = HOST_MODEL_REGISTRY[target_host] + supported_fields = set(model_class.model_fields.keys()) + + field_operations = [] + set_fields = omni.model_dump(exclude_unset=True) + + for field_name, new_value in set_fields.items(): + if field_name in supported_fields: + # Field is supported by target host + if old_config: + # Update operation - check if field changed + old_fields = old_config.model_dump(exclude_unset=True) + if field_name in old_fields: + old_value = old_fields[field_name] + if old_value != new_value: + # Field was modified + field_operations.append(FieldOperation( + field_name=field_name, + operation="UPDATED", + old_value=old_value, + new_value=new_value + )) + else: + # Field unchanged + field_operations.append(FieldOperation( + field_name=field_name, + operation="UNCHANGED", + new_value=new_value + )) + else: + # Field was added + field_operations.append(FieldOperation( + field_name=field_name, + operation="UPDATED", + old_value=None, + new_value=new_value + )) + else: + # Create operation - all fields are new + field_operations.append(FieldOperation( + field_name=field_name, + operation="UPDATED", + old_value=None, + new_value=new_value + )) + else: + # Field is not supported by target host + field_operations.append(FieldOperation( + field_name=field_name, + operation="UNSUPPORTED", + new_value=new_value + )) + + return ConversionReport( + operation=operation, + server_name=server_name, + source_host=source_host, + target_host=target_host, + field_operations=field_operations, + dry_run=dry_run + ) + + +def display_report(report: ConversionReport) -> None: + """Display conversion report to console. + + Prints a formatted report showing the operation performed and all + field-level changes. Uses FieldOperation.__str__() for consistent + formatting. 
+ + Args: + report: ConversionReport to display + """ + # Header + if report.dry_run: + print(f"[DRY RUN] Preview of changes for server '{report.server_name}':") + else: + if report.operation == "create": + print(f"Server '{report.server_name}' created for host '{report.target_host.value}':") + elif report.operation == "update": + print(f"Server '{report.server_name}' updated for host '{report.target_host.value}':") + elif report.operation == "migrate": + print(f"Server '{report.server_name}' migrated from '{report.source_host.value}' to '{report.target_host.value}':") + elif report.operation == "delete": + print(f"Server '{report.server_name}' deleted from host '{report.target_host.value}':") + + # Field operations + for field_op in report.field_operations: + print(f" {field_op}") + + # Footer + if report.dry_run: + print("\nNo changes were made.") + diff --git a/tests/test_data/configs/mcp_host_test_configs/claude_desktop_config.json b/tests/test_data/configs/mcp_host_test_configs/claude_desktop_config.json new file mode 100644 index 0000000..6106744 --- /dev/null +++ b/tests/test_data/configs/mcp_host_test_configs/claude_desktop_config.json @@ -0,0 +1,4 @@ +{ + "mcpServers": {} +} + diff --git a/tests/test_data/configs/mcp_host_test_configs/claude_desktop_config_with_server.json b/tests/test_data/configs/mcp_host_test_configs/claude_desktop_config_with_server.json new file mode 100644 index 0000000..39f52d2 --- /dev/null +++ b/tests/test_data/configs/mcp_host_test_configs/claude_desktop_config_with_server.json @@ -0,0 +1,12 @@ +{ + "mcpServers": { + "existing-server": { + "command": "python", + "args": ["server.py"], + "env": { + "API_KEY": "secret" + } + } + } +} + diff --git a/tests/test_data/configs/mcp_host_test_configs/cursor_mcp.json b/tests/test_data/configs/mcp_host_test_configs/cursor_mcp.json new file mode 100644 index 0000000..6106744 --- /dev/null +++ b/tests/test_data/configs/mcp_host_test_configs/cursor_mcp.json @@ -0,0 +1,4 @@ +{ + "mcpServers": {} +} + diff --git a/tests/test_data/configs/mcp_host_test_configs/cursor_mcp_with_server.json b/tests/test_data/configs/mcp_host_test_configs/cursor_mcp_with_server.json new file mode 100644 index 0000000..4eac728 --- /dev/null +++ b/tests/test_data/configs/mcp_host_test_configs/cursor_mcp_with_server.json @@ -0,0 +1,12 @@ +{ + "mcpServers": { + "existing-server": { + "command": "node", + "args": ["server.js"], + "env": { + "NODE_ENV": "production" + } + } + } +} + diff --git a/tests/test_data/configs/mcp_host_test_configs/gemini_cli_config.json b/tests/test_data/configs/mcp_host_test_configs/gemini_cli_config.json new file mode 100644 index 0000000..6106744 --- /dev/null +++ b/tests/test_data/configs/mcp_host_test_configs/gemini_cli_config.json @@ -0,0 +1,4 @@ +{ + "mcpServers": {} +} + diff --git a/tests/test_data/configs/mcp_host_test_configs/gemini_cli_config_with_server.json b/tests/test_data/configs/mcp_host_test_configs/gemini_cli_config_with_server.json new file mode 100644 index 0000000..c553c14 --- /dev/null +++ b/tests/test_data/configs/mcp_host_test_configs/gemini_cli_config_with_server.json @@ -0,0 +1,15 @@ +{ + "mcpServers": { + "existing-server": { + "command": "python", + "args": ["server.py"], + "env": { + "API_KEY": "secret" + }, + "timeout": 30, + "trust": true, + "cwd": "/path/to/server" + } + } +} + diff --git a/tests/test_data/configs/mcp_host_test_configs/vscode_mcp.json b/tests/test_data/configs/mcp_host_test_configs/vscode_mcp.json new file mode 100644 index 0000000..6106744 --- /dev/null +++ 
b/tests/test_data/configs/mcp_host_test_configs/vscode_mcp.json @@ -0,0 +1,4 @@ +{ + "mcpServers": {} +} + diff --git a/tests/test_data/configs/mcp_host_test_configs/vscode_mcp_with_server.json b/tests/test_data/configs/mcp_host_test_configs/vscode_mcp_with_server.json new file mode 100644 index 0000000..ff8de11 --- /dev/null +++ b/tests/test_data/configs/mcp_host_test_configs/vscode_mcp_with_server.json @@ -0,0 +1,13 @@ +{ + "mcpServers": { + "existing-server": { + "command": "python", + "args": ["-m", "server"], + "env": { + "DEBUG": "true" + }, + "envFile": ".env" + } + } +} + diff --git a/tests/test_data/packages/dependencies/simple_dep_pkg/hatch_metadata.json b/tests/test_data/packages/dependencies/simple_dep_pkg/hatch_metadata.json index 89e59e7..f4928d7 100644 --- a/tests/test_data/packages/dependencies/simple_dep_pkg/hatch_metadata.json +++ b/tests/test_data/packages/dependencies/simple_dep_pkg/hatch_metadata.json @@ -29,7 +29,7 @@ "dependencies": { "hatch": [ { - "name": "base_pkg", + "name": "../../basic/base_pkg", "version_constraint": ">=1.0.0" } ] diff --git a/tests/test_env_manip.py b/tests/test_env_manip.py index 9350eee..fca4276 100644 --- a/tests/test_env_manip.py +++ b/tests/test_env_manip.py @@ -113,14 +113,14 @@ def _create_sample_registry(self): { "name": dep["name"], "version_constraint": dep.get("version_constraint", "") - } for dep in metadata.get("hatch_dependencies", []) + } for dep in metadata.get("dependencies", {}).get("hatch", []) ], "python_dependencies_added": [ { "name": dep["name"], "version_constraint": dep.get("version_constraint", ""), "package_manager": dep.get("package_manager", "pip") - } for dep in metadata.get("python_dependencies", []) + } for dep in metadata.get("dependencies", {}).get("python", []) ], "hatch_dependencies_removed": [], "hatch_dependencies_modified": [], diff --git a/tests/test_mcp_cli_all_host_specific_args.py b/tests/test_mcp_cli_all_host_specific_args.py new file mode 100644 index 0000000..20539da --- /dev/null +++ b/tests/test_mcp_cli_all_host_specific_args.py @@ -0,0 +1,303 @@ +""" +Tests for ALL host-specific CLI arguments in MCP configure command. + +This module tests that: +1. All host-specific arguments are accepted for all hosts +2. Unsupported fields are reported as "UNSUPPORTED" in conversion reports +3. 
All new arguments (httpUrl, includeTools, excludeTools, inputs) work correctly +""" + +import unittest +from unittest.mock import patch, MagicMock +from io import StringIO + +from hatch.cli_hatch import handle_mcp_configure, parse_inputs +from hatch.mcp_host_config import MCPHostType +from hatch.mcp_host_config.models import ( + MCPServerConfigGemini, MCPServerConfigCursor, MCPServerConfigVSCode, + MCPServerConfigClaude +) + + +class TestAllGeminiArguments(unittest.TestCase): + """Test ALL Gemini-specific CLI arguments.""" + + @patch('hatch.cli_hatch.MCPHostConfigurationManager') + @patch('sys.stdout', new_callable=StringIO) + def test_all_gemini_arguments_accepted(self, mock_stdout, mock_manager_class): + """Test that all Gemini arguments are accepted and passed to model.""" + mock_manager = MagicMock() + mock_manager_class.return_value = mock_manager + + mock_result = MagicMock() + mock_result.success = True + mock_result.backup_path = None + mock_manager.configure_server.return_value = mock_result + + result = handle_mcp_configure( + host='gemini', + server_name='test-server', + command='python', + args=['server.py'], + timeout=30000, + trust=True, + cwd='/workspace', + http_url='https://api.example.com/mcp', + include_tools=['tool1', 'tool2'], + exclude_tools=['dangerous_tool'], + auto_approve=True + ) + + self.assertEqual(result, 0) + + # Verify all fields were passed to Gemini model + call_args = mock_manager.configure_server.call_args + server_config = call_args.kwargs['server_config'] + self.assertIsInstance(server_config, MCPServerConfigGemini) + self.assertEqual(server_config.timeout, 30000) + self.assertEqual(server_config.trust, True) + self.assertEqual(server_config.cwd, '/workspace') + self.assertEqual(server_config.httpUrl, 'https://api.example.com/mcp') + self.assertEqual(server_config.includeTools, ['tool1', 'tool2']) + self.assertEqual(server_config.excludeTools, ['dangerous_tool']) + + +class TestUnsupportedFieldReporting(unittest.TestCase): + """Test that unsupported fields are reported correctly, not rejected.""" + + @patch('hatch.cli_hatch.MCPHostConfigurationManager') + @patch('sys.stdout', new_callable=StringIO) + def test_gemini_args_on_vscode_show_unsupported(self, mock_stdout, mock_manager_class): + """Test that Gemini-specific args on VS Code show as UNSUPPORTED.""" + mock_manager = MagicMock() + mock_manager_class.return_value = mock_manager + + mock_result = MagicMock() + mock_result.success = True + mock_result.backup_path = None + mock_manager.configure_server.return_value = mock_result + + result = handle_mcp_configure( + host='vscode', + server_name='test-server', + command='python', + args=['server.py'], + timeout=30000, # Gemini-only field + trust=True, # Gemini-only field + auto_approve=True + ) + + # Should succeed (not return error code 1) + self.assertEqual(result, 0) + + # Check that output contains "UNSUPPORTED" for Gemini fields + output = mock_stdout.getvalue() + self.assertIn('UNSUPPORTED', output) + self.assertIn('timeout', output) + self.assertIn('trust', output) + + @patch('hatch.cli_hatch.MCPHostConfigurationManager') + @patch('sys.stdout', new_callable=StringIO) + def test_vscode_inputs_on_gemini_show_unsupported(self, mock_stdout, mock_manager_class): + """Test that VS Code inputs on Gemini show as UNSUPPORTED.""" + mock_manager = MagicMock() + mock_manager_class.return_value = mock_manager + + mock_result = MagicMock() + mock_result.success = True + mock_result.backup_path = None + mock_manager.configure_server.return_value = mock_result 
+ + result = handle_mcp_configure( + host='gemini', + server_name='test-server', + command='python', + args=['server.py'], + inputs=['promptString,api-key,API Key,password=true'], # VS Code-only field + auto_approve=True + ) + + # Should succeed (not return error code 1) + self.assertEqual(result, 0) + + # Check that output contains "UNSUPPORTED" for inputs field + output = mock_stdout.getvalue() + self.assertIn('UNSUPPORTED', output) + self.assertIn('inputs', output) + + +class TestVSCodeInputsParsing(unittest.TestCase): + """Test VS Code inputs parsing.""" + + def test_parse_inputs_basic(self): + """Test basic input parsing.""" + inputs_list = ['promptString,api-key,GitHub Personal Access Token'] + result = parse_inputs(inputs_list) + + self.assertIsNotNone(result) + self.assertEqual(len(result), 1) + self.assertEqual(result[0]['type'], 'promptString') + self.assertEqual(result[0]['id'], 'api-key') + self.assertEqual(result[0]['description'], 'GitHub Personal Access Token') + self.assertNotIn('password', result[0]) + + def test_parse_inputs_with_password(self): + """Test input parsing with password flag.""" + inputs_list = ['promptString,api-key,API Key,password=true'] + result = parse_inputs(inputs_list) + + self.assertIsNotNone(result) + self.assertEqual(len(result), 1) + self.assertEqual(result[0]['password'], True) + + def test_parse_inputs_multiple(self): + """Test parsing multiple inputs.""" + inputs_list = [ + 'promptString,api-key,API Key,password=true', + 'promptString,db-url,Database URL' + ] + result = parse_inputs(inputs_list) + + self.assertIsNotNone(result) + self.assertEqual(len(result), 2) + + def test_parse_inputs_none(self): + """Test parsing None inputs.""" + result = parse_inputs(None) + self.assertIsNone(result) + + def test_parse_inputs_empty(self): + """Test parsing empty inputs list.""" + result = parse_inputs([]) + self.assertIsNone(result) + + +class TestVSCodeInputsIntegration(unittest.TestCase): + """Test VS Code inputs integration with configure command.""" + + @patch('hatch.cli_hatch.MCPHostConfigurationManager') + def test_vscode_inputs_passed_to_model(self, mock_manager_class): + """Test that parsed inputs are passed to VS Code model.""" + mock_manager = MagicMock() + mock_manager_class.return_value = mock_manager + + mock_result = MagicMock() + mock_result.success = True + mock_result.backup_path = None + mock_manager.configure_server.return_value = mock_result + + result = handle_mcp_configure( + host='vscode', + server_name='test-server', + command='python', + args=['server.py'], + inputs=['promptString,api-key,API Key,password=true'], + auto_approve=True + ) + + self.assertEqual(result, 0) + + # Verify inputs were passed to VS Code model + call_args = mock_manager.configure_server.call_args + server_config = call_args.kwargs['server_config'] + self.assertIsInstance(server_config, MCPServerConfigVSCode) + self.assertIsNotNone(server_config.inputs) + self.assertEqual(len(server_config.inputs), 1) + self.assertEqual(server_config.inputs[0]['id'], 'api-key') + + +class TestHttpUrlArgument(unittest.TestCase): + """Test --http-url argument for Gemini.""" + + @patch('hatch.cli_hatch.MCPHostConfigurationManager') + def test_http_url_passed_to_gemini(self, mock_manager_class): + """Test that httpUrl is passed to Gemini model.""" + mock_manager = MagicMock() + mock_manager_class.return_value = mock_manager + + mock_result = MagicMock() + mock_result.success = True + mock_result.backup_path = None + mock_manager.configure_server.return_value = mock_result + + 
result = handle_mcp_configure( + host='gemini', + server_name='test-server', + command='python', + args=['server.py'], + http_url='https://api.example.com/mcp', + auto_approve=True + ) + + self.assertEqual(result, 0) + + # Verify httpUrl was passed to Gemini model + call_args = mock_manager.configure_server.call_args + server_config = call_args.kwargs['server_config'] + self.assertIsInstance(server_config, MCPServerConfigGemini) + self.assertEqual(server_config.httpUrl, 'https://api.example.com/mcp') + + +class TestToolFilteringArguments(unittest.TestCase): + """Test --include-tools and --exclude-tools arguments for Gemini.""" + + @patch('hatch.cli_hatch.MCPHostConfigurationManager') + def test_include_tools_passed_to_gemini(self, mock_manager_class): + """Test that includeTools is passed to Gemini model.""" + mock_manager = MagicMock() + mock_manager_class.return_value = mock_manager + + mock_result = MagicMock() + mock_result.success = True + mock_result.backup_path = None + mock_manager.configure_server.return_value = mock_result + + result = handle_mcp_configure( + host='gemini', + server_name='test-server', + command='python', + args=['server.py'], + include_tools=['tool1', 'tool2', 'tool3'], + auto_approve=True + ) + + self.assertEqual(result, 0) + + # Verify includeTools was passed to Gemini model + call_args = mock_manager.configure_server.call_args + server_config = call_args.kwargs['server_config'] + self.assertIsInstance(server_config, MCPServerConfigGemini) + self.assertEqual(server_config.includeTools, ['tool1', 'tool2', 'tool3']) + + @patch('hatch.cli_hatch.MCPHostConfigurationManager') + def test_exclude_tools_passed_to_gemini(self, mock_manager_class): + """Test that excludeTools is passed to Gemini model.""" + mock_manager = MagicMock() + mock_manager_class.return_value = mock_manager + + mock_result = MagicMock() + mock_result.success = True + mock_result.backup_path = None + mock_manager.configure_server.return_value = mock_result + + result = handle_mcp_configure( + host='gemini', + server_name='test-server', + command='python', + args=['server.py'], + exclude_tools=['dangerous_tool'], + auto_approve=True + ) + + self.assertEqual(result, 0) + + # Verify excludeTools was passed to Gemini model + call_args = mock_manager.configure_server.call_args + server_config = call_args.kwargs['server_config'] + self.assertIsInstance(server_config, MCPServerConfigGemini) + self.assertEqual(server_config.excludeTools, ['dangerous_tool']) + + +if __name__ == '__main__': + unittest.main() + diff --git a/tests/test_mcp_cli_direct_management.py b/tests/test_mcp_cli_direct_management.py index dbc48d6..cfd4c69 100644 --- a/tests/test_mcp_cli_direct_management.py +++ b/tests/test_mcp_cli_direct_management.py @@ -31,40 +31,50 @@ class TestMCPConfigureCommand(unittest.TestCase): @regression_test def test_configure_argument_parsing_basic(self): """Test basic argument parsing for 'hatch mcp configure' command.""" - test_args = ['hatch', 'mcp', 'configure', 'claude-desktop', 'weather-server', 'python', 'weather.py'] - + # Updated to match current CLI: server_name is positional, --host is required, --command/--url are mutually exclusive + test_args = ['hatch', 'mcp', 'configure', 'weather-server', '--host', 'claude-desktop', '--command', 'python', '--args', 'weather.py'] + with patch('sys.argv', test_args): with patch('hatch.cli_hatch.HatchEnvironmentManager'): with patch('hatch.cli_hatch.handle_mcp_configure', return_value=0) as mock_handler: try: - main() + result = main() + # If main() returns 
without SystemExit, check the handler was called + # Updated to include ALL host-specific parameters mock_handler.assert_called_once_with( 'claude-desktop', 'weather-server', 'python', ['weather.py'], - None, None, None, False, False, False + None, None, None, None, False, None, None, None, None, None, None, False, False, False ) except SystemExit as e: - self.assertEqual(e.code, 0) + # If SystemExit is raised, it should be 0 (success) and handler should have been called + if e.code == 0: + mock_handler.assert_called_once_with( + 'claude-desktop', 'weather-server', 'python', ['weather.py'], + None, None, None, None, False, None, None, None, None, None, None, False, False, False + ) + else: + self.fail(f"main() exited with code {e.code}, expected 0") @regression_test def test_configure_argument_parsing_with_options(self): """Test argument parsing with environment variables and options.""" test_args = [ - 'hatch', 'mcp', 'configure', 'cursor', 'file-server', 'node', 'server.js', 'arg1', 'arg2', - '--env', 'API_KEY=secret', '--env', 'DEBUG=true', - '--url', 'http://localhost:8080', + 'hatch', 'mcp', 'configure', 'file-server', '--host', 'cursor', '--url', 'http://localhost:8080', + '--env-var', 'API_KEY=secret', '--env-var', 'DEBUG=true', '--headers', 'Authorization=Bearer token', '--no-backup', '--dry-run', '--auto-approve' ] - + with patch('sys.argv', test_args): with patch('hatch.cli_hatch.HatchEnvironmentManager'): with patch('hatch.cli_hatch.handle_mcp_configure', return_value=0) as mock_handler: try: main() + # Updated to include ALL host-specific parameters mock_handler.assert_called_once_with( - 'cursor', 'file-server', 'node', ['server.js', 'arg1', 'arg2'], + 'cursor', 'file-server', None, None, ['API_KEY=secret', 'DEBUG=true'], 'http://localhost:8080', - ['Authorization=Bearer token'], True, True, True + ['Authorization=Bearer token'], None, False, None, None, None, None, None, None, True, True, True ) except SystemExit as e: self.assertEqual(e.code, 0) @@ -397,6 +407,9 @@ def test_remove_host_successful(self): mock_manager_class.return_value = mock_manager with patch('hatch.cli_hatch.HatchEnvironmentManager') as mock_env_manager: + # Mock the clear_host_from_all_packages_all_envs method + mock_env_manager.return_value.clear_host_from_all_packages_all_envs.return_value = 2 + with patch('builtins.print') as mock_print: result = handle_mcp_remove_host(mock_env_manager.return_value, 'claude-desktop', auto_approve=True) diff --git a/tests/test_mcp_cli_host_config_integration.py b/tests/test_mcp_cli_host_config_integration.py new file mode 100644 index 0000000..f1fd22c --- /dev/null +++ b/tests/test_mcp_cli_host_config_integration.py @@ -0,0 +1,650 @@ +""" +Test suite for MCP CLI host configuration integration. + +This module tests the integration of the Pydantic model hierarchy (Phase 3B) +and user feedback reporting system (Phase 3C) into Hatch's CLI commands. + +Tests focus on CLI-specific integration logic while leveraging existing test +infrastructure from Phases 3A-3C. 
+""" + +import unittest +import sys +from pathlib import Path +from unittest.mock import patch, MagicMock, call, ANY + +# Add the parent directory to the path to import wobble +sys.path.insert(0, str(Path(__file__).parent.parent)) + +try: + from wobble.decorators import regression_test, integration_test +except ImportError: + # Fallback decorators if wobble is not available + def regression_test(func): + return func + + def integration_test(scope="component"): + def decorator(func): + return func + return decorator + +from hatch.cli_hatch import ( + handle_mcp_configure, + parse_env_vars, + parse_headers, + parse_host_list, +) +from hatch.mcp_host_config.models import ( + MCPServerConfig, + MCPServerConfigOmni, + HOST_MODEL_REGISTRY, + MCPHostType, + MCPServerConfigGemini, + MCPServerConfigVSCode, + MCPServerConfigCursor, + MCPServerConfigClaude, +) +from hatch.mcp_host_config.reporting import ( + generate_conversion_report, + display_report, + FieldOperation, + ConversionReport, +) + + +class TestCLIArgumentParsingToOmniCreation(unittest.TestCase): + """Test suite for CLI argument parsing to MCPServerConfigOmni creation.""" + + @regression_test + def test_configure_creates_omni_model_basic(self): + """Test that configure command creates MCPServerConfigOmni from CLI arguments.""" + with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager: + with patch('hatch.cli_hatch.request_confirmation', return_value=False): + # Call handle_mcp_configure with basic arguments + result = handle_mcp_configure( + host='claude-desktop', + server_name='test-server', + command='python', + args=['server.py'], + env=None, + url=None, + headers=None, + no_backup=True, + dry_run=False, + auto_approve=False + ) + + # Verify the function executed without errors + self.assertEqual(result, 0) + + @regression_test + def test_configure_creates_omni_with_env_vars(self): + """Test that environment variables are parsed correctly into Omni model.""" + with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager: + with patch('hatch.cli_hatch.request_confirmation', return_value=False): + # Call with environment variables + result = handle_mcp_configure( + host='claude-desktop', + server_name='test-server', + command='python', + args=['server.py'], + env=['API_KEY=secret', 'DEBUG=true'], + url=None, + headers=None, + no_backup=True, + dry_run=False, + auto_approve=False + ) + + # Verify the function executed without errors + self.assertEqual(result, 0) + + @regression_test + def test_configure_creates_omni_with_headers(self): + """Test that headers are parsed correctly into Omni model.""" + with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager: + with patch('hatch.cli_hatch.request_confirmation', return_value=False): + result = handle_mcp_configure( + host='claude-desktop', + server_name='test-server', + command=None, + args=None, + env=None, + url='https://api.example.com', + headers=['Authorization=Bearer token', 'Content-Type=application/json'], + no_backup=True, + dry_run=False, + auto_approve=False + ) + + # Verify the function executed without errors (bug fixed in Phase 4) + self.assertEqual(result, 0) + + @regression_test + def test_configure_creates_omni_remote_server(self): + """Test that remote server arguments create correct Omni model.""" + with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager: + with patch('hatch.cli_hatch.request_confirmation', return_value=False): + result = handle_mcp_configure( + host='claude-desktop', + 
server_name='remote-server', + command=None, + args=None, + env=None, + url='https://api.example.com', + headers=['Auth=token'], + no_backup=True, + dry_run=False, + auto_approve=False + ) + + # Verify the function executed without errors (bug fixed in Phase 4) + self.assertEqual(result, 0) + + @regression_test + def test_configure_omni_with_all_universal_fields(self): + """Test that all universal fields are supported in Omni creation.""" + with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager: + with patch('hatch.cli_hatch.request_confirmation', return_value=False): + # Call with all universal fields + result = handle_mcp_configure( + host='claude-desktop', + server_name='full-server', + command='python', + args=['server.py', '--port', '8080'], + env=['API_KEY=secret', 'DEBUG=true', 'LOG_LEVEL=info'], + url=None, + headers=None, + no_backup=True, + dry_run=False, + auto_approve=False + ) + + # Verify the function executed without errors + self.assertEqual(result, 0) + + @regression_test + def test_configure_omni_with_optional_fields_none(self): + """Test that optional fields are handled correctly (None values).""" + with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager: + with patch('hatch.cli_hatch.request_confirmation', return_value=False): + # Call with only required fields + result = handle_mcp_configure( + host='claude-desktop', + server_name='minimal-server', + command='python', + args=['server.py'], + env=None, + url=None, + headers=None, + no_backup=True, + dry_run=False, + auto_approve=False + ) + + # Verify the function executed without errors + self.assertEqual(result, 0) + + +class TestModelIntegration(unittest.TestCase): + """Test suite for model integration in CLI handlers.""" + + @regression_test + def test_configure_uses_host_model_registry(self): + """Test that configure command uses HOST_MODEL_REGISTRY for host selection.""" + with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager: + with patch('hatch.cli_hatch.request_confirmation', return_value=False): + # Test with Gemini host + result = handle_mcp_configure( + host='gemini', + server_name='test-server', + command='python', + args=['server.py'], + env=None, + url=None, + headers=None, + no_backup=True, + dry_run=False, + auto_approve=False + ) + + # Verify the function executed without errors + self.assertEqual(result, 0) + + @regression_test + def test_configure_calls_from_omni_conversion(self): + """Test that from_omni() is called to convert Omni to host-specific model.""" + with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager: + with patch('hatch.cli_hatch.request_confirmation', return_value=False): + # Call configure command + result = handle_mcp_configure( + host='claude-desktop', + server_name='test-server', + command='python', + args=['server.py'], + env=None, + url=None, + headers=None, + no_backup=True, + dry_run=False, + auto_approve=False + ) + + # Verify the function executed without errors + self.assertEqual(result, 0) + + @integration_test(scope="component") + def test_configure_passes_host_specific_model_to_manager(self): + """Test that host-specific model is passed to MCPHostConfigurationManager.""" + with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager_class: + mock_manager = MagicMock() + mock_manager_class.return_value = mock_manager + mock_manager.configure_server.return_value = MagicMock(success=True, backup_path=None) + + with patch('hatch.cli_hatch.request_confirmation', return_value=True): + # 
Call configure command + result = handle_mcp_configure( + host='claude-desktop', + server_name='test-server', + command='python', + args=['server.py'], + env=None, + url=None, + headers=None, + no_backup=True, + dry_run=False, + auto_approve=False + ) + + # Verify configure_server was called + self.assertEqual(result, 0) + mock_manager.configure_server.assert_called_once() + + # Verify the server_config argument is a host-specific model instance + # (MCPServerConfigClaude for claude-desktop host) + call_args = mock_manager.configure_server.call_args + server_config = call_args.kwargs['server_config'] + self.assertIsInstance(server_config, MCPServerConfigClaude) + + +class TestReportingIntegration(unittest.TestCase): + """Test suite for reporting integration in CLI commands.""" + + @regression_test + def test_configure_dry_run_displays_report_only(self): + """Test that dry-run mode displays report without configuration.""" + with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager: + # Call with dry-run + result = handle_mcp_configure( + host='claude-desktop', + server_name='test-server', + command='python', + args=['server.py'], + env=None, + url=None, + headers=None, + no_backup=True, + dry_run=True, + auto_approve=False + ) + + # Verify the function executed without errors + self.assertEqual(result, 0) + + # Verify MCPHostConfigurationManager was not instantiated (no actual configuration) + mock_manager.assert_not_called() + + +class TestHostSpecificArguments(unittest.TestCase): + """Test suite for host-specific CLI arguments (Phase 3 - Mandatory).""" + + @regression_test + def test_configure_accepts_all_universal_fields(self): + """Test that all universal fields are accepted by CLI.""" + with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager: + with patch('hatch.cli_hatch.request_confirmation', return_value=False): + # Call with all universal fields + result = handle_mcp_configure( + host='claude-desktop', + server_name='test-server', + command='python', + args=['server.py', '--port', '8080'], + env=['API_KEY=secret', 'DEBUG=true'], + url=None, + headers=None, + no_backup=True, + dry_run=False, + auto_approve=False + ) + + # Verify success + self.assertEqual(result, 0) + + @regression_test + def test_configure_multiple_env_vars(self): + """Test that multiple environment variables are handled correctly.""" + with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager: + with patch('hatch.cli_hatch.request_confirmation', return_value=False): + # Call with multiple env vars + result = handle_mcp_configure( + host='gemini', + server_name='test-server', + command='python', + args=['server.py'], + env=['VAR1=value1', 'VAR2=value2', 'VAR3=value3'], + url=None, + headers=None, + no_backup=True, + dry_run=False, + auto_approve=False + ) + + # Verify success + self.assertEqual(result, 0) + + @regression_test + def test_configure_different_hosts(self): + """Test that different host types are handled correctly.""" + hosts_to_test = ['claude-desktop', 'cursor', 'vscode', 'gemini'] + + for host in hosts_to_test: + with self.subTest(host=host): + with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager: + with patch('hatch.cli_hatch.request_confirmation', return_value=False): + result = handle_mcp_configure( + host=host, + server_name='test-server', + command='python', + args=['server.py'], + env=None, + url=None, + headers=None, + no_backup=True, + dry_run=False, + auto_approve=False + ) + + # Verify success for each host + 
self.assertEqual(result, 0) + + +class TestErrorHandling(unittest.TestCase): + """Test suite for error handling in CLI commands.""" + + @regression_test + def test_configure_invalid_host_type_error(self): + """Test that clear error is shown for invalid host type.""" + # Call with invalid host + result = handle_mcp_configure( + host='invalid-host', + server_name='test-server', + command='python', + args=['server.py'], + env=None, + url=None, + headers=None, + no_backup=True, + dry_run=False, + auto_approve=False + ) + + # Verify error return code + self.assertEqual(result, 1) + + @regression_test + def test_configure_invalid_field_value_error(self): + """Test that clear error is shown for invalid field values.""" + # Test with invalid URL format - this will be caught by Pydantic validation + # when creating MCPServerConfig + result = handle_mcp_configure( + host='claude-desktop', + server_name='test-server', + command=None, + args=None, # Must be None for remote server + env=None, + url='not-a-url', # Invalid URL format + headers=None, + no_backup=True, + dry_run=False, + auto_approve=False + ) + + # Verify error return code (validation error caught in exception handler) + self.assertEqual(result, 1) + + @regression_test + def test_configure_pydantic_validation_error_handling(self): + """Test that Pydantic ValidationErrors are caught and handled.""" + # Test with conflicting arguments (command with headers) + result = handle_mcp_configure( + host='claude-desktop', + server_name='test-server', + command='python', + args=['server.py'], + env=None, + url=None, + headers=['Auth=token'], # Headers not allowed with command + no_backup=True, + dry_run=False, + auto_approve=False + ) + + # Verify error return code (caught by validation in handle_mcp_configure) + self.assertEqual(result, 1) + + @regression_test + def test_configure_missing_command_url_error(self): + """Test error handling when neither command nor URL provided.""" + # This test verifies the argparse validation (required=True for mutually exclusive group) + # In actual CLI usage, argparse would catch this before handle_mcp_configure is called + # For unit testing, we test that the function handles None values appropriately + result = handle_mcp_configure( + host='claude-desktop', + server_name='test-server', + command=None, + args=None, + env=None, + url=None, + headers=None, + no_backup=True, + dry_run=False, + auto_approve=False + ) + + # Verify error return code (validation error) + self.assertEqual(result, 1) + + +class TestBackwardCompatibility(unittest.TestCase): + """Test suite for backward compatibility.""" + + @regression_test + def test_existing_configure_command_still_works(self): + """Test that existing configure command usage still works.""" + with patch('hatch.cli_hatch.MCPHostConfigurationManager') as mock_manager_class: + mock_manager = MagicMock() + mock_manager_class.return_value = mock_manager + mock_manager.configure_server.return_value = MagicMock(success=True, backup_path=None) + + with patch('hatch.cli_hatch.request_confirmation', return_value=True): + # Call with existing command pattern + result = handle_mcp_configure( + host='claude-desktop', + server_name='my-server', + command='python', + args=['-m', 'my_package.server'], + env=['API_KEY=secret'], + url=None, + headers=None, + no_backup=False, + dry_run=False, + auto_approve=False + ) + + # Verify success + self.assertEqual(result, 0) + mock_manager.configure_server.assert_called_once() + + +class TestParseUtilities(unittest.TestCase): + """Test suite for 
CLI parsing utilities.""" + + @regression_test + def test_parse_env_vars_basic(self): + """Test parsing environment variables from KEY=VALUE format.""" + env_list = ['API_KEY=secret', 'DEBUG=true'] + result = parse_env_vars(env_list) + + expected = {'API_KEY': 'secret', 'DEBUG': 'true'} + self.assertEqual(result, expected) + + @regression_test + def test_parse_env_vars_empty(self): + """Test parsing empty environment variables list.""" + result = parse_env_vars(None) + self.assertEqual(result, {}) + + result = parse_env_vars([]) + self.assertEqual(result, {}) + + @regression_test + def test_parse_headers_basic(self): + """Test parsing headers from KEY=VALUE format.""" + headers_list = ['Authorization=Bearer token', 'Content-Type=application/json'] + result = parse_headers(headers_list) + + expected = {'Authorization': 'Bearer token', 'Content-Type': 'application/json'} + self.assertEqual(result, expected) + + @regression_test + def test_parse_headers_empty(self): + """Test parsing empty headers list.""" + result = parse_headers(None) + self.assertEqual(result, {}) + + result = parse_headers([]) + self.assertEqual(result, {}) + + +class TestCLIIntegrationReadiness(unittest.TestCase): + """Test suite to verify readiness for Phase 4 CLI integration implementation.""" + + @regression_test + def test_host_model_registry_available(self): + """Test that HOST_MODEL_REGISTRY is available for CLI integration.""" + from hatch.mcp_host_config.models import HOST_MODEL_REGISTRY, MCPHostType + + # Verify registry contains all expected hosts + expected_hosts = [ + MCPHostType.GEMINI, + MCPHostType.CLAUDE_DESKTOP, + MCPHostType.CLAUDE_CODE, + MCPHostType.VSCODE, + MCPHostType.CURSOR, + MCPHostType.LMSTUDIO, + ] + + for host in expected_hosts: + self.assertIn(host, HOST_MODEL_REGISTRY) + + @regression_test + def test_omni_model_available(self): + """Test that MCPServerConfigOmni is available for CLI integration.""" + from hatch.mcp_host_config.models import MCPServerConfigOmni + + # Create a basic Omni model + omni = MCPServerConfigOmni( + name='test-server', + command='python', + args=['server.py'], + env={'API_KEY': 'secret'}, + ) + + # Verify model was created successfully + self.assertEqual(omni.name, 'test-server') + self.assertEqual(omni.command, 'python') + self.assertEqual(omni.args, ['server.py']) + self.assertEqual(omni.env, {'API_KEY': 'secret'}) + + @regression_test + def test_from_omni_conversion_available(self): + """Test that from_omni() conversion is available for all host models.""" + from hatch.mcp_host_config.models import ( + MCPServerConfigOmni, + MCPServerConfigGemini, + MCPServerConfigClaude, + MCPServerConfigVSCode, + MCPServerConfigCursor, + ) + + # Create Omni model + omni = MCPServerConfigOmni( + name='test-server', + command='python', + args=['server.py'], + ) + + # Test conversion to each host-specific model + gemini = MCPServerConfigGemini.from_omni(omni) + self.assertEqual(gemini.name, 'test-server') + + claude = MCPServerConfigClaude.from_omni(omni) + self.assertEqual(claude.name, 'test-server') + + vscode = MCPServerConfigVSCode.from_omni(omni) + self.assertEqual(vscode.name, 'test-server') + + cursor = MCPServerConfigCursor.from_omni(omni) + self.assertEqual(cursor.name, 'test-server') + + @regression_test + def test_reporting_functions_available(self): + """Test that reporting functions are available for CLI integration.""" + from hatch.mcp_host_config.reporting import ( + generate_conversion_report, + display_report, + ) + from hatch.mcp_host_config.models import 
MCPServerConfigOmni, MCPHostType + + # Create Omni model + omni = MCPServerConfigOmni( + name='test-server', + command='python', + args=['server.py'], + ) + + # Generate report + report = generate_conversion_report( + operation='create', + server_name='test-server', + target_host=MCPHostType.CLAUDE_DESKTOP, + omni=omni, + dry_run=True + ) + + # Verify report was created + self.assertIsNotNone(report) + self.assertEqual(report.operation, 'create') + + @regression_test + def test_cli_handler_signature_compatible(self): + """Test that handle_mcp_configure signature is compatible with integration.""" + import inspect + from hatch.cli_hatch import handle_mcp_configure + + # Get function signature + sig = inspect.signature(handle_mcp_configure) + + # Verify expected parameters exist + expected_params = [ + 'host', 'server_name', 'command', 'args', + 'env', 'url', 'headers', 'no_backup', 'dry_run', 'auto_approve' + ] + + for param in expected_params: + self.assertIn(param, sig.parameters) + + +if __name__ == '__main__': + unittest.main() + diff --git a/tests/test_mcp_pydantic_architecture_v4.py b/tests/test_mcp_pydantic_architecture_v4.py new file mode 100644 index 0000000..30233fc --- /dev/null +++ b/tests/test_mcp_pydantic_architecture_v4.py @@ -0,0 +1,561 @@ +""" +Test suite for Round 04 v4 Pydantic Model Hierarchy. + +This module tests the new model hierarchy including MCPServerConfigBase, +host-specific models (Gemini, VS Code, Cursor, Claude), MCPServerConfigOmni, +HOST_MODEL_REGISTRY, and from_omni() conversion methods. +""" + +import unittest +import sys +from pathlib import Path + +# Add the parent directory to the path to import wobble +sys.path.insert(0, str(Path(__file__).parent.parent)) + +try: + from wobble.decorators import regression_test +except ImportError: + # Fallback decorator if wobble is not available + def regression_test(func): + return func + +from hatch.mcp_host_config.models import ( + MCPServerConfigBase, + MCPServerConfigGemini, + MCPServerConfigVSCode, + MCPServerConfigCursor, + MCPServerConfigClaude, + MCPServerConfigOmni, + HOST_MODEL_REGISTRY, + MCPHostType +) +from pydantic import ValidationError + + +class TestMCPServerConfigBase(unittest.TestCase): + """Test suite for MCPServerConfigBase model.""" + + @regression_test + def test_base_model_local_server_validation_success(self): + """Test successful local server configuration with type inference.""" + config = MCPServerConfigBase( + name="test-server", + command="python", + args=["server.py"], + env={"API_KEY": "test"} + ) + + self.assertEqual(config.command, "python") + self.assertEqual(config.type, "stdio") # Inferred from command + self.assertEqual(len(config.args), 1) + self.assertEqual(config.env["API_KEY"], "test") + + @regression_test + def test_base_model_remote_server_validation_success(self): + """Test successful remote server configuration with type inference.""" + config = MCPServerConfigBase( + name="test-server", + url="https://api.example.com/mcp", + headers={"Authorization": "Bearer token"} + ) + + self.assertEqual(config.url, "https://api.example.com/mcp") + self.assertEqual(config.type, "sse") # Inferred from url (default to sse) + self.assertEqual(config.headers["Authorization"], "Bearer token") + + @regression_test + def test_base_model_mutual_exclusion_validation_fails(self): + """Test validation fails when both command and url provided.""" + with self.assertRaises(ValidationError) as context: + MCPServerConfigBase( + name="test-server", + command="python", + 
url="https://api.example.com/mcp" + ) + + self.assertIn("Cannot specify both 'command' and 'url'", str(context.exception)) + + @regression_test + def test_base_model_type_field_stdio_validation(self): + """Test type=stdio validation.""" + # Valid: type=stdio with command + config = MCPServerConfigBase( + name="test-server", + type="stdio", + command="python" + ) + self.assertEqual(config.type, "stdio") + self.assertEqual(config.command, "python") + + # Invalid: type=stdio without command + with self.assertRaises(ValidationError) as context: + MCPServerConfigBase( + name="test-server", + type="stdio", + url="https://api.example.com/mcp" + ) + self.assertIn("'command' is required for stdio transport", str(context.exception)) + + @regression_test + def test_base_model_type_field_sse_validation(self): + """Test type=sse validation.""" + # Valid: type=sse with url + config = MCPServerConfigBase( + name="test-server", + type="sse", + url="https://api.example.com/mcp" + ) + self.assertEqual(config.type, "sse") + self.assertEqual(config.url, "https://api.example.com/mcp") + + # Invalid: type=sse without url + with self.assertRaises(ValidationError) as context: + MCPServerConfigBase( + name="test-server", + type="sse", + command="python" + ) + self.assertIn("'url' is required for sse/http transports", str(context.exception)) + + @regression_test + def test_base_model_type_field_http_validation(self): + """Test type=http validation.""" + # Valid: type=http with url + config = MCPServerConfigBase( + name="test-server", + type="http", + url="https://api.example.com/mcp" + ) + self.assertEqual(config.type, "http") + self.assertEqual(config.url, "https://api.example.com/mcp") + + # Invalid: type=http without url + with self.assertRaises(ValidationError) as context: + MCPServerConfigBase( + name="test-server", + type="http", + command="python" + ) + self.assertIn("'url' is required for sse/http transports", str(context.exception)) + + @regression_test + def test_base_model_type_field_invalid_value(self): + """Test validation fails for invalid type value.""" + with self.assertRaises(ValidationError) as context: + MCPServerConfigBase( + name="test-server", + type="invalid", + command="python" + ) + + # Pydantic will reject invalid Literal value + self.assertIn("Input should be 'stdio', 'sse' or 'http'", str(context.exception)) + + +class TestMCPServerConfigGemini(unittest.TestCase): + """Test suite for MCPServerConfigGemini model.""" + + @regression_test + def test_gemini_model_with_all_fields(self): + """Test Gemini model with all Gemini-specific fields.""" + config = MCPServerConfigGemini( + name="gemini-server", + command="npx", + args=["-y", "server"], + env={"API_KEY": "test"}, + cwd="/path/to/dir", + timeout=30000, + trust=True, + includeTools=["tool1", "tool2"], + excludeTools=["tool3"] + ) + + # Verify universal fields + self.assertEqual(config.command, "npx") + self.assertEqual(config.type, "stdio") # Inferred + + # Verify Gemini-specific fields + self.assertEqual(config.cwd, "/path/to/dir") + self.assertEqual(config.timeout, 30000) + self.assertTrue(config.trust) + self.assertEqual(len(config.includeTools), 2) + self.assertEqual(len(config.excludeTools), 1) + + @regression_test + def test_gemini_model_minimal_configuration(self): + """Test Gemini model with minimal configuration.""" + config = MCPServerConfigGemini( + name="gemini-server", + command="python" + ) + + self.assertEqual(config.command, "python") + self.assertEqual(config.type, "stdio") # Inferred + self.assertIsNone(config.cwd) + 
self.assertIsNone(config.timeout) + self.assertIsNone(config.trust) + + @regression_test + def test_gemini_model_field_filtering(self): + """Test Gemini model field filtering with model_dump.""" + config = MCPServerConfigGemini( + name="gemini-server", + command="python", + cwd="/path/to/dir" + ) + + # Use model_dump(exclude_unset=True) to get only set fields + data = config.model_dump(exclude_unset=True) + + # Should include name, command, cwd, type (inferred) + self.assertIn("name", data) + self.assertIn("command", data) + self.assertIn("cwd", data) + self.assertIn("type", data) + + # Should NOT include unset fields + self.assertNotIn("timeout", data) + self.assertNotIn("trust", data) + + +class TestMCPServerConfigVSCode(unittest.TestCase): + """Test suite for MCPServerConfigVSCode model.""" + + @regression_test + def test_vscode_model_with_inputs_array(self): + """Test VS Code model with inputs array.""" + config = MCPServerConfigVSCode( + name="vscode-server", + command="python", + args=["server.py"], + inputs=[ + { + "type": "promptString", + "id": "api-key", + "description": "API Key", + "password": True + } + ] + ) + + self.assertEqual(config.command, "python") + self.assertEqual(len(config.inputs), 1) + self.assertEqual(config.inputs[0]["id"], "api-key") + self.assertTrue(config.inputs[0]["password"]) + + @regression_test + def test_vscode_model_with_envFile(self): + """Test VS Code model with envFile field.""" + config = MCPServerConfigVSCode( + name="vscode-server", + command="python", + envFile=".env" + ) + + self.assertEqual(config.command, "python") + self.assertEqual(config.envFile, ".env") + + @regression_test + def test_vscode_model_minimal_configuration(self): + """Test VS Code model with minimal configuration.""" + config = MCPServerConfigVSCode( + name="vscode-server", + command="python" + ) + + self.assertEqual(config.command, "python") + self.assertEqual(config.type, "stdio") # Inferred + self.assertIsNone(config.envFile) + self.assertIsNone(config.inputs) + + +class TestMCPServerConfigCursor(unittest.TestCase): + """Test suite for MCPServerConfigCursor model.""" + + @regression_test + def test_cursor_model_with_envFile(self): + """Test Cursor model with envFile field.""" + config = MCPServerConfigCursor( + name="cursor-server", + command="python", + envFile=".env" + ) + + self.assertEqual(config.command, "python") + self.assertEqual(config.envFile, ".env") + + @regression_test + def test_cursor_model_minimal_configuration(self): + """Test Cursor model with minimal configuration.""" + config = MCPServerConfigCursor( + name="cursor-server", + command="python" + ) + + self.assertEqual(config.command, "python") + self.assertEqual(config.type, "stdio") # Inferred + self.assertIsNone(config.envFile) + + @regression_test + def test_cursor_model_env_with_interpolation_syntax(self): + """Test Cursor model with env containing interpolation syntax.""" + # Our code writes the literal string value + # Cursor handles ${env:NAME}, ${userHome}, etc. 
expansion at runtime + config = MCPServerConfigCursor( + name="cursor-server", + command="python", + env={"API_KEY": "${env:API_KEY}", "HOME": "${userHome}"} + ) + + self.assertEqual(config.env["API_KEY"], "${env:API_KEY}") + self.assertEqual(config.env["HOME"], "${userHome}") + + +class TestMCPServerConfigClaude(unittest.TestCase): + """Test suite for MCPServerConfigClaude model.""" + + @regression_test + def test_claude_model_universal_fields_only(self): + """Test Claude model with universal fields only.""" + config = MCPServerConfigClaude( + name="claude-server", + command="python", + args=["server.py"], + env={"API_KEY": "test"} + ) + + # Verify universal fields work + self.assertEqual(config.command, "python") + self.assertEqual(config.type, "stdio") # Inferred + self.assertEqual(len(config.args), 1) + self.assertEqual(config.env["API_KEY"], "test") + + @regression_test + def test_claude_model_all_transport_types(self): + """Test Claude model supports all transport types.""" + # stdio transport + config_stdio = MCPServerConfigClaude( + name="claude-server", + type="stdio", + command="python" + ) + self.assertEqual(config_stdio.type, "stdio") + + # sse transport + config_sse = MCPServerConfigClaude( + name="claude-server", + type="sse", + url="https://api.example.com/mcp" + ) + self.assertEqual(config_sse.type, "sse") + + # http transport + config_http = MCPServerConfigClaude( + name="claude-server", + type="http", + url="https://api.example.com/mcp" + ) + self.assertEqual(config_http.type, "http") + + +class TestMCPServerConfigOmni(unittest.TestCase): + """Test suite for MCPServerConfigOmni model.""" + + @regression_test + def test_omni_model_all_fields_optional(self): + """Test Omni model with no fields (all optional).""" + # Should not raise ValidationError + config = MCPServerConfigOmni() + + self.assertIsNone(config.name) + self.assertIsNone(config.command) + self.assertIsNone(config.url) + + @regression_test + def test_omni_model_with_mixed_host_fields(self): + """Test Omni model with fields from multiple hosts.""" + config = MCPServerConfigOmni( + name="omni-server", + command="python", + cwd="/path/to/dir", # Gemini field + envFile=".env" # VS Code/Cursor field + ) + + self.assertEqual(config.command, "python") + self.assertEqual(config.cwd, "/path/to/dir") + self.assertEqual(config.envFile, ".env") + + @regression_test + def test_omni_model_exclude_unset(self): + """Test Omni model with exclude_unset.""" + config = MCPServerConfigOmni( + name="omni-server", + command="python", + args=["server.py"] + ) + + # Use model_dump(exclude_unset=True) + data = config.model_dump(exclude_unset=True) + + # Should only include set fields + self.assertIn("name", data) + self.assertIn("command", data) + self.assertIn("args", data) + + # Should NOT include unset fields + self.assertNotIn("url", data) + self.assertNotIn("cwd", data) + self.assertNotIn("envFile", data) + + +class TestHostModelRegistry(unittest.TestCase): + """Test suite for HOST_MODEL_REGISTRY dictionary dispatch.""" + + @regression_test + def test_registry_contains_all_host_types(self): + """Test registry contains entries for all MCPHostType values.""" + # Verify registry has entries for all host types + self.assertIn(MCPHostType.GEMINI, HOST_MODEL_REGISTRY) + self.assertIn(MCPHostType.CLAUDE_DESKTOP, HOST_MODEL_REGISTRY) + self.assertIn(MCPHostType.CLAUDE_CODE, HOST_MODEL_REGISTRY) + self.assertIn(MCPHostType.VSCODE, HOST_MODEL_REGISTRY) + self.assertIn(MCPHostType.CURSOR, HOST_MODEL_REGISTRY) + 
self.assertIn(MCPHostType.LMSTUDIO, HOST_MODEL_REGISTRY) + + # Verify correct model classes + self.assertEqual(HOST_MODEL_REGISTRY[MCPHostType.GEMINI], MCPServerConfigGemini) + self.assertEqual(HOST_MODEL_REGISTRY[MCPHostType.CLAUDE_DESKTOP], MCPServerConfigClaude) + self.assertEqual(HOST_MODEL_REGISTRY[MCPHostType.CLAUDE_CODE], MCPServerConfigClaude) + self.assertEqual(HOST_MODEL_REGISTRY[MCPHostType.VSCODE], MCPServerConfigVSCode) + self.assertEqual(HOST_MODEL_REGISTRY[MCPHostType.CURSOR], MCPServerConfigCursor) + self.assertEqual(HOST_MODEL_REGISTRY[MCPHostType.LMSTUDIO], MCPServerConfigCursor) + + @regression_test + def test_registry_dictionary_dispatch(self): + """Test dictionary dispatch retrieves correct model class.""" + # Test Gemini + gemini_class = HOST_MODEL_REGISTRY[MCPHostType.GEMINI] + self.assertEqual(gemini_class, MCPServerConfigGemini) + + # Test VS Code + vscode_class = HOST_MODEL_REGISTRY[MCPHostType.VSCODE] + self.assertEqual(vscode_class, MCPServerConfigVSCode) + + # Test Cursor + cursor_class = HOST_MODEL_REGISTRY[MCPHostType.CURSOR] + self.assertEqual(cursor_class, MCPServerConfigCursor) + + # Test Claude Desktop + claude_class = HOST_MODEL_REGISTRY[MCPHostType.CLAUDE_DESKTOP] + self.assertEqual(claude_class, MCPServerConfigClaude) + + +class TestFromOmniConversion(unittest.TestCase): + """Test suite for from_omni() conversion methods.""" + + @regression_test + def test_gemini_from_omni_with_supported_fields(self): + """Test Gemini from_omni with supported fields.""" + omni = MCPServerConfigOmni( + name="gemini-server", + command="npx", + args=["-y", "server"], + cwd="/path/to/dir", + timeout=30000 + ) + + # Convert to Gemini model + gemini = MCPServerConfigGemini.from_omni(omni) + + # Verify all supported fields transferred + self.assertEqual(gemini.name, "gemini-server") + self.assertEqual(gemini.command, "npx") + self.assertEqual(len(gemini.args), 2) + self.assertEqual(gemini.cwd, "/path/to/dir") + self.assertEqual(gemini.timeout, 30000) + + @regression_test + def test_gemini_from_omni_with_unsupported_fields(self): + """Test Gemini from_omni excludes unsupported fields.""" + omni = MCPServerConfigOmni( + name="gemini-server", + command="python", + cwd="/path/to/dir", # Gemini field + envFile=".env" # VS Code field (unsupported by Gemini) + ) + + # Convert to Gemini model + gemini = MCPServerConfigGemini.from_omni(omni) + + # Verify Gemini fields transferred + self.assertEqual(gemini.command, "python") + self.assertEqual(gemini.cwd, "/path/to/dir") + + # Verify unsupported field NOT transferred + # (Gemini model doesn't have envFile field) + self.assertFalse(hasattr(gemini, 'envFile') and gemini.envFile is not None) + + @regression_test + def test_vscode_from_omni_with_supported_fields(self): + """Test VS Code from_omni with supported fields.""" + omni = MCPServerConfigOmni( + name="vscode-server", + command="python", + args=["server.py"], + envFile=".env", + inputs=[{"type": "promptString", "id": "api-key"}] + ) + + # Convert to VS Code model + vscode = MCPServerConfigVSCode.from_omni(omni) + + # Verify all supported fields transferred + self.assertEqual(vscode.name, "vscode-server") + self.assertEqual(vscode.command, "python") + self.assertEqual(vscode.envFile, ".env") + self.assertEqual(len(vscode.inputs), 1) + + @regression_test + def test_cursor_from_omni_with_supported_fields(self): + """Test Cursor from_omni with supported fields.""" + omni = MCPServerConfigOmni( + name="cursor-server", + command="python", + args=["server.py"], + envFile=".env" + ) + + # 
Convert to Cursor model + cursor = MCPServerConfigCursor.from_omni(omni) + + # Verify all supported fields transferred + self.assertEqual(cursor.name, "cursor-server") + self.assertEqual(cursor.command, "python") + self.assertEqual(cursor.envFile, ".env") + + @regression_test + def test_claude_from_omni_with_universal_fields(self): + """Test Claude from_omni with universal fields only.""" + omni = MCPServerConfigOmni( + name="claude-server", + command="python", + args=["server.py"], + env={"API_KEY": "test"}, + type="stdio" + ) + + # Convert to Claude model + claude = MCPServerConfigClaude.from_omni(omni) + + # Verify universal fields transferred + self.assertEqual(claude.name, "claude-server") + self.assertEqual(claude.command, "python") + self.assertEqual(claude.type, "stdio") + self.assertEqual(len(claude.args), 1) + self.assertEqual(claude.env["API_KEY"], "test") + + +if __name__ == '__main__': + unittest.main() + diff --git a/tests/test_mcp_server_config_models.py b/tests/test_mcp_server_config_models.py index 448f8ab..92d3348 100644 --- a/tests/test_mcp_server_config_models.py +++ b/tests/test_mcp_server_config_models.py @@ -124,20 +124,23 @@ def test_mcp_server_config_url_format_validation(self): @regression_test def test_mcp_server_config_no_future_extension_fields(self): - """Test that future extension fields are not present.""" - # These fields should not be accepted (removed in v2) + """Test that extra fields are allowed for host-specific extensions.""" + # Current design allows extra fields to support host-specific configurations + # (e.g., Gemini's timeout, VS Code's envFile, etc.) config_data = { "command": "python", - "timeout": 30, # Should be rejected - "retry_attempts": 3, # Should be rejected - "ssl_verify": True # Should be rejected + "timeout": 30, # Allowed (host-specific field) + "retry_attempts": 3, # Allowed (host-specific field) + "ssl_verify": True # Allowed (host-specific field) } - - with self.assertRaises(ValidationError) as context: - MCPServerConfig(**config_data) - - # Should fail due to extra fields being forbidden - self.assertIn("Extra inputs are not permitted", str(context.exception)) + + # Should NOT raise ValidationError (extra="allow") + config = MCPServerConfig(**config_data) + + # Verify core fields are set correctly + self.assertEqual(config.command, "python") + + # Note: In Phase 3B, strict validation will be enforced in host-specific models @regression_test def test_mcp_server_config_command_empty_validation(self): diff --git a/tests/test_mcp_server_config_type_field.py b/tests/test_mcp_server_config_type_field.py new file mode 100644 index 0000000..733eeb8 --- /dev/null +++ b/tests/test_mcp_server_config_type_field.py @@ -0,0 +1,221 @@ +""" +Test suite for MCPServerConfig type field (Phase 3A). + +This module tests the type field addition to MCPServerConfig model, +including validation and property behavior. 
+""" + +import unittest +import sys +from pathlib import Path + +# Add the parent directory to the path to import wobble +sys.path.insert(0, str(Path(__file__).parent.parent)) + +try: + from wobble.decorators import regression_test +except ImportError: + # Fallback decorator if wobble is not available + def regression_test(func): + return func + +from hatch.mcp_host_config.models import MCPServerConfig +from pydantic import ValidationError + + +class TestMCPServerConfigTypeField(unittest.TestCase): + """Test suite for MCPServerConfig type field validation.""" + + @regression_test + def test_type_stdio_with_command_success(self): + """Test successful stdio type with command.""" + config = MCPServerConfig( + name="test-server", + type="stdio", + command="python", + args=["server.py"] + ) + + self.assertEqual(config.type, "stdio") + self.assertEqual(config.command, "python") + self.assertTrue(config.is_local_server) + self.assertFalse(config.is_remote_server) + + @regression_test + def test_type_sse_with_url_success(self): + """Test successful sse type with url.""" + config = MCPServerConfig( + name="test-server", + type="sse", + url="https://api.example.com/mcp" + ) + + self.assertEqual(config.type, "sse") + self.assertEqual(config.url, "https://api.example.com/mcp") + self.assertFalse(config.is_local_server) + self.assertTrue(config.is_remote_server) + + @regression_test + def test_type_http_with_url_success(self): + """Test successful http type with url.""" + config = MCPServerConfig( + name="test-server", + type="http", + url="https://api.example.com/mcp", + headers={"Authorization": "Bearer token"} + ) + + self.assertEqual(config.type, "http") + self.assertEqual(config.url, "https://api.example.com/mcp") + self.assertFalse(config.is_local_server) + self.assertTrue(config.is_remote_server) + + @regression_test + def test_type_stdio_without_command_fails(self): + """Test validation fails when type=stdio without command.""" + with self.assertRaises(ValidationError) as context: + MCPServerConfig( + name="test-server", + type="stdio", + url="https://api.example.com/mcp" # Invalid: stdio with url + ) + + self.assertIn("'type=stdio' requires 'command' field", str(context.exception)) + + @regression_test + def test_type_stdio_with_url_fails(self): + """Test validation fails when type=stdio with url.""" + with self.assertRaises(ValidationError) as context: + MCPServerConfig( + name="test-server", + type="stdio", + command="python", + url="https://api.example.com/mcp" # Invalid: both command and url + ) + + # The validate_server_type() validator catches this first + self.assertIn("Cannot specify both 'command' and 'url'", str(context.exception)) + + @regression_test + def test_type_sse_without_url_fails(self): + """Test validation fails when type=sse without url.""" + with self.assertRaises(ValidationError) as context: + MCPServerConfig( + name="test-server", + type="sse", + command="python" # Invalid: sse with command + ) + + self.assertIn("'type=sse' requires 'url' field", str(context.exception)) + + @regression_test + def test_type_http_without_url_fails(self): + """Test validation fails when type=http without url.""" + with self.assertRaises(ValidationError) as context: + MCPServerConfig( + name="test-server", + type="http", + command="python" # Invalid: http with command + ) + + self.assertIn("'type=http' requires 'url' field", str(context.exception)) + + @regression_test + def test_type_sse_with_command_fails(self): + """Test validation fails when type=sse with command.""" + with 
self.assertRaises(ValidationError) as context: + MCPServerConfig( + name="test-server", + type="sse", + command="python", + url="https://api.example.com/mcp" # Invalid: both command and url + ) + + # The validate_server_type() validator catches this first + self.assertIn("Cannot specify both 'command' and 'url'", str(context.exception)) + + @regression_test + def test_backward_compatibility_no_type_field_local(self): + """Test backward compatibility: local server without type field.""" + config = MCPServerConfig( + name="test-server", + command="python", + args=["server.py"] + ) + + self.assertIsNone(config.type) + self.assertEqual(config.command, "python") + self.assertTrue(config.is_local_server) + self.assertFalse(config.is_remote_server) + + @regression_test + def test_backward_compatibility_no_type_field_remote(self): + """Test backward compatibility: remote server without type field.""" + config = MCPServerConfig( + name="test-server", + url="https://api.example.com/mcp" + ) + + self.assertIsNone(config.type) + self.assertEqual(config.url, "https://api.example.com/mcp") + self.assertFalse(config.is_local_server) + self.assertTrue(config.is_remote_server) + + @regression_test + def test_type_field_with_env_variables(self): + """Test type field with environment variables.""" + config = MCPServerConfig( + name="test-server", + type="stdio", + command="python", + args=["server.py"], + env={"API_KEY": "test-key", "DEBUG": "true"} + ) + + self.assertEqual(config.type, "stdio") + self.assertEqual(config.env["API_KEY"], "test-key") + self.assertEqual(config.env["DEBUG"], "true") + + @regression_test + def test_type_field_serialization(self): + """Test type field is included in serialization.""" + config = MCPServerConfig( + name="test-server", + type="stdio", + command="python", + args=["server.py"] + ) + + # Test model_dump includes type field + data = config.model_dump() + self.assertEqual(data["type"], "stdio") + self.assertEqual(data["command"], "python") + + # Test JSON serialization + import json + json_str = config.model_dump_json() + parsed = json.loads(json_str) + self.assertEqual(parsed["type"], "stdio") + + @regression_test + def test_type_field_roundtrip(self): + """Test type field survives serialization roundtrip.""" + original = MCPServerConfig( + name="test-server", + type="sse", + url="https://api.example.com/mcp", + headers={"Authorization": "Bearer token"} + ) + + # Serialize and deserialize + data = original.model_dump() + roundtrip = MCPServerConfig(**data) + + self.assertEqual(roundtrip.type, "sse") + self.assertEqual(roundtrip.url, "https://api.example.com/mcp") + self.assertEqual(roundtrip.headers["Authorization"], "Bearer token") + + +if __name__ == '__main__': + unittest.main() + diff --git a/tests/test_mcp_user_feedback_reporting.py b/tests/test_mcp_user_feedback_reporting.py new file mode 100644 index 0000000..6beff73 --- /dev/null +++ b/tests/test_mcp_user_feedback_reporting.py @@ -0,0 +1,359 @@ +""" +Test suite for MCP user feedback reporting system. + +This module tests the FieldOperation and ConversionReport models, +generate_conversion_report() function, and display_report() function. 
+""" + +import unittest +import sys +from pathlib import Path +from io import StringIO + +# Add the parent directory to the path to import wobble +sys.path.insert(0, str(Path(__file__).parent.parent)) + +try: + from wobble.decorators import regression_test +except ImportError: + # Fallback decorator if wobble is not available + def regression_test(func): + return func + +from hatch.mcp_host_config.reporting import ( + FieldOperation, + ConversionReport, + generate_conversion_report, + display_report +) +from hatch.mcp_host_config.models import ( + MCPServerConfigOmni, + MCPHostType +) + + +class TestFieldOperation(unittest.TestCase): + """Test suite for FieldOperation model.""" + + @regression_test + def test_field_operation_updated_str_representation(self): + """Test UPDATED operation string representation.""" + field_op = FieldOperation( + field_name="command", + operation="UPDATED", + old_value="old_command", + new_value="new_command" + ) + + result = str(field_op) + + # Verify ASCII arrow used (not Unicode) + self.assertIn("-->", result) + self.assertNotIn("→", result) + + # Verify format + self.assertEqual(result, "command: UPDATED 'old_command' --> 'new_command'") + + @regression_test + def test_field_operation_updated_with_none_old_value(self): + """Test UPDATED operation with None old_value (field added).""" + field_op = FieldOperation( + field_name="timeout", + operation="UPDATED", + old_value=None, + new_value=30000 + ) + + result = str(field_op) + + # Verify None is displayed + self.assertEqual(result, "timeout: UPDATED None --> 30000") + + @regression_test + def test_field_operation_unsupported_str_representation(self): + """Test UNSUPPORTED operation string representation.""" + field_op = FieldOperation( + field_name="envFile", + operation="UNSUPPORTED", + new_value=".env" + ) + + result = str(field_op) + + # Verify format + self.assertEqual(result, "envFile: UNSUPPORTED") + + @regression_test + def test_field_operation_unchanged_str_representation(self): + """Test UNCHANGED operation string representation.""" + field_op = FieldOperation( + field_name="name", + operation="UNCHANGED", + new_value="my-server" + ) + + result = str(field_op) + + # Verify format + self.assertEqual(result, "name: UNCHANGED 'my-server'") + + +class TestConversionReport(unittest.TestCase): + """Test suite for ConversionReport model.""" + + @regression_test + def test_conversion_report_create_operation(self): + """Test ConversionReport with create operation.""" + report = ConversionReport( + operation="create", + server_name="my-server", + target_host=MCPHostType.GEMINI, + field_operations=[ + FieldOperation(field_name="command", operation="UPDATED", old_value=None, new_value="python") + ] + ) + + self.assertEqual(report.operation, "create") + self.assertEqual(report.server_name, "my-server") + self.assertEqual(report.target_host, MCPHostType.GEMINI) + self.assertTrue(report.success) + self.assertIsNone(report.error_message) + self.assertEqual(len(report.field_operations), 1) + self.assertFalse(report.dry_run) + + @regression_test + def test_conversion_report_update_operation(self): + """Test ConversionReport with update operation.""" + report = ConversionReport( + operation="update", + server_name="my-server", + target_host=MCPHostType.VSCODE, + field_operations=[ + FieldOperation(field_name="command", operation="UPDATED", old_value="old", new_value="new"), + FieldOperation(field_name="name", operation="UNCHANGED", new_value="my-server") + ] + ) + + self.assertEqual(report.operation, "update") + 
self.assertEqual(len(report.field_operations), 2) + + @regression_test + def test_conversion_report_migrate_operation(self): + """Test ConversionReport with migrate operation.""" + report = ConversionReport( + operation="migrate", + server_name="my-server", + source_host=MCPHostType.GEMINI, + target_host=MCPHostType.VSCODE, + field_operations=[] + ) + + self.assertEqual(report.operation, "migrate") + self.assertEqual(report.source_host, MCPHostType.GEMINI) + self.assertEqual(report.target_host, MCPHostType.VSCODE) + + +class TestGenerateConversionReport(unittest.TestCase): + """Test suite for generate_conversion_report() function.""" + + @regression_test + def test_generate_report_create_operation_all_supported(self): + """Test generate_conversion_report for create with all supported fields.""" + omni = MCPServerConfigOmni( + name="gemini-server", + command="npx", + args=["-y", "server"], + cwd="/path/to/dir", + timeout=30000 + ) + + report = generate_conversion_report( + operation="create", + server_name="gemini-server", + target_host=MCPHostType.GEMINI, + omni=omni + ) + + # Verify all fields are UPDATED (create operation) + self.assertEqual(report.operation, "create") + self.assertEqual(report.server_name, "gemini-server") + self.assertEqual(report.target_host, MCPHostType.GEMINI) + + # All set fields should be UPDATED + updated_ops = [op for op in report.field_operations if op.operation == "UPDATED"] + self.assertEqual(len(updated_ops), 5) # name, command, args, cwd, timeout + + # No unsupported fields + unsupported_ops = [op for op in report.field_operations if op.operation == "UNSUPPORTED"] + self.assertEqual(len(unsupported_ops), 0) + + @regression_test + def test_generate_report_create_operation_with_unsupported(self): + """Test generate_conversion_report with unsupported fields.""" + omni = MCPServerConfigOmni( + name="gemini-server", + command="python", + cwd="/path/to/dir", # Gemini field + envFile=".env" # VS Code field (unsupported by Gemini) + ) + + report = generate_conversion_report( + operation="create", + server_name="gemini-server", + target_host=MCPHostType.GEMINI, + omni=omni + ) + + # Verify Gemini fields are UPDATED + updated_ops = [op for op in report.field_operations if op.operation == "UPDATED"] + updated_fields = {op.field_name for op in updated_ops} + self.assertIn("name", updated_fields) + self.assertIn("command", updated_fields) + self.assertIn("cwd", updated_fields) + + # Verify VS Code field is UNSUPPORTED + unsupported_ops = [op for op in report.field_operations if op.operation == "UNSUPPORTED"] + self.assertEqual(len(unsupported_ops), 1) + self.assertEqual(unsupported_ops[0].field_name, "envFile") + + @regression_test + def test_generate_report_update_operation(self): + """Test generate_conversion_report for update operation.""" + old_config = MCPServerConfigOmni( + name="my-server", + command="python", + args=["old.py"] + ) + + new_omni = MCPServerConfigOmni( + name="my-server", + command="python", + args=["new.py"] + ) + + report = generate_conversion_report( + operation="update", + server_name="my-server", + target_host=MCPHostType.GEMINI, + omni=new_omni, + old_config=old_config + ) + + # Verify name and command are UNCHANGED + unchanged_ops = [op for op in report.field_operations if op.operation == "UNCHANGED"] + unchanged_fields = {op.field_name for op in unchanged_ops} + self.assertIn("name", unchanged_fields) + self.assertIn("command", unchanged_fields) + + # Verify args is UPDATED + updated_ops = [op for op in report.field_operations if 
op.operation == "UPDATED"] + self.assertEqual(len(updated_ops), 1) + self.assertEqual(updated_ops[0].field_name, "args") + self.assertEqual(updated_ops[0].old_value, ["old.py"]) + self.assertEqual(updated_ops[0].new_value, ["new.py"]) + + @regression_test + def test_generate_report_dynamic_field_derivation(self): + """Test that generate_conversion_report uses dynamic field derivation.""" + omni = MCPServerConfigOmni( + name="test-server", + command="python" + ) + + # Generate report for Gemini + report_gemini = generate_conversion_report( + operation="create", + server_name="test-server", + target_host=MCPHostType.GEMINI, + omni=omni + ) + + # All fields should be UPDATED (no unsupported) + unsupported_ops = [op for op in report_gemini.field_operations if op.operation == "UNSUPPORTED"] + self.assertEqual(len(unsupported_ops), 0) + + +class TestDisplayReport(unittest.TestCase): + """Test suite for display_report() function.""" + + @regression_test + def test_display_report_create_operation(self): + """Test display_report for create operation.""" + report = ConversionReport( + operation="create", + server_name="my-server", + target_host=MCPHostType.GEMINI, + field_operations=[ + FieldOperation(field_name="command", operation="UPDATED", old_value=None, new_value="python") + ] + ) + + # Capture stdout + captured_output = StringIO() + sys.stdout = captured_output + + display_report(report) + + sys.stdout = sys.__stdout__ + output = captured_output.getvalue() + + # Verify header + self.assertIn("Server 'my-server' created for host", output) + self.assertIn("gemini", output.lower()) + + # Verify field operation displayed + self.assertIn("command: UPDATED", output) + + @regression_test + def test_display_report_update_operation(self): + """Test display_report for update operation.""" + report = ConversionReport( + operation="update", + server_name="my-server", + target_host=MCPHostType.VSCODE, + field_operations=[ + FieldOperation(field_name="args", operation="UPDATED", old_value=["old.py"], new_value=["new.py"]) + ] + ) + + # Capture stdout + captured_output = StringIO() + sys.stdout = captured_output + + display_report(report) + + sys.stdout = sys.__stdout__ + output = captured_output.getvalue() + + # Verify header + self.assertIn("Server 'my-server' updated for host", output) + + @regression_test + def test_display_report_dry_run(self): + """Test display_report for dry-run mode.""" + report = ConversionReport( + operation="create", + server_name="my-server", + target_host=MCPHostType.GEMINI, + field_operations=[], + dry_run=True + ) + + # Capture stdout + captured_output = StringIO() + sys.stdout = captured_output + + display_report(report) + + sys.stdout = sys.__stdout__ + output = captured_output.getvalue() + + # Verify dry-run header and footer + self.assertIn("[DRY RUN]", output) + self.assertIn("Preview of changes", output) + self.assertIn("No changes were made", output) + + +if __name__ == '__main__': + unittest.main() +
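
The tests above exercise the conversion pipeline piece by piece. Below is a minimal end-to-end sketch of how those same pieces fit together: build a host-agnostic Omni model, generate and display a conversion report for a target host, then use the registry to obtain the host-specific model. It only uses calls and keyword arguments that the tests themselves exercise; the concrete values ("weather-server", the paths, the env entries) are illustrative placeholders, not taken from the change set.

```python
# Sketch only, assuming the APIs behave as exercised by the tests above.
from hatch.mcp_host_config.models import (
    MCPServerConfigOmni,
    MCPHostType,
    HOST_MODEL_REGISTRY,
)
from hatch.mcp_host_config.reporting import generate_conversion_report, display_report

# Collect all user-supplied fields into the host-agnostic Omni model.
# cwd is a Gemini-only field; envFile is a VS Code/Cursor-only field.
omni = MCPServerConfigOmni(
    name="weather-server",          # illustrative value
    command="python",
    args=["weather.py"],
    env={"API_KEY": "secret"},
    cwd="/srv/weather",             # illustrative path
    envFile=".env",
)

# The report marks fields supported by the target host as UPDATED and
# flags the rest (here envFile, for Gemini) as UNSUPPORTED.
report = generate_conversion_report(
    operation="create",
    server_name="weather-server",
    target_host=MCPHostType.GEMINI,
    omni=omni,
    dry_run=True,
)
display_report(report)  # prints the [DRY RUN] preview to stdout

# Dictionary dispatch selects the host-specific model class; from_omni()
# carries over supported fields and drops the ones the host cannot use.
model_cls = HOST_MODEL_REGISTRY[MCPHostType.GEMINI]
server_config = model_cls.from_omni(omni)
```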