diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml deleted file mode 100644 index 0cc9a4f78..000000000 --- a/.github/workflows/main.yml +++ /dev/null @@ -1,6 +0,0 @@ -name: Test AgentEx Tutorials - -on: - workflow_dispatch: - - workflow_call: diff --git a/src/agentex/lib/cli/commands/agents.py b/src/agentex/lib/cli/commands/agents.py index 10dab0d03..3c1abdf8a 100644 --- a/src/agentex/lib/cli/commands/agents.py +++ b/src/agentex/lib/cli/commands/agents.py @@ -83,26 +83,24 @@ def delete( @agents.command() def cleanup_workflows( agent_name: str = typer.Argument(..., help="Name of the agent to cleanup workflows for"), - force: bool = typer.Option(False, help="Force cleanup using direct Temporal termination (bypasses development check)"), + force: bool = typer.Option( + False, help="Force cleanup using direct Temporal termination (bypasses development check)" + ), ): """ Clean up all running workflows for an agent. - + By default, uses graceful cancellation via agent RPC. With --force, directly terminates workflows via Temporal client. This is a convenience command that does the same thing as 'agentex tasks cleanup'. """ try: console.print(f"[blue]Cleaning up workflows for agent '{agent_name}'...[/blue]") - - cleanup_agent_workflows( - agent_name=agent_name, - force=force, - development_only=True - ) - + + cleanup_agent_workflows(agent_name=agent_name, force=force, development_only=True) + console.print(f"[green]✓ Workflow cleanup completed for agent '{agent_name}'[/green]") - + except Exception as e: console.print(f"[red]Cleanup failed: {str(e)}[/red]") logger.exception("Agent workflow cleanup failed") @@ -112,12 +110,8 @@ def cleanup_workflows( @agents.command() def build( manifest: str = typer.Option(..., help="Path to the manifest you want to use"), - registry: str | None = typer.Option( - None, help="Registry URL for pushing the built image" - ), - repository_name: str | None = typer.Option( - None, help="Repository name to use for the built image" - ), + registry: str | None = typer.Option(None, help="Registry URL for pushing the built image"), + repository_name: str | None = typer.Option(None, help="Repository name to use for the built image"), platforms: str | None = typer.Option( None, help="Platform to build the image for. Please enter a comma separated list of platforms." 
), @@ -126,9 +120,7 @@ def build( None, help="Docker build secret in the format 'id=secret-id,src=path-to-secret-file'", ), - tag: str | None = typer.Option( - None, help="Image tag to use (defaults to 'latest')" - ), + tag: str | None = typer.Option(None, help="Image tag to use (defaults to 'latest')"), build_arg: builtins.list[str] | None = typer.Option( # noqa: B008 None, help="Docker build argument in the format 'KEY=VALUE' (can be used multiple times)", @@ -143,7 +135,7 @@ def build( if push and not registry: typer.echo("Error: --registry is required when --push is enabled", err=True) raise typer.Exit(1) - + # Only proceed with build if we have a registry (for now, to match existing behavior) if not registry: typer.echo("No registry provided, skipping image build") @@ -175,10 +167,7 @@ def build( @agents.command() def run( manifest: str = typer.Option(..., help="Path to the manifest you want to use"), - cleanup_on_start: bool = typer.Option( - False, - help="Clean up existing workflows for this agent before starting" - ), + cleanup_on_start: bool = typer.Option(False, help="Clean up existing workflows for this agent before starting"), # Debug options debug: bool = typer.Option(False, help="Enable debug mode for both worker and ACP (disables auto-reload)"), debug_worker: bool = typer.Option(False, help="Enable debug mode for temporal worker only"), @@ -190,26 +179,22 @@ def run( Run an agent locally from the given manifest. """ typer.echo(f"Running agent from manifest: {manifest}") - + # Optionally cleanup existing workflows before starting if cleanup_on_start: try: # Parse manifest to get agent name manifest_obj = AgentManifest.from_yaml(file_path=manifest) agent_name = manifest_obj.agent.name - + console.print(f"[yellow]Cleaning up existing workflows for agent '{agent_name}'...[/yellow]") - cleanup_agent_workflows( - agent_name=agent_name, - force=False, - development_only=True - ) + cleanup_agent_workflows(agent_name=agent_name, force=False, development_only=True) console.print("[green]✓ Pre-run cleanup completed[/green]") - + except Exception as e: console.print(f"[yellow]⚠ Pre-run cleanup failed: {str(e)}[/yellow]") logger.warning(f"Pre-run cleanup failed: {e}") - + # Create debug configuration based on CLI flags debug_config = None if debug or debug_worker or debug_acp: @@ -224,19 +209,19 @@ def run( mode = DebugMode.ACP else: mode = DebugMode.NONE - + debug_config = DebugConfig( enabled=True, mode=mode, port=debug_port, wait_for_attach=wait_for_debugger, - auto_port=False # Use fixed port to match VS Code launch.json + auto_port=False, # Use fixed port to match VS Code launch.json ) - + console.print(f"[blue]🐛 Debug mode enabled: {mode.value}[/blue]") if wait_for_debugger: console.print("[yellow]⏳ Processes will wait for debugger attachment[/yellow]") - + try: run_agent(manifest_path=manifest, debug_config=debug_config) except Exception as e: @@ -247,30 +232,23 @@ def run( @agents.command() def deploy( - cluster: str = typer.Option( - ..., help="Target cluster name (must match kubectl context)" - ), + cluster: str = typer.Option(..., help="Target cluster name (must match kubectl context)"), manifest: str = typer.Option("manifest.yaml", help="Path to the manifest file"), namespace: str | None = typer.Option( None, help="Override Kubernetes namespace (defaults to namespace from environments.yaml)", ), environment: str | None = typer.Option( - None, help="Environment name (dev, prod, etc.) - must be defined in environments.yaml. 
If not provided, the namespace must be set explicitly." - ), - tag: str | None = typer.Option(None, help="Override the image tag for deployment"), - repository: str | None = typer.Option( - None, help="Override the repository for deployment" - ), - interactive: bool = typer.Option( - True, "--interactive/--no-interactive", help="Enable interactive prompts" + None, + help="Environment name (dev, prod, etc.) - must be defined in environments.yaml. If not provided, the namespace must be set explicitly.", ), + tag: str | None = typer.Option(None, help="Override the image tag for deployment"), + repository: str | None = typer.Option(None, help="Override the repository for deployment"), + interactive: bool = typer.Option(True, "--interactive/--no-interactive", help="Enable interactive prompts"), ): """Deploy an agent to a Kubernetes cluster using Helm""" - console.print( - Panel.fit("🚀 [bold blue]Deploy Agent[/bold blue]", border_style="blue") - ) + console.print(Panel.fit("🚀 [bold blue]Deploy Agent[/bold blue]", border_style="blue")) try: # Validate manifest exists @@ -281,17 +259,12 @@ def deploy( # Validate manifest and environments configuration try: - if environment: - _, environments_config = validate_manifest_and_environments( - str(manifest_path), - required_environment=environment - ) - agent_env_config = environments_config.get_config_for_env(environment) - console.print(f"[green]✓[/green] Environment config validated: {environment}") - else: - agent_env_config = None - console.print(f"[yellow]⚠[/yellow] No environment provided, skipping environment-specific config") - + _, environments_config = validate_manifest_and_environments( + str(manifest_path), required_environment=environment + ) + agent_env_config = environments_config.get_config_for_env(environment) + console.print(f"[green]✓[/green] Environment config validated: {environment}") + except EnvironmentsValidationError as e: error_msg = generate_helpful_error_message(e, "Environment validation failed") console.print(f"[red]Configuration Error:[/red]\n{error_msg}") @@ -310,9 +283,13 @@ def deploy( console.print(f"[blue]ℹ[/blue] Using namespace from environments.yaml: {namespace_from_config}") namespace = namespace_from_config else: - raise DeploymentError(f"No namespace found in environments.yaml for environment: {environment}, and not passed in as --namespace") + raise DeploymentError( + f"No namespace found in environments.yaml for environment: {environment}, and not passed in as --namespace" + ) elif not namespace: - raise DeploymentError("No namespace provided, and not passed in as --namespace and no environment provided to read from an environments.yaml file") + raise DeploymentError( + "No namespace provided, and not passed in as --namespace and no environment provided to read from an environments.yaml file" + ) # Confirm deployment (only in interactive mode) console.print("\n[bold]Deployment Summary:[/bold]") @@ -325,9 +302,7 @@ def deploy( if interactive: proceed = questionary.confirm("Proceed with deployment?").ask() - proceed = handle_questionary_cancellation( - proceed, "deployment confirmation" - ) + proceed = handle_questionary_cancellation(proceed, "deployment confirmation") if not proceed: console.print("Deployment cancelled") @@ -337,9 +312,7 @@ def deploy( check_and_switch_cluster_context(cluster) if not validate_namespace(namespace, cluster): - console.print( - f"[red]Error:[/red] Namespace '{namespace}' does not exist in cluster '{cluster}'" - ) + console.print(f"[red]Error:[/red] Namespace '{namespace}' does 
not exist in cluster '{cluster}'") raise typer.Exit(1) deploy_overrides = InputDeployOverrides(repository=repository, image_tag=tag) @@ -356,9 +329,7 @@ def deploy( # Use the already loaded manifest object release_name = f"{manifest_obj.agent.name}-{cluster}" - console.print( - "\n[bold green]🎉 Deployment completed successfully![/bold green]" - ) + console.print("\n[bold green]🎉 Deployment completed successfully![/bold green]") console.print("\nTo check deployment status:") console.print(f" kubectl get pods -n {namespace}") console.print(f" helm status {release_name} -n {namespace}") diff --git a/src/agentex/lib/sdk/config/environment_config.py b/src/agentex/lib/sdk/config/environment_config.py index 959e26830..c625dbaf3 100644 --- a/src/agentex/lib/sdk/config/environment_config.py +++ b/src/agentex/lib/sdk/config/environment_config.py @@ -18,13 +18,12 @@ class AgentAuthConfig(BaseModel): """Authentication configuration for an agent in a specific environment.""" - + principal: Dict[str, Any] = Field( - ..., - description="Principal configuration for agent authorization and registration" + ..., description="Principal configuration for agent authorization and registration" ) - - @field_validator('principal') + + @field_validator("principal") @classmethod def validate_principal_required_fields(cls, v: Any) -> Dict[str, Any]: """Ensure principal has required fields for agent registration.""" @@ -35,125 +34,166 @@ def validate_principal_required_fields(cls, v: Any) -> Dict[str, Any]: class AgentKubernetesConfig(BaseModel): """Kubernetes configuration for an agent in a specific environment.""" - - namespace: str = Field( - ..., - description="Kubernetes namespace where the agent will be deployed" - ) - - @field_validator('namespace') + + namespace: str = Field(..., description="Kubernetes namespace where the agent will be deployed") + + @field_validator("namespace") @classmethod def validate_namespace_format(cls, v: str) -> str: """Ensure namespace follows Kubernetes naming conventions.""" if not v or not v.strip(): raise ValueError("Namespace cannot be empty") - + # Basic Kubernetes namespace validation namespace = v.strip().lower() - if not namespace.replace('-', '').replace('.', '').isalnum(): - raise ValueError( - f"Namespace '{v}' must contain only lowercase letters, numbers, " - "hyphens, and periods" - ) - + if not namespace.replace("-", "").replace(".", "").isalnum(): + raise ValueError(f"Namespace '{v}' must contain only lowercase letters, numbers, hyphens, and periods") + if len(namespace) > 63: raise ValueError(f"Namespace '{v}' cannot exceed 63 characters") - + return namespace class AgentEnvironmentConfig(BaseModel): """Complete configuration for an agent in a specific environment.""" - - kubernetes: AgentKubernetesConfig | None = Field( - default=None, - description="Kubernetes deployment configuration" - ) - auth: AgentAuthConfig = Field( - ..., - description="Authentication and authorization configuration" - ) - helm_repository_name: str = Field( - default="scale-egp", - description="Helm repository name for the environment" + + kubernetes: AgentKubernetesConfig | None = Field(default=None, description="Kubernetes deployment configuration") + environment: str | None = Field( + default=None, + description="The environment keyword that this specific environment maps to: either dev, staging, prod", ) + auth: AgentAuthConfig = Field(..., description="Authentication and authorization configuration") + helm_repository_name: str = Field(default="scale-egp", description="Helm repository 
name for the environment") helm_repository_url: str = Field( - default="https://scale-egp-helm-charts-us-west-2.s3.amazonaws.com/charts", - description="Helm repository url for the environment" + default="https://scale-egp-helm-charts-us-west-2.s3.amazonaws.com/charts", + description="Helm repository url for the environment", ) helm_overrides: Dict[str, Any] = Field( - default_factory=dict, - description="Helm chart value overrides for environment-specific tuning" + default_factory=dict, description="Helm chart value overrides for environment-specific tuning" ) class AgentEnvironmentsConfig(UtilsBaseModel): """All environment configurations for an agent.""" - - schema_version: str = Field( - default="v1", - description="Schema version for validation and compatibility" - ) + + schema_version: str = Field(default="v1", description="Schema version for validation and compatibility") environments: Dict[str, AgentEnvironmentConfig] = Field( - ..., - description="Environment-specific configurations (dev, prod, etc.)" + ..., description="Environment-specific configurations (dev, prod, etc.)" ) - - @field_validator('schema_version') + + @field_validator("schema_version") @classmethod def validate_schema_version(cls, v: str) -> str: """Ensure schema version is supported.""" - supported_versions = ['v1'] + supported_versions = ["v1"] if v not in supported_versions: - raise ValueError( - f"Schema version '{v}' not supported. " - f"Supported versions: {', '.join(supported_versions)}" - ) + raise ValueError(f"Schema version '{v}' not supported. Supported versions: {', '.join(supported_versions)}") return v - - @field_validator('environments') + + @field_validator("environments") @classmethod def validate_environments_not_empty(cls, v: Dict[str, AgentEnvironmentConfig]) -> Dict[str, AgentEnvironmentConfig]: """Ensure at least one environment is defined.""" if not v: raise ValueError("At least one environment must be defined") return v - + def get_config_for_env(self, env_name: str) -> AgentEnvironmentConfig: """Get configuration for a specific environment. - + Args: env_name: Name of the environment (e.g., 'dev', 'prod') - + Returns: AgentEnvironmentConfig for the specified environment - + Raises: ValueError: If environment is not found """ if env_name not in self.environments: - available_envs = ', '.join(self.environments.keys()) + available_envs = ", ".join(self.environments.keys()) raise ValueError( - f"Environment '{env_name}' not found in environments.yaml. " - f"Available environments: {available_envs}" + f"Environment '{env_name}' not found in environments.yaml. Available environments: {available_envs}" ) return self.environments[env_name] - + + def get_configs_for_env(self, env_target: str) -> dict[str, AgentEnvironmentConfig]: + """Get configuration for a specific environment based on the expected mapping. + The environment is either: + 1. 
explicitly specified like so using a key-map in the environments config: + environments: + dev-aws: + environment: "dev" + kubernetes: + namespace: "sgp-000-hello-acp" + auth: + principal: + user_id: 73d0c8bd-4726-434c-9686-eb627d89f078 + account_id: 6887f093600ecd59bbbd3095 + helm_overrides: + + 2. it can be defined directly at the top level: + dev: + kubernetes: + namespace: "sgp-000-hello-acp" + auth: + principal: + user_id: 73d0c8bd-4726-434c-9686-eb627d89f078 + account_id: 6887f093600ecd59bbbd3095 + helm_overrides: + + if the environment field is not explicitly set, we assume it's the same as + the name of the environment. + Args: + env_target: Name of the environment target (e.g., 'dev', 'prod') + + Returns: + Dict mapping environment names to their AgentEnvironmentConfig for the specified target + + Raises: + ValueError: If no matching environment is found + """ + envs_to_deploy = {} + if env_target in self.environments: + # supports the case where the top-level key (e.g. "dev", "staging") matches + # the target environment name exactly, without any explicit mapping + envs_to_deploy[env_target] = self.environments[env_target] + + for env_name, config in self.environments.items(): + if config.environment == env_target: + envs_to_deploy[env_name] = config + + if len(envs_to_deploy) == 0: + # "available_envs" lists the target environments (mapped values or bare top-level keys), not raw config keys + + available_envs = set() + for env_name, config in self.environments.items(): + if config.environment is not None: + available_envs.add(config.environment) + else: + available_envs.add(env_name) + raise ValueError( + f"Environment '{env_target}' not found in environments.yaml. Available environments: {available_envs}" + ) + + return envs_to_deploy + def list_environments(self) -> list[str]: """Get list of all configured environment names.""" return list(self.environments.keys()) - + @classmethod @override def from_yaml(cls, file_path: str) -> "AgentEnvironmentsConfig": """Load configuration from environments.yaml file. - + Args: file_path: Path to environments.yaml file - + Returns: Parsed and validated AgentEnvironmentsConfig - + Raises: FileNotFoundError: If file doesn't exist ValueError: If file is invalid or doesn't validate @@ -161,16 +201,16 @@ def from_yaml(cls, file_path: str) -> "AgentEnvironmentsConfig": path = Path(file_path) if not path.exists(): raise FileNotFoundError(f"environments.yaml not found: {file_path}") - + try: - with open(path, 'r') as f: + with open(path, "r") as f: data = yaml.safe_load(f) - + if not data: raise ValueError("environments.yaml file is empty") - + return cls.model_validate(data) - + except yaml.YAMLError as e: raise ValueError(f"Invalid YAML format in {file_path}: {e}") from e except Exception as e: @@ -179,18 +219,18 @@ def from_yaml(cls, file_path: str) -> "AgentEnvironmentsConfig": def load_environments_config_from_manifest_dir(manifest_dir: Path) -> AgentEnvironmentsConfig | None: """Helper function to load environments.yaml from same directory as manifest.yaml.
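# A minimal usage sketch of the new get_configs_for_env target resolution, mirroring the
# test fixtures further down in this diff. The environment names, namespaces, and user_ids
# are illustrative placeholders, and the import path assumes the src-layout module paths
# shown in this diff; this is a sketch, not part of the patch itself.
from agentex.lib.sdk.config.environment_config import (
    AgentAuthConfig,
    AgentEnvironmentConfig,
    AgentEnvironmentsConfig,
    AgentKubernetesConfig,
)

config = AgentEnvironmentsConfig(
    environments={
        # two clusters that both map to the "dev" target via the explicit `environment` field
        "dev-aws": AgentEnvironmentConfig(
            environment="dev",
            kubernetes=AgentKubernetesConfig(namespace="dev-ns-aws"),
            auth=AgentAuthConfig(principal={"user_id": "dev-aws-user"}),
        ),
        "dev-gcp": AgentEnvironmentConfig(
            environment="dev",
            kubernetes=AgentKubernetesConfig(namespace="dev-ns-gcp"),
            auth=AgentAuthConfig(principal={"user_id": "dev-gcp-user"}),
        ),
        # a top-level key that matches its target name directly, so no mapping is needed
        "prod": AgentEnvironmentConfig(
            kubernetes=AgentKubernetesConfig(namespace="prod-ns"),
            auth=AgentAuthConfig(principal={"user_id": "prod-user"}),
        ),
    },
)

assert set(config.get_configs_for_env("dev")) == {"dev-aws", "dev-gcp"}  # both mapped clusters
assert set(config.get_configs_for_env("prod")) == {"prod"}  # exact top-level match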
- + Args: manifest_dir: Directory containing manifest.yaml - + Returns: AgentEnvironmentsConfig if environments.yaml exists, None otherwise - + Raises: ValueError: If environments.yaml exists but is invalid """ environments_file = manifest_dir / "environments.yaml" if not environments_file.exists(): return None - + return AgentEnvironmentsConfig.from_yaml(str(environments_file)) diff --git a/src/agentex/lib/sdk/config/validation.py b/src/agentex/lib/sdk/config/validation.py index 4b00a682c..d051fa1ae 100644 --- a/src/agentex/lib/sdk/config/validation.py +++ b/src/agentex/lib/sdk/config/validation.py @@ -4,6 +4,7 @@ This module provides validation functions for agent configurations, with clear error messages and best practices enforcement. """ + from __future__ import annotations from typing import Any, Dict, List, Optional @@ -17,7 +18,7 @@ class ConfigValidationError(Exception): """Exception raised when configuration validation fails.""" - + def __init__(self, message: str, file_path: Optional[str] = None): self.file_path = file_path super().__init__(message) @@ -25,88 +26,95 @@ def __init__(self, message: str, file_path: Optional[str] = None): class EnvironmentsValidationError(ConfigValidationError): """Exception raised when environments.yaml validation fails.""" + pass def validate_environments_config( - environments_config: AgentEnvironmentsConfig, - required_environments: Optional[List[str]] = None + environments_config: AgentEnvironmentsConfig, required_environments: Optional[List[str]] = None ) -> None: """ Validate environments configuration with comprehensive checks. - + Args: environments_config: The loaded environments configuration required_environments: List of environment names that must be present - + Raises: EnvironmentsValidationError: If validation fails """ # Check for required environments if required_environments: + # each required environment must exist either as a top-level key or via the `environment` mapping field missing_envs: List[str] = [] + environment_mappings = [env.environment for env in environments_config.environments.values() if env.environment] + top_level_envs = [env for env in environments_config.environments] + all_envs = set(environment_mappings + top_level_envs) for env_name in required_environments: - if env_name not in environments_config.environments: + if env_name not in all_envs: missing_envs.append(env_name) - + if missing_envs: - available_envs = list(environments_config.environments.keys()) raise EnvironmentsValidationError( f"Missing required environments: {', '.join(missing_envs)}. " - f"Available environments: {', '.join(available_envs)}" + f"Available environments: {', '.join(all_envs)}" ) - + + # if environment mappings are used, a top-level env_name (e.g. "dev") must not also appear + # as another entry's `environment:` value, e.g. + # environments: + # dev: + # ....
+ # dev1: + # environment: dev + # this is invalid because it's unclear whether "dev" refers to just that top-level environment or the mapping + # # Validate each environment configuration for env_name, env_config in environments_config.environments.items(): try: _validate_single_environment_config(env_name, env_config) except Exception as e: - raise EnvironmentsValidationError( - f"Environment '{env_name}' configuration error: {str(e)}" - ) from e + raise EnvironmentsValidationError(f"Environment '{env_name}' configuration error: {str(e)}") from e def _validate_single_environment_config(env_name: str, env_config: AgentEnvironmentConfig) -> None: """ Validate a single environment configuration. - + Args: env_name: Name of the environment env_config: AgentEnvironmentConfig instance - + Raises: ValueError: If validation fails """ # Validate namespace naming conventions if kubernetes config exists if env_config.kubernetes and env_config.kubernetes.namespace: namespace = env_config.kubernetes.namespace - + # Check for common namespace naming issues if namespace != namespace.lower(): logger.warning( - f"Namespace '{namespace}' contains uppercase letters. " - "Kubernetes namespaces should be lowercase." + f"Namespace '{namespace}' contains uppercase letters. Kubernetes namespaces should be lowercase." ) - - if namespace.startswith('-') or namespace.endswith('-'): - raise ValueError( - f"Namespace '{namespace}' cannot start or end with hyphens" - ) - + + if namespace.startswith("-") or namespace.endswith("-"): + raise ValueError(f"Namespace '{namespace}' cannot start or end with hyphens") + + # Validate auth principal principal = env_config.auth.principal - if not principal.get('user_id'): + if not principal.get("user_id"): raise ValueError("Auth principal must contain non-empty 'user_id'") - + # Check for environment-specific user_id patterns - user_id = principal['user_id'] + user_id = principal["user_id"] if isinstance(user_id, str): - if not any(env_name.lower() in user_id.lower() for env_name in ['dev', 'prod', 'staging', env_name]): + if not any(env_name.lower() in user_id.lower() for env_name in ["dev", "prod", "staging", env_name]): logger.warning( f"User ID '{user_id}' doesn't contain environment indicator. " f"Consider including '{env_name}' in the user_id for clarity." ) - + # Validate helm overrides if present if env_config.helm_overrides: _validate_helm_overrides(env_config.helm_overrides) @@ -115,26 +123,26 @@ def _validate_single_environm def _validate_helm_overrides(helm_overrides: Dict[str, Any]) -> None: """ Validate helm override configuration.
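# Sketch of the required-environment check above: a requested target counts as present if it is
# either a top-level key or some entry's `environment:` value. Names and values here are
# illustrative placeholders, and the import paths assume the src-layout shown in this diff.
from agentex.lib.sdk.config.environment_config import (
    AgentAuthConfig,
    AgentEnvironmentConfig,
    AgentEnvironmentsConfig,
    AgentKubernetesConfig,
)
from agentex.lib.sdk.config.validation import (
    EnvironmentsValidationError,
    validate_environments_config,
)

envs = AgentEnvironmentsConfig(
    environments={
        "dev-aws": AgentEnvironmentConfig(
            environment="dev",  # satisfies a required "dev" target even though the key is "dev-aws"
            kubernetes=AgentKubernetesConfig(namespace="dev-ns"),
            auth=AgentAuthConfig(principal={"user_id": "dev-user"}),
        ),
    },
)

validate_environments_config(envs, required_environments=["dev"])  # passes via the mapping

try:
    validate_environments_config(envs, required_environments=["staging"])
except EnvironmentsValidationError as err:
    print(err)  # reports the missing target and the environments that are available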
- + Args: helm_overrides: Dictionary of helm overrides - + Raises: ValueError: If validation fails """ # Check for common helm override issues - if 'resources' in helm_overrides: - resources = helm_overrides['resources'] + if "resources" in helm_overrides: + resources = helm_overrides["resources"] if isinstance(resources, dict): # Validate resource format - if 'requests' in resources or 'limits' in resources: - for resource_type in ['requests', 'limits']: + if "requests" in resources or "limits" in resources: + for resource_type in ["requests", "limits"]: if resource_type in resources: resource_config: Any = resources[resource_type] if isinstance(resource_config, dict): # Check for valid resource specifications for key, value in resource_config.items(): - if key in ['cpu', 'memory'] and not isinstance(value, str): + if key in ["cpu", "memory"] and not isinstance(value, str): logger.warning( f"Resource {key} should be a string (e.g., '500m', '1Gi'), " f"got {type(value).__name__}: {value}" @@ -144,13 +152,13 @@ def _validate_helm_overrides(helm_overrides: Dict[str, Any]) -> None: def validate_environments_yaml_file(file_path: str) -> AgentEnvironmentsConfig: """ Load and validate environments.yaml file. - + Args: file_path: Path to environments.yaml file - + Returns: Validated AgentEnvironmentsConfig - + Raises: EnvironmentsValidationError: If file is invalid """ @@ -164,66 +172,59 @@ def validate_environments_yaml_file(file_path: str) -> AgentEnvironmentsConfig: "📋 Why required:\n" " Environment-specific settings (auth, namespace, resources)\n" " must be separated from global manifest for proper isolation.", - file_path=file_path + file_path=file_path, ) from None except Exception as e: - raise EnvironmentsValidationError( - f"Invalid environments.yaml file: {str(e)}", - file_path=file_path - ) from e + raise EnvironmentsValidationError(f"Invalid environments.yaml file: {str(e)}", file_path=file_path) from e def validate_manifest_and_environments( - manifest_path: str, - required_environment: Optional[str] = None + manifest_path: str, required_environment: Optional[str] = None ) -> tuple[str, AgentEnvironmentsConfig]: """ Validate both manifest.yaml and environments.yaml files together. - + Args: manifest_path: Path to manifest.yaml file required_environment: Specific environment that must be present - + Returns: Tuple of (manifest_path, environments_config) - + Raises: ConfigValidationError: If validation fails """ manifest_file = Path(manifest_path) if not manifest_file.exists(): raise ConfigValidationError(f"Manifest file not found: {manifest_path}") - + # Look for environments.yaml in same directory environments_file = manifest_file.parent / "environments.yaml" environments_config = validate_environments_yaml_file(str(environments_file)) - + # Validate specific environment if requested if required_environment: - validate_environments_config( - environments_config, - required_environments=[required_environment] - ) - + validate_environments_config(environments_config, required_environments=[required_environment]) + return manifest_path, environments_config def generate_helpful_error_message(error: Exception, context: str = "") -> str: """ Generate helpful error message with troubleshooting tips. 
- + Args: error: The original exception context: Additional context about where the error occurred - + Returns: Formatted error message with troubleshooting tips """ base_msg = str(error) - + if context: base_msg = f"{context}: {base_msg}" - + # Add troubleshooting tips based on error type if isinstance(error, FileNotFoundError): if "environments.yaml" in base_msg: @@ -246,5 +247,5 @@ def generate_helpful_error_message(error: Exception, context: str = "") -> str: "- Include team and environment (e.g., 'team-dev-agent')\n" "- Keep under 63 characters" ) - + return base_msg diff --git a/tests/lib/cli/__init__.py b/tests/lib/cli/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/lib/cli/test_environment_config.py b/tests/lib/cli/test_environment_config.py new file mode 100644 index 000000000..ef4bfae11 --- /dev/null +++ b/tests/lib/cli/test_environment_config.py @@ -0,0 +1,298 @@ +"""Tests for AgentEnvironmentsConfig.""" + +import tempfile + +import pytest + +from agentex.lib.sdk.config.environment_config import ( + AgentAuthConfig, + AgentKubernetesConfig, + AgentEnvironmentConfig, + AgentEnvironmentsConfig, +) + + +class TestAgentEnvironmentsConfig: + """Test cases for AgentEnvironmentsConfig.get_config_for_env method.""" + + @pytest.fixture + def single_env_config(self) -> AgentEnvironmentsConfig: + """Config with a single environment using direct key name.""" + return AgentEnvironmentsConfig( + schema_version="v1", + environments={ + "dev": AgentEnvironmentConfig( + kubernetes=AgentKubernetesConfig(namespace="dev-ns"), + auth=AgentAuthConfig(principal={"user_id": "dev-user"}), + ) + }, + ) + + @pytest.fixture + def multi_env_config(self) -> AgentEnvironmentsConfig: + """Config with multiple environments using direct key names.""" + return AgentEnvironmentsConfig( + schema_version="v1", + environments={ + "dev": AgentEnvironmentConfig( + kubernetes=AgentKubernetesConfig(namespace="dev-ns"), + auth=AgentAuthConfig(principal={"user_id": "dev-user"}), + ), + "staging": AgentEnvironmentConfig( + kubernetes=AgentKubernetesConfig(namespace="staging-ns"), + auth=AgentAuthConfig(principal={"user_id": "staging-user"}), + ), + "prod": AgentEnvironmentConfig( + kubernetes=AgentKubernetesConfig(namespace="prod-ns"), + auth=AgentAuthConfig(principal={"user_id": "prod-user"}), + ), + }, + ) + + @pytest.fixture + def multi_cluster_same_env_config(self) -> AgentEnvironmentsConfig: + """Config with multiple clusters mapping to the same environment keyword.""" + return AgentEnvironmentsConfig( + schema_version="v1", + environments={ + "dev-aws": AgentEnvironmentConfig( + kubernetes=AgentKubernetesConfig(namespace="dev-ns-aws"), + environment="dev", + auth=AgentAuthConfig(principal={"user_id": "dev-aws-user"}), + ), + "dev-gcp": AgentEnvironmentConfig( + kubernetes=AgentKubernetesConfig(namespace="dev-ns-gcp"), + environment="dev", + auth=AgentAuthConfig(principal={"user_id": "dev-gcp-user"}), + ), + "prod": AgentEnvironmentConfig( + kubernetes=AgentKubernetesConfig(namespace="prod-ns"), + auth=AgentAuthConfig(principal={"user_id": "prod-user"}), + ), + }, + ) + + def test_get_config_by_exact_key_match(self, single_env_config: AgentEnvironmentsConfig): + """Test that exact key match returns the correct config.""" + result = single_env_config.get_config_for_env("dev") + assert result is not None + + def test_get_config_nonexistent_env_raises_error(self, single_env_config: AgentEnvironmentsConfig): + """Test that requesting non-existent environment raises ValueError.""" + with 
pytest.raises(ValueError, match="not found"): + single_env_config.get_config_for_env("nonexistent") + + def test_get_config_exact_key_with_multiple_envs(self, multi_env_config: AgentEnvironmentsConfig): + """Test getting config by exact key when multiple environments exist.""" + result = multi_env_config.get_config_for_env("staging") + assert result is not None + + def test_get_config_by_specific_cluster_name(self, multi_cluster_same_env_config: AgentEnvironmentsConfig): + """Test getting config by specific cluster name (e.g., dev-aws).""" + result = multi_cluster_same_env_config.get_config_for_env("dev-aws") + assert result is not None + + def test_get_configs_without_explicit_mapping(self, single_env_config: AgentEnvironmentsConfig): + """Test getting config without explicit mapping returns a dict with env name as key.""" + result = single_env_config.get_configs_for_env("dev") + assert isinstance(result, dict) + assert len(result) == 1 + assert "dev" in result + assert result["dev"] == single_env_config.get_config_for_env("dev") + + def test_multiple_envs_same_keyword_returns_multiple(self, multi_cluster_same_env_config: AgentEnvironmentsConfig): + """Test that querying 'dev' when multiple envs have environment='dev' returns multiple. + + Returns a dict mapping env names (dev-aws, dev-gcp) to their configs. + """ + result = multi_cluster_same_env_config.get_configs_for_env("dev") + assert isinstance(result, dict) + assert len(result) == 2 + assert "dev-aws" in result + assert "dev-gcp" in result + assert result["dev-aws"].kubernetes.namespace == "dev-ns-aws" + assert result["dev-gcp"].kubernetes.namespace == "dev-ns-gcp" + + def test_list_environments(self, multi_env_config: AgentEnvironmentsConfig): + """Test listing all environment names.""" + envs = multi_env_config.list_environments() + assert set(envs) == {"dev", "staging", "prod"} + + +class TestAgentEnvironmentsConfigFromYaml: + """Test cases for AgentEnvironmentsConfig.from_yaml method.""" + + def test_load_single_env_yaml(self): + """Test loading a YAML file with a single environment.""" + yaml_content = """ +schema_version: v1 +environments: + dev: + kubernetes: + namespace: dev-namespace + auth: + principal: + user_id: "user-123" + account_id: "account-456" +""" + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: + f.write(yaml_content) + f.flush() + + config = AgentEnvironmentsConfig.from_yaml(f.name) + + assert config.schema_version == "v1" + assert "dev" in config.environments + assert config.environments["dev"].kubernetes.namespace == "dev-namespace" + assert config.environments["dev"].auth.principal["user_id"] == "user-123" + + def test_load_multi_env_yaml(self): + """Test loading a YAML file with multiple environments.""" + yaml_content = """ +schema_version: v1 +environments: + dev: + kubernetes: + namespace: dev-namespace + auth: + principal: + user_id: "dev-user" + account_id: "dev-account" + prod: + kubernetes: + namespace: prod-namespace + auth: + principal: + user_id: "prod-user" + account_id: "prod-account" +""" + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: + f.write(yaml_content) + f.flush() + + config = AgentEnvironmentsConfig.from_yaml(f.name) + + assert "dev" in config.environments + assert "prod" in config.environments + assert config.environments["dev"].kubernetes.namespace == "dev-namespace" + assert config.environments["prod"].kubernetes.namespace == "prod-namespace" + + def test_load_yaml_with_environment_field_mapping(self): + """Test 
loading YAML where environments use the 'environment' field for mapping.""" + yaml_content = """ +schema_version: v1 +environments: + dev-aws: + environment: dev + kubernetes: + namespace: dev-aws-ns + auth: + principal: + user_id: "aws-user" + dev-gcp: + environment: dev + kubernetes: + namespace: dev-gcp-ns + auth: + principal: + user_id: "gcp-user" +""" + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: + f.write(yaml_content) + f.flush() + + config = AgentEnvironmentsConfig.from_yaml(f.name) + + assert config.environments["dev-aws"].environment == "dev" + assert config.environments["dev-gcp"].environment == "dev" + assert config.environments["dev-aws"].kubernetes.namespace == "dev-aws-ns" + assert config.environments["dev-gcp"].kubernetes.namespace == "dev-gcp-ns" + + def test_load_yaml_with_helm_overrides(self): + """Test loading YAML with helm_overrides.""" + yaml_content = """ +schema_version: v1 +environments: + dev: + kubernetes: + namespace: dev-namespace + auth: + principal: + user_id: "user-123" + helm_overrides: + replicaCount: 3 + resources: + requests: + cpu: "500m" + memory: "1Gi" +""" + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: + f.write(yaml_content) + f.flush() + + config = AgentEnvironmentsConfig.from_yaml(f.name) + + assert config.environments["dev"].helm_overrides["replicaCount"] == 3 + assert config.environments["dev"].helm_overrides["resources"]["requests"]["cpu"] == "500m" + + def test_load_yaml_with_custom_helm_repo(self): + """Test loading YAML with custom helm repository settings.""" + yaml_content = """ +schema_version: v1 +environments: + dev: + kubernetes: + namespace: dev-namespace + auth: + principal: + user_id: "user-123" + helm_repository_name: custom-repo + helm_repository_url: https://custom.example.com/charts +""" + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: + f.write(yaml_content) + f.flush() + + config = AgentEnvironmentsConfig.from_yaml(f.name) + + assert config.environments["dev"].helm_repository_name == "custom-repo" + assert config.environments["dev"].helm_repository_url == "https://custom.example.com/charts" + + def test_load_nonexistent_yaml_raises_file_not_found(self): + """Test that loading non-existent file raises FileNotFoundError.""" + with pytest.raises(FileNotFoundError, match="environments.yaml not found"): + AgentEnvironmentsConfig.from_yaml("/nonexistent/path/environments.yaml") + + def test_load_empty_yaml_raises_value_error(self): + """Test that loading empty YAML file raises ValueError.""" + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: + f.write("") + f.flush() + + with pytest.raises(ValueError, match="empty"): + AgentEnvironmentsConfig.from_yaml(f.name) + + def test_load_invalid_yaml_syntax_raises_value_error(self): + """Test that loading invalid YAML syntax raises ValueError.""" + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: + f.write("invalid: yaml: content: [") + f.flush() + + with pytest.raises(ValueError, match="Invalid YAML"): + AgentEnvironmentsConfig.from_yaml(f.name) + + def test_load_yaml_missing_required_auth_raises_error(self): + """Test that YAML missing required 'auth' field raises validation error.""" + yaml_content = """ +schema_version: v1 +environments: + dev: + kubernetes: + namespace: dev-namespace +""" + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: + f.write(yaml_content) + f.flush() + + with 
pytest.raises(ValueError, match="Failed to load"): + AgentEnvironmentsConfig.from_yaml(f.name) diff --git a/uv.lock b/uv.lock index 0a0242aed..391297102 100644 --- a/uv.lock +++ b/uv.lock @@ -8,7 +8,7 @@ resolution-markers = [ [[package]] name = "agentex-sdk" -version = "0.6.7" +version = "0.8.0" source = { editable = "." } dependencies = [ { name = "aiohttp" },
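# Rough end-to-end sketch of how the pieces in this diff fit together at deploy time:
# validate manifest.yaml plus environments.yaml, then resolve every cluster config for a
# target. "manifest.yaml" and the "dev" target are placeholder inputs, and unlike
# `agents deploy` (which resolves a single environment via get_config_for_env), this
# sketch uses the multi-cluster get_configs_for_env helper.
from agentex.lib.sdk.config.validation import (
    ConfigValidationError,
    generate_helpful_error_message,
    validate_manifest_and_environments,
)

try:
    manifest_path, environments_config = validate_manifest_and_environments(
        "manifest.yaml", required_environment="dev"
    )
    for env_name, env_config in environments_config.get_configs_for_env("dev").items():
        namespace = env_config.kubernetes.namespace if env_config.kubernetes else None
        print(f"{env_name}: namespace={namespace}")
except ConfigValidationError as exc:
    print(generate_helpful_error_message(exc, "Environment validation failed"))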