diff --git a/.github/workflows/python-ci.yml b/.github/workflows/python-ci.yml index 9ae4a9b..e33ee0d 100644 --- a/.github/workflows/python-ci.yml +++ b/.github/workflows/python-ci.yml @@ -51,6 +51,7 @@ jobs: - name: Test with pytest and Generate Coverage env: GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }} + PYTHONPATH: ${{ github.workspace }}/src # Ensure API key exists for tests that might need it, but allow skipping run: | # List available test files to debug @@ -59,7 +60,7 @@ jobs: # Run pytest with more comprehensive coverage # Generate both XML and HTML reports - python -m pytest --cov=src/cli_code --cov-report=term --cov-report=xml --cov-report=html --verbose test_dir/ + python -m pytest --cov=cli_code --cov-report=term --cov-report=xml --cov-report=html --verbose test_dir/ # Display the overall coverage percentage echo "Overall coverage percentage:" diff --git a/.pytest.ini b/.pytest.ini new file mode 100644 index 0000000..30742a5 --- /dev/null +++ b/.pytest.ini @@ -0,0 +1,5 @@ +[pytest] +markers = + integration: marks tests as integration tests (requires API keys) + slow: marks tests as slow running +python_paths = src \ No newline at end of file diff --git a/README.md b/README.md index af693e0..0428b1a 100644 --- a/README.md +++ b/README.md @@ -235,15 +235,30 @@ pip install -e ".[dev]" # Run tests python -m pytest -# Run coverage analysis -./run_coverage.sh +# Run coverage analysis with the new convenience script +python run_tests_with_coverage.py --html -# Run SonarCloud analysis locally (requires sonar-scanner CLI) -./run_sonar_scan.sh +# For more options: +python run_tests_with_coverage.py --help ``` The project uses [pytest](https://docs.pytest.org/) for testing and [SonarCloud](https://sonarcloud.io/) for code quality and coverage analysis. +### Code Coverage + +We've implemented comprehensive code coverage tracking to ensure the quality and reliability of the codebase. Coverage reports are generated in HTML and XML formats for: + +- Local development with the `run_tests_with_coverage.py` script +- CI/CD pipeline with GitHub Actions +- SonarCloud analysis for visualizing coverage over time + +To improve code coverage, focus on: +1. Adding tests for any new code +2. Identifying and filling gaps in existing test coverage +3. Testing edge cases and error handling paths + +Our coverage goal is to maintain at least 70% overall code coverage. + ## Contributing Contributions are welcome! Please feel free to submit a Pull Request. diff --git a/pyproject.toml b/pyproject.toml index ceb5f20..e68aac8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -102,6 +102,9 @@ omit = [ "*/docs/*", "*/test_*", "*/__pycache__/*", + "*/venv/*", + "*/.pytest_cache/*", + "*/site-packages/*", ] # Add these new sections for better coverage configuration diff --git a/run_tests_with_coverage.py b/run_tests_with_coverage.py new file mode 100755 index 0000000..e415efb --- /dev/null +++ b/run_tests_with_coverage.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python +""" +Script to run tests with coverage reporting. + +This script makes it easy to run the test suite with coverage reporting +and see which parts of the code need more test coverage. 
+ +Usage: + python run_tests_with_coverage.py +""" + +import os +import sys +import subprocess +import argparse +import webbrowser +from pathlib import Path + + +def main(): + parser = argparse.ArgumentParser(description="Run tests with coverage reporting") + parser.add_argument("--html", action="store_true", help="Open HTML report after running") + parser.add_argument("--xml", action="store_true", help="Generate XML report") + parser.add_argument("--skip-tests", action="store_true", help="Skip running tests and just report on existing coverage data") + parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output") + args = parser.parse_args() + + # Get the root directory of the project + root_dir = Path(__file__).parent + + # Change to the root directory + os.chdir(root_dir) + + # Add the src directory to Python path to ensure proper imports + sys.path.insert(0, str(root_dir / 'src')) + + if not args.skip_tests: + # Ensure we have the necessary packages + print("Installing required packages...") + subprocess.run([sys.executable, "-m", "pip", "install", "pytest", "pytest-cov"], + check=False) + + # Run pytest with coverage + print("\nRunning tests with coverage...") + cmd = [ + sys.executable, "-m", "pytest", + "--cov=cli_code", + "--cov-report=term", + ] + + # Add XML report if requested + if args.xml: + cmd.append("--cov-report=xml") + + # Always generate HTML report + cmd.append("--cov-report=html") + + # Add verbosity if requested + if args.verbose: + cmd.append("-v") + + # Run tests + result = subprocess.run(cmd + ["test_dir/"], check=False) + + if result.returncode != 0: + print("\n⚠️ Some tests failed! See above for details.") + else: + print("\n✅ All tests passed!") + + # Parse coverage results + try: + html_report = root_dir / "coverage_html" / "index.html" + + if html_report.exists(): + if args.html: + print(f"\nOpening HTML coverage report: {html_report}") + webbrowser.open(f"file://{html_report.absolute()}") + else: + print(f"\nHTML coverage report available at: file://{html_report.absolute()}") + + xml_report = root_dir / "coverage.xml" + if args.xml and xml_report.exists(): + print(f"XML coverage report available at: {xml_report}") + + except Exception as e: + print(f"Error processing coverage reports: {e}") + + print("\nDone!") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/test_dir/test_config_missing_methods.py b/test_dir/test_config_missing_methods.py new file mode 100644 index 0000000..b44b735 --- /dev/null +++ b/test_dir/test_config_missing_methods.py @@ -0,0 +1,185 @@ +""" +Tests for Config class methods that might have been missed in existing tests. 
+""" + +import os +import tempfile +import pytest +from pathlib import Path +import yaml +from unittest.mock import patch, mock_open + +from cli_code.config import Config + + +@pytest.fixture +def temp_config_dir(): + """Creates a temporary directory for the config file.""" + with tempfile.TemporaryDirectory() as tmpdir: + yield Path(tmpdir) + + +@pytest.fixture +def mock_config(): + """Return a Config instance with mocked file operations.""" + with patch('cli_code.config.Config._load_dotenv'), \ + patch('cli_code.config.Config._ensure_config_exists'), \ + patch('cli_code.config.Config._load_config', return_value={}), \ + patch('cli_code.config.Config._apply_env_vars'): + config = Config() + # Set some test data + config.config = { + "google_api_key": "test-google-key", + "default_provider": "gemini", + "default_model": "models/gemini-1.0-pro", + "ollama_api_url": "http://localhost:11434", + "ollama_default_model": "llama2", + "settings": { + "max_tokens": 1000, + "temperature": 0.7, + } + } + yield config + + +def test_get_credential(mock_config): + """Test get_credential method.""" + # Test existing provider + assert mock_config.get_credential("google") == "test-google-key" + + # Test non-existing provider + assert mock_config.get_credential("non_existing") is None + + # Test with empty config + mock_config.config = {} + assert mock_config.get_credential("google") is None + + +def test_set_credential(mock_config): + """Test set_credential method.""" + # Test setting existing provider + mock_config.set_credential("google", "new-google-key") + assert mock_config.config["google_api_key"] == "new-google-key" + + # Test setting new provider + mock_config.set_credential("openai", "test-openai-key") + assert mock_config.config["openai_api_key"] == "test-openai-key" + + # Test with None value + mock_config.set_credential("google", None) + assert mock_config.config["google_api_key"] is None + + +def test_get_default_provider(mock_config): + """Test get_default_provider method.""" + # Test with existing provider + assert mock_config.get_default_provider() == "gemini" + + # Test with no provider set + mock_config.config["default_provider"] = None + assert mock_config.get_default_provider() == "gemini" # Should return default + + # Test with empty config + mock_config.config = {} + assert mock_config.get_default_provider() == "gemini" # Should return default + + +def test_set_default_provider(mock_config): + """Test set_default_provider method.""" + # Test setting valid provider + mock_config.set_default_provider("openai") + assert mock_config.config["default_provider"] == "openai" + + # Test setting None (should use default) + mock_config.set_default_provider(None) + assert mock_config.config["default_provider"] == "gemini" + + +def test_get_default_model(mock_config): + """Test get_default_model method.""" + # Test without provider (use default provider) + assert mock_config.get_default_model() == "models/gemini-1.0-pro" + + # Test with specific provider + assert mock_config.get_default_model("ollama") == "llama2" + + # Test with non-existing provider + assert mock_config.get_default_model("non_existing") is None + + +def test_set_default_model(mock_config): + """Test set_default_model method.""" + # Test with default provider + mock_config.set_default_model("new-model") + assert mock_config.config["default_model"] == "new-model" + + # Test with specific provider + mock_config.set_default_model("new-ollama-model", "ollama") + assert mock_config.config["ollama_default_model"] == "new-ollama-model" 
+ + # Test with new provider + mock_config.set_default_model("anthropic-model", "anthropic") + assert mock_config.config["anthropic_default_model"] == "anthropic-model" + + +def test_get_setting(mock_config): + """Test get_setting method.""" + # Test existing setting + assert mock_config.get_setting("max_tokens") == 1000 + assert mock_config.get_setting("temperature") == 0.7 + + # Test non-existing setting with default + assert mock_config.get_setting("non_existing", "default_value") == "default_value" + + # Test with empty settings + mock_config.config["settings"] = {} + assert mock_config.get_setting("max_tokens", 2000) == 2000 + + +def test_set_setting(mock_config): + """Test set_setting method.""" + # Test updating existing setting + mock_config.set_setting("max_tokens", 2000) + assert mock_config.config["settings"]["max_tokens"] == 2000 + + # Test adding new setting + mock_config.set_setting("new_setting", "new_value") + assert mock_config.config["settings"]["new_setting"] == "new_value" + + # Test with no settings dict + mock_config.config.pop("settings") + mock_config.set_setting("test_setting", "test_value") + assert mock_config.config["settings"]["test_setting"] == "test_value" + + +def test_save_config(): + """Test _save_config method.""" + with patch('builtins.open', mock_open()) as mock_file, \ + patch('yaml.dump') as mock_yaml_dump, \ + patch('cli_code.config.Config._load_dotenv'), \ + patch('cli_code.config.Config._ensure_config_exists'), \ + patch('cli_code.config.Config._load_config', return_value={}), \ + patch('cli_code.config.Config._apply_env_vars'): + + config = Config() + config.config = {"test": "data"} + config._save_config() + + mock_file.assert_called_once() + mock_yaml_dump.assert_called_once_with({"test": "data"}, mock_file(), default_flow_style=False) + + +def test_save_config_error(): + """Test error handling in _save_config method.""" + with patch('builtins.open', side_effect=PermissionError("Permission denied")), \ + patch('cli_code.config.log.error') as mock_log_error, \ + patch('cli_code.config.Config._load_dotenv'), \ + patch('cli_code.config.Config._ensure_config_exists'), \ + patch('cli_code.config.Config._load_config', return_value={}), \ + patch('cli_code.config.Config._apply_env_vars'): + + config = Config() + config._save_config() + + # Verify error was logged + assert mock_log_error.called \ No newline at end of file diff --git a/test_dir/test_gemini_model.py b/test_dir/test_gemini_model.py new file mode 100644 index 0000000..7613fc4 --- /dev/null +++ b/test_dir/test_gemini_model.py @@ -0,0 +1,364 @@ +""" +Tests specifically for the GeminiModel class to improve code coverage. 
+""" + +import os +import json +import sys +import unittest +from unittest.mock import patch, MagicMock, mock_open, call +import pytest + +# Check if running in CI +IN_CI = os.environ.get('CI', 'false').lower() == 'true' + +# Handle imports +try: + from cli_code.models.gemini import GeminiModel + from rich.console import Console + import google.generativeai as genai + IMPORTS_AVAILABLE = True +except ImportError: + IMPORTS_AVAILABLE = False + # Create dummy classes for type checking + GeminiModel = MagicMock + Console = MagicMock + genai = MagicMock + +# Set up conditional skipping +SHOULD_SKIP_TESTS = not IMPORTS_AVAILABLE and not IN_CI +SKIP_REASON = "Required imports not available and not in CI" + + +@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason=SKIP_REASON) +class TestGeminiModel: + """Test suite for GeminiModel class, focusing on previously uncovered methods.""" + + def setup_method(self): + """Set up test fixtures.""" + # Mock genai module + self.genai_configure_patch = patch('google.generativeai.configure') + self.mock_genai_configure = self.genai_configure_patch.start() + + self.genai_model_patch = patch('google.generativeai.GenerativeModel') + self.mock_genai_model_class = self.genai_model_patch.start() + self.mock_model_instance = MagicMock() + self.mock_genai_model_class.return_value = self.mock_model_instance + + self.genai_list_models_patch = patch('google.generativeai.list_models') + self.mock_genai_list_models = self.genai_list_models_patch.start() + + # Mock console + self.mock_console = MagicMock(spec=Console) + + # Mock os.path.isdir and os.path.isfile + self.isdir_patch = patch('os.path.isdir') + self.isfile_patch = patch('os.path.isfile') + self.mock_isdir = self.isdir_patch.start() + self.mock_isfile = self.isfile_patch.start() + + # Mock glob + self.glob_patch = patch('glob.glob') + self.mock_glob = self.glob_patch.start() + + # Mock open + self.open_patch = patch('builtins.open', mock_open(read_data="# Test content")) + self.mock_open = self.open_patch.start() + + # Mock get_tool + self.get_tool_patch = patch('cli_code.models.gemini.get_tool') + self.mock_get_tool = self.get_tool_patch.start() + + # Default tool mock + self.mock_tool = MagicMock() + self.mock_tool.execute.return_value = "ls output" + self.mock_get_tool.return_value = self.mock_tool + + def teardown_method(self): + """Tear down test fixtures.""" + self.genai_configure_patch.stop() + self.genai_model_patch.stop() + self.genai_list_models_patch.stop() + self.isdir_patch.stop() + self.isfile_patch.stop() + self.glob_patch.stop() + self.open_patch.stop() + self.get_tool_patch.stop() + + def test_initialization(self): + """Test initialization of GeminiModel.""" + model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") + + # Check if genai was configured correctly + self.mock_genai_configure.assert_called_once_with(api_key="fake-api-key") + + # Check if model instance was created correctly + self.mock_genai_model_class.assert_called_once() + assert model.api_key == "fake-api-key" + assert model.current_model_name == "gemini-2.5-pro-exp-03-25" + + # Check history initialization + assert len(model.history) == 2 # System prompt and initial model response + + def test_initialize_model_instance(self): + """Test model instance initialization.""" + model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") + + # Call the method directly to test + model._initialize_model_instance() + + # Verify model was created with correct parameters + 
self.mock_genai_model_class.assert_called_with( + model_name="gemini-2.5-pro-exp-03-25", + generation_config=model.generation_config, + safety_settings=model.safety_settings, + system_instruction=model.system_instruction + ) + + def test_list_models(self): + """Test listing available models.""" + # Set up mock response + mock_model1 = MagicMock() + mock_model1.name = "models/gemini-pro" + mock_model1.display_name = "Gemini Pro" + mock_model1.description = "A powerful model" + mock_model1.supported_generation_methods = ["generateContent"] + + mock_model2 = MagicMock() + mock_model2.name = "models/gemini-2.5-pro-exp-03-25" + mock_model2.display_name = "Gemini 2.5 Pro" + mock_model2.description = "An experimental model" + mock_model2.supported_generation_methods = ["generateContent"] + + self.mock_genai_list_models.return_value = [mock_model1, mock_model2] + + model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") + result = model.list_models() + + # Verify list_models was called + self.mock_genai_list_models.assert_called_once() + + # Verify result format + assert len(result) == 2 + assert result[0]["id"] == "models/gemini-pro" + assert result[0]["name"] == "Gemini Pro" + assert result[1]["id"] == "models/gemini-2.5-pro-exp-03-25" + + def test_get_initial_context_with_rules_dir(self): + """Test getting initial context from .rules directory.""" + # Set up mocks + self.mock_isdir.return_value = True + self.mock_glob.return_value = [".rules/context.md", ".rules/tools.md"] + + model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") + context = model._get_initial_context() + + # Verify directory check + self.mock_isdir.assert_called_with(".rules") + + # Verify glob search + self.mock_glob.assert_called_with(".rules/*.md") + + # Verify files were read + assert self.mock_open.call_count == 2 + + # Check result content + assert "Project rules and guidelines:" in context + assert "# Content from" in context + + def test_get_initial_context_with_readme(self): + """Test getting initial context from README.md when no .rules directory.""" + # Set up mocks + self.mock_isdir.return_value = False + self.mock_isfile.return_value = True + + model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") + context = model._get_initial_context() + + # Verify README check + self.mock_isfile.assert_called_with("README.md") + + # Verify file reading + self.mock_open.assert_called_once_with("README.md", "r", encoding="utf-8", errors="ignore") + + # Check result content + assert "Project README:" in context + + def test_get_initial_context_with_ls_fallback(self): + """Test getting initial context via ls when no .rules or README.""" + # Set up mocks + self.mock_isdir.return_value = False + self.mock_isfile.return_value = False + + model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") + context = model._get_initial_context() + + # Verify tool was used + self.mock_get_tool.assert_called_with("ls") + self.mock_tool.execute.assert_called_once() + + # Check result content + assert "Current directory contents" in context + assert "ls output" in context + + def test_create_tool_definitions(self): + """Test creation of tool definitions for Gemini.""" + # Create a mock for AVAILABLE_TOOLS + with patch('cli_code.models.gemini.AVAILABLE_TOOLS') as mock_available_tools: + # Sample tool definition + mock_available_tools.return_value = { + "test_tool": { + "name": "test_tool", + "description": "A test tool", + "parameters": { + 
"param1": {"type": "string", "description": "A string parameter"}, + "param2": {"type": "integer", "description": "An integer parameter"} + }, + "required": ["param1"] + } + } + + model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") + tools = model._create_tool_definitions() + + # Verify tools format + assert len(tools) == 1 + assert tools[0]["name"] == "test_tool" + assert "description" in tools[0] + assert "parameters" in tools[0] + + def test_create_system_prompt(self): + """Test creation of system prompt.""" + model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") + prompt = model._create_system_prompt() + + # Verify prompt contains expected content + assert "function calling capabilities" in prompt + assert "System Prompt for CLI-Code" in prompt + + def test_manage_context_window(self): + """Test context window management.""" + model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") + + # Add many messages to force context truncation + for i in range(30): + model.add_to_history({"role": "user", "parts": [f"Test message {i}"]}) + model.add_to_history({"role": "model", "parts": [f"Test response {i}"]}) + + # Record initial length + initial_length = len(model.history) + + # Call context management + model._manage_context_window() + + # Verify history was truncated + assert len(model.history) < initial_length + + def test_extract_text_from_response(self): + """Test extracting text from Gemini response.""" + model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") + + # Create mock response with text + mock_response = MagicMock() + mock_response.parts = [{"text": "Response text"}] + + # Extract text + result = model._extract_text_from_response(mock_response) + + # Verify extraction + assert result == "Response text" + + def test_find_last_model_text(self): + """Test finding last model text in history.""" + model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") + + # Clear history + model.history = [] + + # Add history entries + model.add_to_history({"role": "user", "parts": ["User message 1"]}) + model.add_to_history({"role": "model", "parts": ["Model response 1"]}) + model.add_to_history({"role": "user", "parts": ["User message 2"]}) + model.add_to_history({"role": "model", "parts": ["Model response 2"]}) + + # Find last model text + result = model._find_last_model_text(model.history) + + # Verify result + assert result == "Model response 2" + + def test_add_to_history(self): + """Test adding messages to history.""" + model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") + + # Clear history + model.history = [] + + # Add a message + entry = {"role": "user", "parts": ["Test message"]} + model.add_to_history(entry) + + # Verify message was added + assert len(model.history) == 1 + assert model.history[0] == entry + + def test_clear_history(self): + """Test clearing history.""" + model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") + + # Add a message + model.add_to_history({"role": "user", "parts": ["Test message"]}) + + # Clear history + model.clear_history() + + # Verify history was cleared + assert len(model.history) == 0 + + def test_get_help_text(self): + """Test getting help text.""" + model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") + help_text = model._get_help_text() + + # Verify help text content + assert "CLI-Code Assistant Help" in help_text + assert 
"Commands" in help_text + + def test_generate_with_function_calls(self): + """Test generate method with function calls.""" + # Set up mock response with function call + mock_response = MagicMock() + mock_response.candidates = [MagicMock()] + mock_response.candidates[0].content = MagicMock() + mock_response.candidates[0].content.parts = [ + { + "functionCall": { + "name": "test_tool", + "args": {"param1": "value1"} + } + } + ] + mock_response.candidates[0].finish_reason = "FUNCTION_CALL" + + # Set up model instance to return the mock response + self.mock_model_instance.generate_content.return_value = mock_response + + # Mock tool execution + tool_mock = MagicMock() + tool_mock.execute.return_value = "Tool execution result" + self.mock_get_tool.return_value = tool_mock + + # Create model + model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") + + # Call generate + result = model.generate("Test prompt") + + # Verify model was called + self.mock_model_instance.generate_content.assert_called() + + # Verify tool execution + tool_mock.execute.assert_called_with(param1="value1") + + # There should be a second call to generate_content with the tool result + assert self.mock_model_instance.generate_content.call_count >= 2 \ No newline at end of file diff --git a/test_dir/test_ollama_model.py b/test_dir/test_ollama_model.py new file mode 100644 index 0000000..2587b45 --- /dev/null +++ b/test_dir/test_ollama_model.py @@ -0,0 +1,288 @@ +""" +Tests specifically for the OllamaModel class to improve code coverage. +""" + +import os +import json +import sys +import unittest +from unittest.mock import patch, MagicMock, mock_open, call +import pytest + +# Check if running in CI +IN_CI = os.environ.get('CI', 'false').lower() == 'true' + +# Handle imports +try: + from cli_code.models.ollama import OllamaModel + from rich.console import Console + IMPORTS_AVAILABLE = True +except ImportError: + IMPORTS_AVAILABLE = False + OllamaModel = MagicMock + Console = MagicMock + +# Set up conditional skipping +SHOULD_SKIP_TESTS = not IMPORTS_AVAILABLE and not IN_CI +SKIP_REASON = "Required imports not available and not in CI" + + +@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason=SKIP_REASON) +class TestOllamaModel: + """Test suite for OllamaModel class, focusing on previously uncovered methods.""" + + def setup_method(self): + """Set up test fixtures.""" + # Mock OpenAI module before initialization + self.openai_patch = patch('cli_code.models.ollama.OpenAI') + self.mock_openai = self.openai_patch.start() + + # Mock the OpenAI client instance + self.mock_client = MagicMock() + self.mock_openai.return_value = self.mock_client + + # Mock console + self.mock_console = MagicMock(spec=Console) + + # Mock os.path.isdir and os.path.isfile + self.isdir_patch = patch('os.path.isdir') + self.isfile_patch = patch('os.path.isfile') + self.mock_isdir = self.isdir_patch.start() + self.mock_isfile = self.isfile_patch.start() + + # Mock glob + self.glob_patch = patch('glob.glob') + self.mock_glob = self.glob_patch.start() + + # Mock open + self.open_patch = patch('builtins.open', mock_open(read_data="# Test content")) + self.mock_open = self.open_patch.start() + + # Mock get_tool + self.get_tool_patch = patch('cli_code.models.ollama.get_tool') + self.mock_get_tool = self.get_tool_patch.start() + + # Default tool mock + self.mock_tool = MagicMock() + self.mock_tool.execute.return_value = "ls output" + self.mock_get_tool.return_value = self.mock_tool + + def teardown_method(self): + """Tear down test fixtures.""" + 
self.openai_patch.stop() + self.isdir_patch.stop() + self.isfile_patch.stop() + self.glob_patch.stop() + self.open_patch.stop() + self.get_tool_patch.stop() + + def test_init(self): + """Test initialization of OllamaModel.""" + model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") + + # Check if OpenAI client was initialized correctly + self.mock_openai.assert_called_once_with( + base_url="http://localhost:11434", + api_key="ollama" + ) + + # Check model attributes + assert model.api_url == "http://localhost:11434" + assert model.model_name == "llama3" + + # Check history initialization + assert len(model.history) == 1 + assert model.history[0]["role"] == "system" + + def test_get_initial_context_with_rules_dir(self): + """Test getting initial context from .rules directory.""" + # Set up mocks + self.mock_isdir.return_value = True + self.mock_glob.return_value = [".rules/context.md", ".rules/tools.md"] + + model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") + context = model._get_initial_context() + + # Verify directory check + self.mock_isdir.assert_called_with(".rules") + + # Verify glob search + self.mock_glob.assert_called_with(".rules/*.md") + + # Verify files were read + assert self.mock_open.call_count == 2 + + # Check result content + assert "Project rules and guidelines:" in context + assert "# Content from" in context + + def test_get_initial_context_with_readme(self): + """Test getting initial context from README.md when no .rules directory.""" + # Set up mocks + self.mock_isdir.return_value = False + self.mock_isfile.return_value = True + + model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") + context = model._get_initial_context() + + # Verify README check + self.mock_isfile.assert_called_with("README.md") + + # Verify file reading + self.mock_open.assert_called_once_with("README.md", "r", encoding="utf-8", errors="ignore") + + # Check result content + assert "Project README:" in context + + def test_get_initial_context_with_ls_fallback(self): + """Test getting initial context via ls when no .rules or README.""" + # Set up mocks + self.mock_isdir.return_value = False + self.mock_isfile.return_value = False + + model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") + context = model._get_initial_context() + + # Verify tool was used + self.mock_get_tool.assert_called_with("ls") + self.mock_tool.execute.assert_called_once() + + # Check result content + assert "Current directory contents" in context + assert "ls output" in context + + def test_prepare_openai_tools(self): + """Test preparation of tools in OpenAI function format.""" + # Create a mock for AVAILABLE_TOOLS + with patch('cli_code.models.ollama.AVAILABLE_TOOLS') as mock_available_tools: + # Sample tool definition + mock_available_tools.return_value = { + "test_tool": { + "name": "test_tool", + "description": "A test tool", + "parameters": { + "param1": {"type": "string", "description": "A string parameter"}, + "param2": {"type": "integer", "description": "An integer parameter"} + }, + "required": ["param1"] + } + } + + model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") + tools = model._prepare_openai_tools() + + # Verify tools format + assert len(tools) == 1 + assert tools[0]["type"] == "function" + assert tools[0]["function"]["name"] == "test_tool" + assert "parameters" in tools[0]["function"] + assert "properties" in tools[0]["function"]["parameters"] + assert "param1" in 
tools[0]["function"]["parameters"]["properties"] + assert "param2" in tools[0]["function"]["parameters"]["properties"] + assert tools[0]["function"]["parameters"]["required"] == ["param1"] + + def test_manage_ollama_context(self): + """Test context management for Ollama models.""" + model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") + + # Add many messages to force context truncation + for i in range(30): + model.add_to_history({"role": "user", "content": f"Test message {i}"}) + model.add_to_history({"role": "assistant", "content": f"Test response {i}"}) + + # Call context management + model._manage_ollama_context() + + # Verify history was truncated but system message preserved + assert len(model.history) < 61 # Less than original count + assert model.history[0]["role"] == "system" # System message preserved + + def test_add_to_history(self): + """Test adding messages to history.""" + model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") + + # Clear existing history + model.history = [] + + # Add a message + message = {"role": "user", "content": "Test message"} + model.add_to_history(message) + + # Verify message was added + assert len(model.history) == 1 + assert model.history[0] == message + + def test_clear_history(self): + """Test clearing history.""" + model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") + + # Add some messages + model.add_to_history({"role": "user", "content": "Test message"}) + + # Clear history + model.clear_history() + + # Verify history was cleared + assert len(model.history) == 0 + + def test_list_models(self): + """Test listing available models.""" + # Mock the completion response + mock_response = MagicMock() + mock_models = [ + {"id": "llama3", "object": "model", "created": 1621880188}, + {"id": "mistral", "object": "model", "created": 1622880188} + ] + mock_response.json.return_value = {"data": mock_models} + + # Set up client mock to return response + self.mock_client.models.list.return_value.data = mock_models + + model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") + result = model.list_models() + + # Verify client method called + self.mock_client.models.list.assert_called_once() + + # Verify result + assert result == mock_models + + def test_generate_with_function_calls(self): + """Test generate method with function calls.""" + # Create response with function calls + mock_message = MagicMock() + mock_message.content = None + mock_message.tool_calls = [ + MagicMock( + function=MagicMock( + name="test_tool", + arguments='{"param1": "value1"}' + ) + ) + ] + + mock_response = MagicMock() + mock_response.choices = [MagicMock( + message=mock_message, + finish_reason="tool_calls" + )] + + # Set up client mock + self.mock_client.chat.completions.create.return_value = mock_response + + # Mock get_tool to return a tool that executes successfully + tool_mock = MagicMock() + tool_mock.execute.return_value = "Tool execution result" + self.mock_get_tool.return_value = tool_mock + + model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") + result = model.generate("Test prompt") + + # Verify client method called + self.mock_client.chat.completions.create.assert_called() + + # Verify tool execution + tool_mock.execute.assert_called_once_with(param1="value1") + + # Check that there was a second API call with the tool results + assert self.mock_client.chat.completions.create.call_count == 2 \ No newline at end of file diff --git 
a/test_dir/test_system_tools_comprehensive.py b/test_dir/test_system_tools_comprehensive.py new file mode 100644 index 0000000..e39bf05 --- /dev/null +++ b/test_dir/test_system_tools_comprehensive.py @@ -0,0 +1,113 @@ +""" +Comprehensive tests for the system_tools module. +""" + +import pytest +from unittest.mock import patch, MagicMock +import subprocess +import time + +from cli_code.tools.system_tools import BashTool + + +class TestBashTool: + """Test cases for the BashTool class.""" + + def test_init(self): + """Test initialization of BashTool.""" + tool = BashTool() + assert tool.name == "bash" + assert tool.description == "Execute a bash command" + assert isinstance(tool.BANNED_COMMANDS, list) + assert len(tool.BANNED_COMMANDS) > 0 + + def test_banned_commands(self): + """Test that banned commands are rejected.""" + tool = BashTool() + + # Test each banned command + for banned_cmd in tool.BANNED_COMMANDS: + result = tool.execute(f"{banned_cmd} some_args") + assert "not allowed for security reasons" in result + assert banned_cmd in result + + def test_execute_simple_command(self): + """Test executing a simple command.""" + tool = BashTool() + result = tool.execute("echo 'hello world'") + assert "hello world" in result + + def test_execute_with_error(self): + """Test executing a command that returns an error.""" + tool = BashTool() + result = tool.execute("ls /nonexistent_directory") + assert "Command exited with status" in result + assert "STDERR" in result + + @patch('subprocess.Popen') + def test_timeout_handling(self, mock_popen): + """Test handling of command timeouts.""" + # Setup mock to simulate timeout + mock_process = MagicMock() + mock_process.communicate.side_effect = subprocess.TimeoutExpired(cmd="sleep 100", timeout=0.1) + mock_popen.return_value = mock_process + + tool = BashTool() + result = tool.execute("sleep 100", timeout=100) # 100ms timeout + + assert "Command timed out" in result + + @patch('subprocess.Popen') + def test_exception_handling(self, mock_popen): + """Test general exception handling.""" + # Setup mock to raise exception + mock_popen.side_effect = Exception("Test exception") + + tool = BashTool() + result = tool.execute("echo test") + + assert "Error executing command" in result + assert "Test exception" in result + + def test_timeout_conversion(self): + """Test conversion of timeout parameter.""" + tool = BashTool() + + # Test with invalid timeout + with patch('subprocess.Popen') as mock_popen: + mock_process = MagicMock() + mock_process.communicate.return_value = ("output", "") + mock_process.returncode = 0 + mock_popen.return_value = mock_process + + tool.execute("echo test", timeout="invalid") + + # Should use default timeout (30 seconds) + mock_process.communicate.assert_called_with(timeout=30) + + def test_long_output_handling(self): + """Test handling of commands with large output.""" + tool = BashTool() + + # Generate a large output + result = tool.execute("python -c \"print('x' * 10000)\"") + + # Verify the tool can handle large outputs + assert len(result) >= 10000 + assert result.count('x') >= 10000 + + def test_command_with_arguments(self): + """Test executing a command with arguments.""" + tool = BashTool() + + # Test with multiple arguments + result = tool.execute("echo arg1 arg2 arg3") + assert "arg1 arg2 arg3" in result + + # Test with quoted arguments + result = tool.execute("echo 'argument with spaces'") + assert "argument with spaces" in result + + # Test with environment variables + result = tool.execute("echo $HOME") + assert 
len(result.strip()) > 0 # Should have some content \ No newline at end of file diff --git a/test_dir/test_utils_comprehensive.py b/test_dir/test_utils_comprehensive.py new file mode 100644 index 0000000..867fe7e --- /dev/null +++ b/test_dir/test_utils_comprehensive.py @@ -0,0 +1,62 @@ +""" +Comprehensive tests for the utils module. +""" + +import unittest +import pytest +from cli_code.utils import count_tokens + + +class TestUtilsModule(unittest.TestCase): + """Test cases for the utils module functions.""" + + def test_count_tokens_with_tiktoken(self): + """Test token counting with tiktoken available.""" + # Test with empty string + assert count_tokens("") == 0 + + # Test with short texts + assert count_tokens("Hello") > 0 + assert count_tokens("Hello, world!") > count_tokens("Hello") + + # Test with longer content + long_text = "This is a longer piece of text that should contain multiple tokens. " * 10 + assert count_tokens(long_text) > 20 + + # Test with special characters + special_chars = "!@#$%^&*()_+={}[]|\\:;\"'<>,.?/" + assert count_tokens(special_chars) > 0 + + # Test with numbers + numbers = "12345 67890" + assert count_tokens(numbers) > 0 + + # Test with unicode characters + unicode_text = "こんにちは世界" # Hello world in Japanese + assert count_tokens(unicode_text) > 0 + + # Test with code snippets + code_snippet = """ + def example_function(param1, param2): + \"\"\"This is a docstring.\"\"\" + result = param1 + param2 + return result + """ + assert count_tokens(code_snippet) > 10 + + +def test_count_tokens_mocked_failure(monkeypatch): + """Test the fallback method when tiktoken raises an exception.""" + def mock_encoding_that_fails(*args, **kwargs): + raise ImportError("Simulated import error") + + # Mock the tiktoken encoding to simulate a failure + monkeypatch.setattr("tiktoken.encoding_for_model", mock_encoding_that_fails) + + # Test that the function returns a value using the fallback method + text = "This is a test string" + expected_approx = len(text) // 4 + result = count_tokens(text) + + # The fallback method is approximate, but should be close to this value + assert result == expected_approx \ No newline at end of file
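
The `test_count_tokens_mocked_failure` test above pins down a specific fallback contract for `cli_code.utils.count_tokens`: when `tiktoken` raises, the function must approximate roughly four characters per token. A minimal sketch consistent with those assertions (the implementation shape and the `"gpt-4"` default model name are assumptions, not taken from this diff):

```python
# Sketch only: the count_tokens contract implied by the tests, not the actual
# cli_code.utils source. The "gpt-4" default model name is an assumption.
import tiktoken


def count_tokens(text: str, model: str = "gpt-4") -> int:
    try:
        encoding = tiktoken.encoding_for_model(model)
        return len(encoding.encode(text))
    except Exception:
        # Fallback path exercised by test_count_tokens_mocked_failure:
        # roughly 4 characters per token.
        return len(text) // 4
```

Because the test monkeypatches `tiktoken.encoding_for_model` by dotted path, the fallback is only reachable if `count_tokens` resolves that attribute at call time (module import) rather than importing the function object directly.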
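
The `integration` and `slow` markers registered in `.pytest.ini` only take effect once tests opt into them. A hedged example of how a test module might apply the markers and how to deselect them locally (the test names and bodies here are illustrative, not part of this change):

```python
# Illustrative only: marker usage matching the .pytest.ini registration above.
import os

import pytest


@pytest.mark.integration
@pytest.mark.skipif(not os.environ.get("GEMINI_API_KEY"), reason="GEMINI_API_KEY not set")
def test_gemini_live_smoke():
    # Select explicitly with: python -m pytest -m integration
    assert os.environ["GEMINI_API_KEY"]


@pytest.mark.slow
def test_expensive_path():
    # Deselect locally with: python -m pytest -m "not slow"
    assert sum(range(1_000_000)) >= 0
```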
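
For reference, `test_dir/test_config_missing_methods.py` encodes a fairly specific contract for `cli_code.config.Config`: credentials live under `<provider>_api_key` keys, per-provider default models under `<provider>_default_model` (except the gemini default, which uses `default_model`), and settings under a nested `settings` dict. A compact sketch of that contract, reconstructed from the assertions alone and assuming the real class differs in detail:

```python
# Reconstructed from the test assertions; illustrative, not the real Config class.
from typing import Any, Optional


class ConfigContract:
    DEFAULT_PROVIDER = "gemini"

    def __init__(self) -> None:
        self.config: dict = {}

    def get_credential(self, provider: str) -> Optional[str]:
        # "google" -> "google_api_key", "openai" -> "openai_api_key", ...
        return self.config.get(f"{provider}_api_key")

    def set_credential(self, provider: str, value: Optional[str]) -> None:
        self.config[f"{provider}_api_key"] = value

    def get_default_provider(self) -> str:
        return self.config.get("default_provider") or self.DEFAULT_PROVIDER

    def set_default_provider(self, provider: Optional[str]) -> None:
        self.config["default_provider"] = provider or self.DEFAULT_PROVIDER

    def get_default_model(self, provider: Optional[str] = None) -> Optional[str]:
        provider = provider or self.get_default_provider()
        key = "default_model" if provider == self.DEFAULT_PROVIDER else f"{provider}_default_model"
        return self.config.get(key)

    def set_default_model(self, model: str, provider: Optional[str] = None) -> None:
        provider = provider or self.get_default_provider()
        key = "default_model" if provider == self.DEFAULT_PROVIDER else f"{provider}_default_model"
        self.config[key] = model

    def get_setting(self, key: str, default: Any = None) -> Any:
        return self.config.get("settings", {}).get(key, default)

    def set_setting(self, key: str, value: Any) -> None:
        self.config.setdefault("settings", {})[key] = value
```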