diff --git a/.github/workflows/python-ci.yml b/.github/workflows/python-ci.yml
index 12a0e7f..e3dcdea 100644
--- a/.github/workflows/python-ci.yml
+++ b/.github/workflows/python-ci.yml
@@ -138,7 +138,7 @@ jobs:
             -Dsonar.organization=vitruviansoftware
             -Dsonar.python.coverage.reportPaths=coverage.xml
             -Dsonar.sources=src/cli_code
-            -Dsonar.tests=test_dir
+            -Dsonar.tests=tests
             -Dsonar.sourceEncoding=UTF-8
             -Dsonar.scm.provider=git
             -Dsonar.coverage.jacoco.xmlReportPaths=coverage.xml
diff --git a/conftest.py b/conftest.py
index 59f1f24..131b016 100644
--- a/conftest.py
+++ b/conftest.py
@@ -12,22 +12,10 @@ except ImportError:
     PYTEST_AVAILABLE = False
 
 
-def pytest_ignore_collect(path):
-    """
-    Determine which test files to ignore during collection.
-
-    Args:
-        path: Path object representing a test file or directory
-
-    Returns:
-        bool: True if the file should be ignored, False otherwise
-    """
-    # Check if we're running in CI
-    in_ci = os.environ.get('CI', 'false').lower() == 'true'
-
-    if in_ci:
-        # Skip comprehensive test files in CI environments
-        if '_comprehensive' in str(path):
-            return True
-
-    return False
\ No newline at end of file
+def pytest_ignore_collect(path, config):
+    """Ignore tests containing '_comprehensive' in their path when CI=true."""
+    # if os.environ.get("CI") == "true" and "_comprehensive" in str(path):
+    #     print(f"Ignoring comprehensive test in CI: {path}")
+    #     return True
+    # return False
+    pass  # Keep the function valid syntax, but effectively do nothing.
\ No newline at end of file
diff --git a/pytest.ini b/pytest.ini
index 9d4f802..9dd7d46 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -8,7 +8,7 @@ markers =
     requires_openai: marks tests that require openai module
     timeout: marks tests with a timeout
 pythonpath = src
-testpaths = tests test_dir
+testpaths = tests
 python_files = test_*.py
 python_classes = Test*
 python_functions = test_*
diff --git a/sonar-project.properties b/sonar-project.properties
index 92a1a87..43bb6e9 100644
--- a/sonar-project.properties
+++ b/sonar-project.properties
@@ -9,7 +9,7 @@ sonar.projectVersion=0.2.1
 
 # Path is relative to the sonar-project.properties file. Replace "\" by "/" on Windows.
 sonar.sources=src/cli_code
-sonar.tests=test_dir
+sonar.tests=tests
 
 # Coverage report paths
 sonar.python.coverage.reportPaths=coverage.xml
diff --git a/test_dir/conftest.py b/test_dir/conftest.py
deleted file mode 100644
index 5f00ca6..0000000
--- a/test_dir/conftest.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""
-Pytest configuration and fixtures.
-""" - -import os -import sys -import pytest -from unittest.mock import MagicMock - -# Add src directory to path for imports -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../src'))) - - -def pytest_configure(config): - """Configure pytest with custom markers.""" - config.addinivalue_line("markers", "integration: mark test as requiring API keys") - config.addinivalue_line("markers", "slow: mark test as slow") - - -def pytest_collection_modifyitems(config, items): - """Process test items to skip tests with missing dependencies.""" - for item in items: - if 'requires_tiktoken' in item.keywords and not _is_module_available('tiktoken'): - item.add_marker(pytest.mark.skip(reason="tiktoken not available")) - if 'requires_yaml' in item.keywords and not _is_module_available('yaml'): - item.add_marker(pytest.mark.skip(reason="yaml not available")) - if 'requires_gemini' in item.keywords and not _is_module_available('google.generativeai'): - item.add_marker(pytest.mark.skip(reason="google.generativeai not available")) - if 'requires_openai' in item.keywords and not _is_module_available('openai'): - item.add_marker(pytest.mark.skip(reason="openai not available")) - - -def _is_module_available(module_name): - """Check if a module is available.""" - try: - __import__(module_name) - return True - except ImportError: - return False - - -@pytest.fixture -def mock_module(): - """Create a MagicMock for a module.""" - return MagicMock() - - -@pytest.fixture -def temp_dir(tmpdir): - """Provide a temporary directory.""" - return tmpdir \ No newline at end of file diff --git a/test_dir/improved/test_quality_tools.py b/test_dir/improved/test_quality_tools.py deleted file mode 100644 index 482cd9b..0000000 --- a/test_dir/improved/test_quality_tools.py +++ /dev/null @@ -1,305 +0,0 @@ -""" -Tests for quality_tools module. 
-""" -import os -import subprocess -import pytest -from unittest.mock import patch, MagicMock - -# Direct import for coverage tracking -import src.cli_code.tools.quality_tools -from src.cli_code.tools.quality_tools import ( - _run_quality_command, - LinterCheckerTool, - FormatterTool -) - - -def test_linter_checker_tool_init(): - """Test LinterCheckerTool initialization.""" - tool = LinterCheckerTool() - assert tool.name == "linter_checker" - assert "Runs a code linter" in tool.description - - -def test_formatter_tool_init(): - """Test FormatterTool initialization.""" - tool = FormatterTool() - assert tool.name == "formatter" - assert "Runs a code formatter" in tool.description - - -@patch("subprocess.run") -def test_run_quality_command_success(mock_run): - """Test _run_quality_command with successful command execution.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "Command output" - mock_process.stderr = "" - mock_run.return_value = mock_process - - # Execute function - result = _run_quality_command(["test", "command"], "TestTool") - - # Verify results - assert "TestTool Result (Exit Code: 0)" in result - assert "Command output" in result - assert "-- Errors --" not in result - mock_run.assert_called_once_with( - ["test", "command"], - capture_output=True, - text=True, - check=False, - timeout=120 - ) - - -@patch("subprocess.run") -def test_run_quality_command_with_errors(mock_run): - """Test _run_quality_command with command that outputs errors.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 1 - mock_process.stdout = "Command output" - mock_process.stderr = "Error message" - mock_run.return_value = mock_process - - # Execute function - result = _run_quality_command(["test", "command"], "TestTool") - - # Verify results - assert "TestTool Result (Exit Code: 1)" in result - assert "Command output" in result - assert "-- Errors --" in result - assert "Error message" in result - - -@patch("subprocess.run") -def test_run_quality_command_no_output(mock_run): - """Test _run_quality_command with command that produces no output.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "" - mock_process.stderr = "" - mock_run.return_value = mock_process - - # Execute function - result = _run_quality_command(["test", "command"], "TestTool") - - # Verify results - assert "TestTool Result (Exit Code: 0)" in result - assert "(No output)" in result - - -@patch("subprocess.run") -def test_run_quality_command_long_output(mock_run): - """Test _run_quality_command with command that produces very long output.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "A" * 3000 # Longer than 2000 char limit - mock_process.stderr = "" - mock_run.return_value = mock_process - - # Execute function - result = _run_quality_command(["test", "command"], "TestTool") - - # Verify results - assert "... (output truncated)" in result - assert len(result) < 3000 - - -def test_run_quality_command_file_not_found(): - """Test _run_quality_command with non-existent command.""" - # Set up side effect - with patch("subprocess.run", side_effect=FileNotFoundError("No such file or directory: 'nonexistent'")): - # Execute function - result = _run_quality_command(["nonexistent"], "TestTool") - - # Verify results - assert "Error: Command 'nonexistent' not found" in result - assert "Is 'nonexistent' installed and in PATH?" 
in result - - -def test_run_quality_command_timeout(): - """Test _run_quality_command with command that times out.""" - # Set up side effect - with patch("subprocess.run", side_effect=subprocess.TimeoutExpired(cmd="slow_command", timeout=120)): - # Execute function - result = _run_quality_command(["slow_command"], "TestTool") - - # Verify results - assert "Error: TestTool run timed out" in result - assert "2 minutes" in result - - -def test_run_quality_command_unexpected_error(): - """Test _run_quality_command with unexpected error.""" - # Set up side effect - with patch("subprocess.run", side_effect=Exception("Unexpected error")): - # Execute function - result = _run_quality_command(["command"], "TestTool") - - # Verify results - assert "Error running TestTool" in result - assert "Unexpected error" in result - - -@patch("src.cli_code.tools.quality_tools._run_quality_command") -def test_linter_checker_with_defaults(mock_run_command): - """Test LinterCheckerTool with default parameters.""" - # Setup mock - mock_run_command.return_value = "Linter output" - - # Execute tool - tool = LinterCheckerTool() - result = tool.execute() - - # Verify results - assert result == "Linter output" - mock_run_command.assert_called_once() - args, kwargs = mock_run_command.call_args - assert args[0] == ["ruff", "check", os.path.abspath(".")] - assert args[1] == "Linter" - - -@patch("src.cli_code.tools.quality_tools._run_quality_command") -def test_linter_checker_with_custom_path(mock_run_command): - """Test LinterCheckerTool with custom path.""" - # Setup mock - mock_run_command.return_value = "Linter output" - - # Execute tool - tool = LinterCheckerTool() - result = tool.execute(path="src") - - # Verify results - assert result == "Linter output" - mock_run_command.assert_called_once() - args, kwargs = mock_run_command.call_args - assert args[0] == ["ruff", "check", os.path.abspath("src")] - - -@patch("src.cli_code.tools.quality_tools._run_quality_command") -def test_linter_checker_with_custom_command(mock_run_command): - """Test LinterCheckerTool with custom linter command.""" - # Setup mock - mock_run_command.return_value = "Linter output" - - # Execute tool - tool = LinterCheckerTool() - result = tool.execute(linter_command="flake8") - - # Verify results - assert result == "Linter output" - mock_run_command.assert_called_once() - args, kwargs = mock_run_command.call_args - assert args[0] == ["flake8", os.path.abspath(".")] - - -@patch("src.cli_code.tools.quality_tools._run_quality_command") -def test_linter_checker_with_complex_command(mock_run_command): - """Test LinterCheckerTool with complex command including arguments.""" - # Setup mock - mock_run_command.return_value = "Linter output" - - # Execute tool - tool = LinterCheckerTool() - result = tool.execute(linter_command="flake8 --max-line-length=100") - - # Verify results - assert result == "Linter output" - mock_run_command.assert_called_once() - args, kwargs = mock_run_command.call_args - assert args[0] == ["flake8", "--max-line-length=100", os.path.abspath(".")] - - -def test_linter_checker_with_parent_directory_traversal(): - """Test LinterCheckerTool with path containing parent directory traversal.""" - tool = LinterCheckerTool() - result = tool.execute(path="../dangerous") - - # Verify results - assert "Error: Invalid path" in result - assert "Cannot access parent directories" in result - - -@patch("src.cli_code.tools.quality_tools._run_quality_command") -def test_formatter_with_defaults(mock_run_command): - """Test FormatterTool with default 
parameters.""" - # Setup mock - mock_run_command.return_value = "Formatter output" - - # Execute tool - tool = FormatterTool() - result = tool.execute() - - # Verify results - assert result == "Formatter output" - mock_run_command.assert_called_once() - args, kwargs = mock_run_command.call_args - assert args[0] == ["black", os.path.abspath(".")] - assert args[1] == "Formatter" - - -@patch("src.cli_code.tools.quality_tools._run_quality_command") -def test_formatter_with_custom_path(mock_run_command): - """Test FormatterTool with custom path.""" - # Setup mock - mock_run_command.return_value = "Formatter output" - - # Execute tool - tool = FormatterTool() - result = tool.execute(path="src") - - # Verify results - assert result == "Formatter output" - mock_run_command.assert_called_once() - args, kwargs = mock_run_command.call_args - assert args[0] == ["black", os.path.abspath("src")] - - -@patch("src.cli_code.tools.quality_tools._run_quality_command") -def test_formatter_with_custom_command(mock_run_command): - """Test FormatterTool with custom formatter command.""" - # Setup mock - mock_run_command.return_value = "Formatter output" - - # Execute tool - tool = FormatterTool() - result = tool.execute(formatter_command="prettier") - - # Verify results - assert result == "Formatter output" - mock_run_command.assert_called_once() - args, kwargs = mock_run_command.call_args - assert args[0] == ["prettier", os.path.abspath(".")] - - -@patch("src.cli_code.tools.quality_tools._run_quality_command") -def test_formatter_with_complex_command(mock_run_command): - """Test FormatterTool with complex command including arguments.""" - # Setup mock - mock_run_command.return_value = "Formatter output" - - # Execute tool - tool = FormatterTool() - result = tool.execute(formatter_command="prettier --write") - - # Verify results - assert result == "Formatter output" - mock_run_command.assert_called_once() - args, kwargs = mock_run_command.call_args - assert args[0] == ["prettier", "--write", os.path.abspath(".")] - - -def test_formatter_with_parent_directory_traversal(): - """Test FormatterTool with path containing parent directory traversal.""" - tool = FormatterTool() - result = tool.execute(path="../dangerous") - - # Verify results - assert "Error: Invalid path" in result - assert "Cannot access parent directories" in result \ No newline at end of file diff --git a/test_dir/improved/test_summarizer_tool.py b/test_dir/improved/test_summarizer_tool.py deleted file mode 100644 index 11919bb..0000000 --- a/test_dir/improved/test_summarizer_tool.py +++ /dev/null @@ -1,392 +0,0 @@ -""" -Tests for summarizer_tool module. 
-""" -import os -import pytest -from unittest.mock import patch, MagicMock, mock_open - -import google.generativeai as genai - -# Direct import for coverage tracking -import src.cli_code.tools.summarizer_tool -from src.cli_code.tools.summarizer_tool import ( - SummarizeCodeTool, - MAX_LINES_FOR_FULL_CONTENT, - MAX_CHARS_FOR_FULL_CONTENT, - SUMMARIZATION_SYSTEM_PROMPT -) - -# Mock classes for google.generativeai response structure -class MockPart: - def __init__(self, text): - self.text = text - -class MockContent: - def __init__(self, parts): - self.parts = parts - -class MockFinishReason: - def __init__(self, name): - self.name = name - -class MockCandidate: - def __init__(self, content, finish_reason): - self.content = content - self.finish_reason = finish_reason - -class MockResponse: - def __init__(self, candidates=None): - self.candidates = candidates if candidates is not None else [] - - -def test_summarize_code_tool_init(): - """Test SummarizeCodeTool initialization.""" - # Create a mock model - mock_model = MagicMock() - - # Initialize tool with model - tool = SummarizeCodeTool(model_instance=mock_model) - - # Verify initialization - assert tool.name == "summarize_code" - assert "summary" in tool.description - assert tool.model == mock_model - - -def test_summarize_code_tool_init_without_model(): - """Test SummarizeCodeTool initialization without a model.""" - # Initialize tool without model - tool = SummarizeCodeTool() - - # Verify initialization with None model - assert tool.model is None - - -def test_execute_without_model(): - """Test executing the tool without providing a model.""" - # Initialize tool without model - tool = SummarizeCodeTool() - - # Execute tool - result = tool.execute(file_path="test.py") - - # Verify error message - assert "Error: Summarization tool not properly configured" in result - - -def test_execute_with_parent_directory_traversal(): - """Test executing the tool with a file path containing parent directory traversal.""" - # Initialize tool with mock model - tool = SummarizeCodeTool(model_instance=MagicMock()) - - # Execute tool with parent directory traversal - result = tool.execute(file_path="../dangerous.py") - - # Verify error message - assert "Error: Invalid file path" in result - - -@patch("os.path.exists") -def test_execute_file_not_found(mock_exists): - """Test executing the tool with a non-existent file.""" - # Setup mock - mock_exists.return_value = False - - # Initialize tool with mock model - tool = SummarizeCodeTool(model_instance=MagicMock()) - - # Execute tool with non-existent file - result = tool.execute(file_path="nonexistent.py") - - # Verify error message - assert "Error: File not found" in result - - -@patch("os.path.exists") -@patch("os.path.isfile") -def test_execute_not_a_file(mock_isfile, mock_exists): - """Test executing the tool with a path that is not a file.""" - # Setup mocks - mock_exists.return_value = True - mock_isfile.return_value = False - - # Initialize tool with mock model - tool = SummarizeCodeTool(model_instance=MagicMock()) - - # Execute tool with directory path - result = tool.execute(file_path="directory/") - - # Verify error message - assert "Error: Path is not a file" in result - - -@patch("os.path.exists") -@patch("os.path.isfile") -@patch("os.path.getsize") -@patch("builtins.open", new_callable=mock_open, read_data="Small file content") -def test_execute_small_file(mock_file, mock_getsize, mock_isfile, mock_exists): - """Test executing the tool with a small file.""" - # Setup mocks - 
mock_exists.return_value = True - mock_isfile.return_value = True - mock_getsize.return_value = 100 # Small file size - - # Create mock for line counting - small file - mock_file_handle = mock_file() - mock_file_handle.__iter__.return_value = ["Line 1", "Line 2", "Line 3"] - - # Initialize tool with mock model - mock_model = MagicMock() - tool = SummarizeCodeTool(model_instance=mock_model) - - # Execute tool with small file - result = tool.execute(file_path="small_file.py") - - # Verify full content returned and model not called - assert "Full Content of small_file.py" in result - assert "Small file content" in result - mock_model.generate_content.assert_not_called() - - -@patch("os.path.exists") -@patch("os.path.isfile") -@patch("os.path.getsize") -@patch("builtins.open") -def test_execute_large_file(mock_file, mock_getsize, mock_isfile, mock_exists): - """Test executing the tool with a large file.""" - # Setup mocks - mock_exists.return_value = True - mock_isfile.return_value = True - mock_getsize.return_value = MAX_CHARS_FOR_FULL_CONTENT + 1000 # Large file - - # Create mock file handle for line counting - large file - file_handle = MagicMock() - file_handle.__iter__.return_value = ["Line " + str(i) for i in range(MAX_LINES_FOR_FULL_CONTENT + 100)] - # Create mock file handle for content reading - file_handle_read = MagicMock() - file_handle_read.read.return_value = "Large file content " * 1000 - - # Set up different return values for different calls to open() - mock_file.side_effect = [file_handle, file_handle_read] - - # Create mock model response - mock_model = MagicMock() - mock_parts = [MockPart("This is a summary of the large file.")] - mock_content = MockContent(mock_parts) - mock_finish_reason = MockFinishReason("STOP") - mock_candidate = MockCandidate(mock_content, mock_finish_reason) - mock_response = MockResponse([mock_candidate]) - mock_model.generate_content.return_value = mock_response - - # Initialize tool with mock model - tool = SummarizeCodeTool(model_instance=mock_model) - - # Execute tool with large file - result = tool.execute(file_path="large_file.py") - - # Verify summary returned and model called - assert "Summary of large_file.py" in result - assert "This is a summary of the large file." 
in result - mock_model.generate_content.assert_called_once() - - # Verify prompt content - call_args = mock_model.generate_content.call_args[1] - assert "contents" in call_args - - # Verify system prompt - contents = call_args["contents"][0] - assert "role" in contents - assert "parts" in contents - assert SUMMARIZATION_SYSTEM_PROMPT in contents["parts"] - - -@patch("os.path.exists") -@patch("os.path.isfile") -@patch("os.path.getsize") -@patch("builtins.open") -def test_execute_with_empty_large_file(mock_file, mock_getsize, mock_isfile, mock_exists): - """Test executing the tool with a large but empty file.""" - # Setup mocks - mock_exists.return_value = True - mock_isfile.return_value = True - mock_getsize.return_value = MAX_CHARS_FOR_FULL_CONTENT + 1000 # Large file - - # Create mock file handle for line counting - large file - file_handle = MagicMock() - file_handle.__iter__.return_value = ["Line " + str(i) for i in range(MAX_LINES_FOR_FULL_CONTENT + 100)] - # Create mock file handle for content reading - truly empty content (not just whitespace) - file_handle_read = MagicMock() - file_handle_read.read.return_value = "" # Truly empty, not whitespace - - # Set up different return values for different calls to open() - mock_file.side_effect = [file_handle, file_handle_read] - - # Initialize tool with mock model - mock_model = MagicMock() - # Setup mock response from model - mock_parts = [MockPart("This is a summary of an empty file.")] - mock_content = MockContent(mock_parts) - mock_finish_reason = MockFinishReason("STOP") - mock_candidate = MockCandidate(mock_content, mock_finish_reason) - mock_response = MockResponse([mock_candidate]) - mock_model.generate_content.return_value = mock_response - - # Execute tool with large but empty file - tool = SummarizeCodeTool(model_instance=mock_model) - result = tool.execute(file_path="empty_large_file.py") - - # Verify that the model was called with appropriate parameters - mock_model.generate_content.assert_called_once() - - # Verify the result contains a summary - assert "Summary of empty_large_file.py" in result - assert "This is a summary of an empty file." 
in result - - -@patch("os.path.exists") -@patch("os.path.isfile") -@patch("os.path.getsize") -@patch("builtins.open") -def test_execute_with_file_read_error(mock_file, mock_getsize, mock_isfile, mock_exists): - """Test executing the tool with a file that has a read error.""" - # Setup mocks - mock_exists.return_value = True - mock_isfile.return_value = True - mock_getsize.return_value = 100 # Small file - - # Create mock for file read error - mock_file.side_effect = IOError("Read error") - - # Initialize tool with mock model - mock_model = MagicMock() - tool = SummarizeCodeTool(model_instance=mock_model) - - # Execute tool with file that has read error - result = tool.execute(file_path="error_file.py") - - # Verify error message and model not called - assert "Error" in result - assert "Read error" in result - mock_model.generate_content.assert_not_called() - - -@patch("os.path.exists") -@patch("os.path.isfile") -@patch("os.path.getsize") -@patch("builtins.open") -def test_execute_with_summarization_error(mock_file, mock_getsize, mock_isfile, mock_exists): - """Test executing the tool when summarization fails.""" - # Setup mocks - mock_exists.return_value = True - mock_isfile.return_value = True - mock_getsize.return_value = MAX_CHARS_FOR_FULL_CONTENT + 1000 # Large file - - # Create mock file handle for line counting - large file - file_handle = MagicMock() - file_handle.__iter__.return_value = ["Line " + str(i) for i in range(MAX_LINES_FOR_FULL_CONTENT + 100)] - # Create mock file handle for content reading - file_handle_read = MagicMock() - file_handle_read.read.return_value = "Large file content " * 1000 - - # Set up different return values for different calls to open() - mock_file.side_effect = [file_handle, file_handle_read] - - # Create mock model with error - mock_model = MagicMock() - mock_model.generate_content.side_effect = Exception("Summarization error") - - # Initialize tool with mock model - tool = SummarizeCodeTool(model_instance=mock_model) - - # Execute tool when summarization fails - result = tool.execute(file_path="error_summarize.py") - - # Verify error message - assert "Error generating summary" in result - assert "Summarization error" in result - mock_model.generate_content.assert_called_once() - - -def test_extract_text_success(): - """Test extracting text from a successful response.""" - # Create mock response with successful candidate - mock_parts = [MockPart("Part 1 text."), MockPart("Part 2 text.")] - mock_content = MockContent(mock_parts) - mock_finish_reason = MockFinishReason("STOP") - mock_candidate = MockCandidate(mock_content, mock_finish_reason) - mock_response = MockResponse([mock_candidate]) - - # Initialize tool and extract text - tool = SummarizeCodeTool(model_instance=MagicMock()) - result = tool._extract_text_from_summary_response(mock_response) - - # Verify text extraction - assert result == "Part 1 text.Part 2 text." 
- - -def test_extract_text_with_failed_finish_reason(): - """Test extracting text when finish reason indicates failure.""" - # Create mock response with error finish reason - mock_parts = [MockPart("Partial text")] - mock_content = MockContent(mock_parts) - mock_finish_reason = MockFinishReason("ERROR") - mock_candidate = MockCandidate(mock_content, mock_finish_reason) - mock_response = MockResponse([mock_candidate]) - - # Initialize tool and extract text - tool = SummarizeCodeTool(model_instance=MagicMock()) - result = tool._extract_text_from_summary_response(mock_response) - - # Verify failure message with reason - assert result == "(Summarization failed: ERROR)" - - -def test_extract_text_with_no_candidates(): - """Test extracting text when response has no candidates.""" - # Create mock response with no candidates - mock_response = MockResponse([]) - - # Initialize tool and extract text - tool = SummarizeCodeTool(model_instance=MagicMock()) - result = tool._extract_text_from_summary_response(mock_response) - - # Verify failure message for no candidates - assert result == "(Summarization failed: No candidates)" - - -def test_extract_text_with_exception(): - """Test extracting text when an exception occurs.""" - # Create mock response that will cause exception - class ExceptionResponse: - @property - def candidates(self): - raise Exception("Extraction error") - - # Initialize tool and extract text - tool = SummarizeCodeTool(model_instance=MagicMock()) - result = tool._extract_text_from_summary_response(ExceptionResponse()) - - # Verify exception message - assert result == "(Error extracting summary text)" - - -@patch("os.path.exists") -@patch("os.path.isfile") -@patch("os.path.getsize") -@patch("builtins.open") -def test_execute_general_exception(mock_file, mock_getsize, mock_isfile, mock_exists): - """Test executing the tool when a general exception occurs.""" - # Setup mocks to raise exception outside the normal flow - mock_exists.side_effect = Exception("Unexpected general error") - - # Initialize tool with mock model - mock_model = MagicMock() - tool = SummarizeCodeTool(model_instance=mock_model) - - # Execute tool with unexpected error - result = tool.execute(file_path="file.py") - - # Verify error message - assert "Error processing file for summary/view" in result - assert "Unexpected general error" in result - mock_model.generate_content.assert_not_called() \ No newline at end of file diff --git a/test_dir/improved/test_tree_tool.py b/test_dir/improved/test_tree_tool.py deleted file mode 100644 index b953d83..0000000 --- a/test_dir/improved/test_tree_tool.py +++ /dev/null @@ -1,323 +0,0 @@ -""" -Tests for tree_tool module. 
-""" -import subprocess -import os -import pathlib -from pathlib import Path -import pytest -from unittest.mock import patch, MagicMock - -# Direct import for coverage tracking -import src.cli_code.tools.tree_tool -from src.cli_code.tools.tree_tool import TreeTool, DEFAULT_TREE_DEPTH, MAX_TREE_DEPTH - - -def test_tree_tool_init(): - """Test TreeTool initialization.""" - tool = TreeTool() - assert tool.name == "tree" - assert "directory structure" in tool.description - assert f"depth of {DEFAULT_TREE_DEPTH}" in tool.description - assert "args_schema" in dir(tool) - assert "path" in tool.args_schema - assert "depth" in tool.args_schema - - -@patch("subprocess.run") -def test_tree_success(mock_run): - """Test successful tree command execution.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = ".\n├── file1.txt\n└── dir1/\n ├── file2.txt\n └── file3.txt" - mock_run.return_value = mock_process - - # Execute tool - tool = TreeTool() - result = tool.execute() - - # Verify results - assert "file1.txt" in result - assert "dir1/" in result - assert "file2.txt" in result - mock_run.assert_called_once_with( - ["tree", "-L", str(DEFAULT_TREE_DEPTH)], - capture_output=True, - text=True, - check=False, - timeout=15 - ) - - -@patch("subprocess.run") -def test_tree_with_custom_path(mock_run): - """Test tree with custom path.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = ".\n└── test_dir/\n └── file.txt" - mock_run.return_value = mock_process - - # Execute tool with custom path - tool = TreeTool() - result = tool.execute(path="test_dir") - - # Verify correct command - mock_run.assert_called_once() - assert "test_dir" in mock_run.call_args[0][0] - - -@patch("subprocess.run") -def test_tree_with_custom_depth_int(mock_run): - """Test tree with custom depth as integer.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "Directory tree" - mock_run.return_value = mock_process - - # Execute tool with custom depth - tool = TreeTool() - result = tool.execute(depth=2) - - # Verify depth parameter used - mock_run.assert_called_once() - assert mock_run.call_args[0][0][2] == "2" - - -@patch("subprocess.run") -def test_tree_with_custom_depth_string(mock_run): - """Test tree with custom depth as string.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "Directory tree" - mock_run.return_value = mock_process - - # Execute tool with custom depth as string - tool = TreeTool() - result = tool.execute(depth="4") - - # Verify string was converted to int - mock_run.assert_called_once() - assert mock_run.call_args[0][0][2] == "4" - - -@patch("subprocess.run") -def test_tree_with_invalid_depth(mock_run): - """Test tree with invalid depth value.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "Directory tree" - mock_run.return_value = mock_process - - # Execute tool with invalid depth - tool = TreeTool() - result = tool.execute(depth="invalid") - - # Verify default was used instead - mock_run.assert_called_once() - assert mock_run.call_args[0][0][2] == str(DEFAULT_TREE_DEPTH) - - -@patch("subprocess.run") -def test_tree_with_depth_exceeding_max(mock_run): - """Test tree with depth exceeding maximum allowed.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "Directory tree" - mock_run.return_value = mock_process - - # Execute 
tool with too large depth - tool = TreeTool() - result = tool.execute(depth=MAX_TREE_DEPTH + 5) - - # Verify depth was clamped to maximum - mock_run.assert_called_once() - assert mock_run.call_args[0][0][2] == str(MAX_TREE_DEPTH) - - -@patch("subprocess.run") -def test_tree_long_output_truncation(mock_run): - """Test truncation of long tree output.""" - # Create a long tree output (> 200 lines) - long_output = ".\n" + "\n".join([f"├── file{i}.txt" for i in range(250)]) - - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = long_output - mock_run.return_value = mock_process - - # Execute tool - tool = TreeTool() - result = tool.execute() - - # Verify truncation - assert "... (output truncated)" in result - assert len(result.splitlines()) <= 202 # 200 lines + truncation message + header - - -@patch("subprocess.run") -def test_tree_command_not_found(mock_run): - """Test when tree command is not found (returncode 127).""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 127 - mock_process.stderr = "tree: command not found" - mock_run.return_value = mock_process - - # Setup fallback mock - with patch.object(TreeTool, "_fallback_tree_implementation", return_value="Fallback tree output"): - # Execute tool - tool = TreeTool() - result = tool.execute() - - # Verify fallback was used - assert result == "Fallback tree output" - - -@patch("subprocess.run") -def test_tree_command_other_error(mock_run): - """Test when tree command fails with an error other than 'not found'.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 1 - mock_process.stderr = "tree: some other error" - mock_run.return_value = mock_process - - # Setup fallback mock - with patch.object(TreeTool, "_fallback_tree_implementation", return_value="Fallback tree output"): - # Execute tool - tool = TreeTool() - result = tool.execute() - - # Verify fallback was used - assert result == "Fallback tree output" - - -@patch("subprocess.run") -def test_tree_file_not_found_error(mock_run): - """Test handling of FileNotFoundError.""" - # Setup mock to raise FileNotFoundError - mock_run.side_effect = FileNotFoundError("No such file or directory: 'tree'") - - # Setup fallback mock - with patch.object(TreeTool, "_fallback_tree_implementation", return_value="Fallback tree output"): - # Execute tool - tool = TreeTool() - result = tool.execute() - - # Verify fallback was used - assert result == "Fallback tree output" - - -@patch("subprocess.run") -def test_tree_timeout(mock_run): - """Test handling of command timeout.""" - # Setup mock to raise TimeoutExpired - mock_run.side_effect = subprocess.TimeoutExpired(cmd="tree", timeout=15) - - # Execute tool - tool = TreeTool() - result = tool.execute() - - # Verify timeout message - assert "Error: Tree command timed out" in result - assert "The directory might be too large or complex" in result - - -@patch("subprocess.run") -def test_tree_unexpected_error(mock_run): - """Test handling of unexpected error with successful fallback.""" - # Setup mock to raise an unexpected error - mock_run.side_effect = Exception("Unexpected error") - - # Setup fallback mock - with patch.object(TreeTool, "_fallback_tree_implementation", return_value="Fallback tree output"): - # Execute tool - tool = TreeTool() - result = tool.execute() - - # Verify fallback was used - assert result == "Fallback tree output" - - -@patch("subprocess.run") -def test_tree_unexpected_error_with_fallback_failure(mock_run): - """Test handling of unexpected error 
with fallback also failing.""" - # Setup mock to raise an unexpected error - mock_run.side_effect = Exception("Unexpected error") - - # Setup fallback mock to also fail - with patch.object(TreeTool, "_fallback_tree_implementation", side_effect=Exception("Fallback error")): - # Execute tool - tool = TreeTool() - result = tool.execute() - - # Verify error message - assert "An unexpected error occurred while displaying directory structure" in result - - -@patch("subprocess.run") -def test_fallback_tree_implementation(mock_run): - """Test the fallback tree implementation when tree command fails.""" - # Setup mock to simulate tree command failure - mock_process = MagicMock() - mock_process.returncode = 127 # Command not found - mock_process.stderr = "tree: command not found" - mock_run.return_value = mock_process - - # Mock the fallback implementation to provide a custom output - with patch.object(TreeTool, "_fallback_tree_implementation") as mock_fallback: - mock_fallback.return_value = "Mocked fallback tree output\nfile1.txt\ndir1/\n└── file2.txt" - - # Execute tool - tool = TreeTool() - result = tool.execute(path="test_path") - - # Verify the fallback was called with correct parameters - mock_fallback.assert_called_once_with("test_path", DEFAULT_TREE_DEPTH) - - # Verify result came from fallback - assert result == "Mocked fallback tree output\nfile1.txt\ndir1/\n└── file2.txt" - - -def test_fallback_tree_nonexistent_path(): - """Test fallback tree with non-existent path.""" - with patch("pathlib.Path.resolve", return_value=Path("nonexistent")): - with patch("pathlib.Path.exists", return_value=False): - # Execute fallback implementation - tool = TreeTool() - result = tool._fallback_tree_implementation("nonexistent", 3) - - # Verify error message - assert "Error: Path 'nonexistent' does not exist" in result - - -def test_fallback_tree_not_a_directory(): - """Test fallback tree with path that is not a directory.""" - with patch("pathlib.Path.resolve", return_value=Path("file.txt")): - with patch("pathlib.Path.exists", return_value=True): - with patch("pathlib.Path.is_dir", return_value=False): - # Execute fallback implementation - tool = TreeTool() - result = tool._fallback_tree_implementation("file.txt", 3) - - # Verify error message - assert "Error: Path 'file.txt' is not a directory" in result - - -def test_fallback_tree_with_exception(): - """Test fallback tree handling of unexpected exceptions.""" - with patch("os.walk", side_effect=Exception("Test error")): - # Execute fallback implementation - tool = TreeTool() - result = tool._fallback_tree_implementation(".", 3) - - # Verify error message - assert "Error generating directory tree" in result - assert "Test error" in result \ No newline at end of file diff --git a/test_dir/test_basic_functions.py b/test_dir/test_basic_functions.py deleted file mode 100644 index e6ea7c5..0000000 --- a/test_dir/test_basic_functions.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -Tests for basic functions defined (originally in test.py). -""" - -# Assuming the functions to test are accessible -# If they were meant to be part of the main package, they should be moved -# or imported appropriately. For now, define them here for testing. - - -def greet(name): - """Say hello to someone.""" - return f"Hello, {name}!" - - -def calculate_sum(a, b): - """Calculate the sum of two numbers.""" - return a + b - - -# --- Pytest Tests --- - - -def test_greet(): - """Test the greet function.""" - assert greet("World") == "Hello, World!" - assert greet("Alice") == "Hello, Alice!" 
- assert greet("") == "Hello, !" - - -def test_calculate_sum(): - """Test the calculate_sum function.""" - assert calculate_sum(2, 2) == 4 - assert calculate_sum(0, 0) == 0 - assert calculate_sum(-1, 1) == 0 - assert calculate_sum(100, 200) == 300 diff --git a/test_dir/test_config.py b/test_dir/test_config.py deleted file mode 100644 index 4b6ebaa..0000000 --- a/test_dir/test_config.py +++ /dev/null @@ -1,256 +0,0 @@ -""" -Tests for the configuration management in src/cli_code/config.py. -""" - -import os -import yaml -import unittest -from pathlib import Path -from unittest.mock import patch, mock_open, MagicMock - -import pytest - -# Assume cli_code is importable -from cli_code.config import Config - -# --- Mocks and Fixtures --- - - -@pytest.fixture -def mock_home(tmp_path): - """Fixture to mock Path.home() to use a temporary directory.""" - mock_home_path = tmp_path / ".home" - mock_home_path.mkdir() - with patch.object(Path, "home", return_value=mock_home_path): - yield mock_home_path - - -@pytest.fixture -def mock_config_paths(mock_home): - """Fixture providing expected config paths based on mock_home.""" - config_dir = mock_home / ".config" / "cli-code-agent" - config_file = config_dir / "config.yaml" - return config_dir, config_file - - -@pytest.fixture -def default_config_data(): - """Default configuration data structure.""" - return { - "google_api_key": None, - "default_provider": "gemini", - "default_model": "models/gemini-2.5-pro-exp-03-25", - "ollama_api_url": None, - "ollama_default_model": "llama3.2", - "settings": { - "max_tokens": 1000000, - "temperature": 0.5, - "token_warning_threshold": 800000, - "auto_compact_threshold": 950000, - }, - } - - -# --- Test Cases --- - - -@patch("cli_code.config.Config._load_dotenv", MagicMock()) # Mock dotenv loading -@patch("cli_code.config.Config._load_config") -@patch("cli_code.config.Config._ensure_config_exists") -def test_config_init_calls_ensure_when_load_fails(mock_ensure_config, mock_load_config, mock_config_paths): - """Test Config calls _ensure_config_exists if _load_config returns empty.""" - config_dir, config_file = mock_config_paths - - # Simulate _load_config finding nothing (like file not found or empty) - mock_load_config.return_value = {} - - with patch.dict(os.environ, {}, clear=True): - # We don't need to check inside _ensure_config_exists here, just that it's called - cfg = Config() - - mock_load_config.assert_called_once() - # Verify that _ensure_config_exists was called because load failed - mock_ensure_config.assert_called_once() - # The final config might be the result of _ensure_config_exists potentially setting defaults - # or the empty dict from _load_config depending on internal logic not mocked here. - # Let's focus on the call flow for this test. 
- - -# Separate test for the behavior *inside* _ensure_config_exists -@patch("builtins.open", new_callable=mock_open) -@patch("pathlib.Path.exists") -@patch("pathlib.Path.mkdir") -@patch("yaml.dump") -def test_ensure_config_exists_creates_default( - mock_yaml_dump, mock_mkdir, mock_exists, mock_open_func, mock_config_paths, default_config_data -): - """Test the _ensure_config_exists method creates a default file.""" - config_dir, config_file = mock_config_paths - - # Simulate config file NOT existing - mock_exists.return_value = False - - # Directly instantiate config temporarily just to call the method - # We need to bypass __init__ logic for this direct method test - with patch.object(Config, "__init__", lambda x: None): # Bypass __init__ - cfg = Config() - cfg.config_dir = config_dir - cfg.config_file = config_file - cfg.config = {} # Start with empty config - - # Call the method under test - cfg._ensure_config_exists() - - # Assertions - mock_mkdir.assert_called_once_with(parents=True, exist_ok=True) - mock_exists.assert_called_with() - mock_open_func.assert_called_once_with(config_file, "w") - mock_yaml_dump.assert_called_once() - args, kwargs = mock_yaml_dump.call_args - # Check the data dumped matches the expected default structure - assert args[0] == default_config_data - - -@patch("cli_code.config.Config._load_dotenv", MagicMock()) # Mock dotenv loading -@patch("cli_code.config.Config._apply_env_vars", MagicMock()) # Mock env var application -@patch("cli_code.config.Config._load_config") -@patch("cli_code.config.Config._ensure_config_exists") # Keep patch but don't assert not called -def test_config_init_loads_existing(mock_ensure_config, mock_load_config, mock_config_paths): - """Test Config loads data from _load_config.""" - config_dir, config_file = mock_config_paths - existing_data = {"google_api_key": "existing_key", "default_provider": "ollama", "settings": {"temperature": 0.8}} - mock_load_config.return_value = existing_data.copy() - - with patch.dict(os.environ, {}, clear=True): - cfg = Config() - - mock_load_config.assert_called_once() - assert cfg.config == existing_data - assert cfg.get_credential("gemini") == "existing_key" - assert cfg.get_default_provider() == "ollama" - assert cfg.get_setting("temperature") == 0.8 - - -@patch("cli_code.config.Config._save_config") # Mock save to prevent file writes -@patch("cli_code.config.Config._load_config") # Correct patch target -def test_config_setters_getters(mock_load_config, mock_save, mock_config_paths): - """Test the various getter and setter methods.""" - config_dir, config_file = mock_config_paths - initial_data = { - "google_api_key": "initial_google_key", - "ollama_api_url": "initial_ollama_url", - "default_provider": "gemini", - "default_model": "gemini-model-1", - "ollama_default_model": "ollama-model-1", - "settings": {"temperature": 0.7, "max_tokens": 500000}, - } - mock_load_config.return_value = initial_data.copy() # Mock the load result - - # Mock other __init__ methods to isolate loading - with ( - patch.dict(os.environ, {}, clear=True), - patch("cli_code.config.Config._load_dotenv", MagicMock()), - patch("cli_code.config.Config._ensure_config_exists", MagicMock()), - patch("cli_code.config.Config._apply_env_vars", MagicMock()), - ): - cfg = Config() - - # Test initial state loaded correctly - assert cfg.get_credential("gemini") == "initial_google_key" - assert cfg.get_credential("ollama") == "initial_ollama_url" - assert cfg.get_default_provider() == "gemini" - assert cfg.get_default_model() == 
"gemini-model-1" # Default provider is gemini - assert cfg.get_default_model(provider="gemini") == "gemini-model-1" - assert cfg.get_default_model(provider="ollama") == "ollama-model-1" - assert cfg.get_setting("temperature") == 0.7 - assert cfg.get_setting("max_tokens") == 500000 - assert cfg.get_setting("non_existent", default="fallback") == "fallback" - - # Test Setters - cfg.set_credential("gemini", "new_google_key") - assert cfg.config["google_api_key"] == "new_google_key" - assert mock_save.call_count == 1 - cfg.set_credential("ollama", "new_ollama_url") - assert cfg.config["ollama_api_url"] == "new_ollama_url" - assert mock_save.call_count == 2 - - cfg.set_default_provider("ollama") - assert cfg.config["default_provider"] == "ollama" - assert mock_save.call_count == 3 - - # Setting default model when default provider is ollama - cfg.set_default_model("ollama-model-2") - assert cfg.config["ollama_default_model"] == "ollama-model-2" - assert mock_save.call_count == 4 - # Setting default model explicitly for gemini - cfg.set_default_model("gemini-model-2", provider="gemini") - assert cfg.config["default_model"] == "gemini-model-2" - assert mock_save.call_count == 5 - - cfg.set_setting("temperature", 0.9) - assert cfg.config["settings"]["temperature"] == 0.9 - assert mock_save.call_count == 6 - cfg.set_setting("new_setting", True) - assert cfg.config["settings"]["new_setting"] is True - assert mock_save.call_count == 7 - - # Test Getters after setting - assert cfg.get_credential("gemini") == "new_google_key" - assert cfg.get_credential("ollama") == "new_ollama_url" - assert cfg.get_default_provider() == "ollama" - assert cfg.get_default_model() == "ollama-model-2" # Default provider is now ollama - assert cfg.get_default_model(provider="gemini") == "gemini-model-2" - assert cfg.get_default_model(provider="ollama") == "ollama-model-2" - assert cfg.get_setting("temperature") == 0.9 - assert cfg.get_setting("new_setting") is True - - # Test setting unknown provider (should log error, not save) - cfg.set_credential("unknown", "some_key") - assert "unknown" not in cfg.config - assert mock_save.call_count == 7 # No new save call - cfg.set_default_provider("unknown") - assert cfg.config["default_provider"] == "ollama" # Should remain unchanged - assert mock_save.call_count == 7 # No new save call - cfg.set_default_model("unknown-model", provider="unknown") - assert cfg.config.get("unknown_default_model") is None - assert mock_save.call_count == 7 # No new save call - - -# New test combining env var logic check -@patch("cli_code.config.Config._load_dotenv", MagicMock()) # Mock dotenv loading step -@patch("cli_code.config.Config._load_config") -@patch("cli_code.config.Config._ensure_config_exists", MagicMock()) # Mock ensure config -@patch("cli_code.config.Config._save_config") # Mock save to check if called -def test_config_env_var_override(mock_save, mock_load_config, mock_config_paths): - """Test that _apply_env_vars correctly overrides loaded config.""" - config_dir, config_file = mock_config_paths - initial_config_data = { - "google_api_key": "config_key", - "ollama_api_url": "config_url", - "default_provider": "gemini", - "ollama_default_model": "config_ollama", - } - env_vars = { - "CLI_CODE_GOOGLE_API_KEY": "env_key", - "CLI_CODE_OLLAMA_API_URL": "env_url", - "CLI_CODE_DEFAULT_PROVIDER": "ollama", - } - mock_load_config.return_value = initial_config_data.copy() - - with patch.dict(os.environ, env_vars, clear=True): - cfg = Config() - - assert cfg.config["google_api_key"] == "env_key" - 
assert cfg.config["ollama_api_url"] == "env_url" - assert cfg.config["default_provider"] == "ollama" - assert cfg.config["ollama_default_model"] == "config_ollama" - - -# New simplified test for _migrate_old_config_paths -# @patch('builtins.open', new_callable=mock_open) -# @patch('yaml.safe_load') -# @patch('cli_code.config.Config._save_config') -# def test_migrate_old_config_paths_logic(mock_save, mock_yaml_load, mock_open_func, mock_home): -# ... (implementation removed) ... - -# End of file diff --git a/test_dir/test_config_comprehensive.py b/test_dir/test_config_comprehensive.py deleted file mode 100644 index 3eb97db..0000000 --- a/test_dir/test_config_comprehensive.py +++ /dev/null @@ -1,391 +0,0 @@ -""" -Comprehensive tests for the config module in src/cli_code/config.py. -Focusing on improving test coverage beyond the basic test_config.py - -Configuration in CLI Code supports two approaches: -1. File-based configuration (.yaml): Primary approach for end users who install from pip -2. Environment variables: Used mainly during development for quick experimentation - -Both approaches are supported simultaneously - there is no migration needed as both -configuration methods can coexist. -""" - -import os -import sys -import tempfile -from pathlib import Path -from unittest.mock import patch, mock_open, MagicMock - -# Add the src directory to the path to allow importing cli_code -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) - -import pytest -from cli_code.config import Config, log - - -@pytest.fixture -def mock_home(): - """Create a temporary directory to use as home directory.""" - with patch.dict(os.environ, {"HOME": "/mock/home"}, clear=False): - yield Path("/mock/home") - - -@pytest.fixture -def config_instance(): - """Provide a minimal Config instance for testing individual methods.""" - with patch.object(Config, "__init__", return_value=None): - config = Config() - config.config_dir = Path('/fake/config/dir') - config.config_file = Path('/fake/config/dir/config.yaml') - config.config = {} - yield config - - -@pytest.fixture -def default_config_data(): - """Return default configuration data.""" - return { - 'google_api_key': 'fake-key', - 'default_provider': 'gemini', - 'default_model': 'gemini-pro', - 'ollama_api_url': 'http://localhost:11434', - 'ollama_default_model': 'llama2', - 'settings': { - 'max_tokens': 1000000, - 'temperature': 0.5 - } - } - - -class TestDotEnvLoading: - """Tests for the _load_dotenv method.""" - - def test_load_dotenv_file_not_exists(self, config_instance): - """Test _load_dotenv when .env file doesn't exist.""" - with patch('pathlib.Path.exists', return_value=False), \ - patch('cli_code.config.log') as mock_logger: - - config_instance._load_dotenv() - - # Verify appropriate logging - mock_logger.debug.assert_called_once() - assert "No .env or .env.example file found" in mock_logger.debug.call_args[0][0] - - @pytest.mark.parametrize("env_content,expected_vars", [ - (""" - # This is a comment - CLI_CODE_GOOGLE_API_KEY=test-key - CLI_CODE_OLLAMA_API_URL=http://localhost:11434 - """, - {"CLI_CODE_GOOGLE_API_KEY": "test-key", "CLI_CODE_OLLAMA_API_URL": "http://localhost:11434"}), - - (""" - CLI_CODE_GOOGLE_API_KEY="quoted-key-value" - CLI_CODE_OLLAMA_API_URL='quoted-url' - """, - {"CLI_CODE_GOOGLE_API_KEY": "quoted-key-value", "CLI_CODE_OLLAMA_API_URL": "quoted-url"}), - - (""" - # Comment line - - INVALID_LINE_NO_PREFIX - CLI_CODE_VALID_KEY=valid-value - =missing_key - CLI_CODE_MISSING_VALUE= - """, - 
{"CLI_CODE_VALID_KEY": "valid-value", "CLI_CODE_MISSING_VALUE": ""}) - ]) - def test_load_dotenv_variations(self, config_instance, env_content, expected_vars): - """Test _load_dotenv with various input formats.""" - with patch('pathlib.Path.exists', return_value=True), \ - patch('builtins.open', mock_open(read_data=env_content)), \ - patch.dict(os.environ, {}, clear=False), \ - patch('cli_code.config.log'): - - config_instance._load_dotenv() - - # Verify environment variables were loaded correctly - for key, value in expected_vars.items(): - assert os.environ.get(key) == value - - def test_load_dotenv_file_read_error(self, config_instance): - """Test _load_dotenv when there's an error reading the .env file.""" - with patch('pathlib.Path.exists', return_value=True), \ - patch('builtins.open', side_effect=Exception("Failed to open file")), \ - patch('cli_code.config.log') as mock_logger: - - config_instance._load_dotenv() - - # Verify error is logged - mock_logger.warning.assert_called_once() - assert "Error loading .env file" in mock_logger.warning.call_args[0][0] - - -class TestConfigErrorHandling: - """Tests for error handling in the Config class.""" - - def test_ensure_config_exists_file_creation(self, config_instance): - """Test _ensure_config_exists creates default file when it doesn't exist.""" - with patch('pathlib.Path.exists', return_value=False), \ - patch('pathlib.Path.mkdir'), \ - patch('builtins.open', mock_open()) as mock_file, \ - patch('yaml.dump') as mock_yaml_dump, \ - patch('cli_code.config.log') as mock_logger: - - config_instance._ensure_config_exists() - - # Verify directory was created - assert config_instance.config_dir.mkdir.called - - # Verify file was opened for writing - mock_file.assert_called_once_with(config_instance.config_file, 'w') - - # Verify yaml.dump was called - mock_yaml_dump.assert_called_once() - - # Verify logging - mock_logger.info.assert_called_once() - - def test_load_config_invalid_yaml(self, config_instance): - """Test _load_config with invalid YAML file.""" - with patch('pathlib.Path.exists', return_value=True), \ - patch('builtins.open', mock_open(read_data="invalid: yaml: content")), \ - patch('yaml.safe_load', side_effect=Exception("YAML parsing error")), \ - patch('cli_code.config.log') as mock_logger: - - result = config_instance._load_config() - - # Verify error is logged and empty dict is returned - mock_logger.error.assert_called_once() - assert result == {} - - def test_ensure_config_directory_error(self, config_instance): - """Test error handling when creating config directory fails.""" - with patch('pathlib.Path.exists', return_value=False), \ - patch('pathlib.Path.mkdir', side_effect=Exception("mkdir error")), \ - patch('cli_code.config.log') as mock_logger: - - config_instance._ensure_config_exists() - - # Verify error is logged - mock_logger.error.assert_called_once() - assert "Failed to create config directory" in mock_logger.error.call_args[0][0] - - def test_save_config_file_write_error(self, config_instance): - """Test _save_config when there's an error writing to the file.""" - with patch('builtins.open', side_effect=Exception("File write error")), \ - patch('cli_code.config.log') as mock_logger: - - config_instance.config = {"test": "data"} - config_instance._save_config() - - # Verify error is logged - mock_logger.error.assert_called_once() - assert "Error saving config file" in mock_logger.error.call_args[0][0] - - -class TestCredentialAndProviderFunctions: - """Tests for credential, provider, and model getter and setter 
methods.""" - - @pytest.mark.parametrize("provider,config_key,config_value,expected", [ - ('gemini', 'google_api_key', 'test-key', 'test-key'), - ('ollama', 'ollama_api_url', 'test-url', 'test-url'), - ('unknown', None, None, None), - ]) - def test_get_credential(self, config_instance, provider, config_key, config_value, expected): - """Test getting credentials for different providers.""" - if config_key: - config_instance.config = {config_key: config_value} - else: - config_instance.config = {} - - with patch('cli_code.config.log'): - assert config_instance.get_credential(provider) == expected - - @pytest.mark.parametrize("provider,expected_key,value", [ - ('gemini', 'google_api_key', 'new-key'), - ('ollama', 'ollama_api_url', 'new-url'), - ]) - def test_set_credential_valid_providers(self, config_instance, provider, expected_key, value): - """Test setting credentials for valid providers.""" - with patch.object(Config, '_save_config') as mock_save: - config_instance.config = {} - config_instance.set_credential(provider, value) - - assert config_instance.config[expected_key] == value - mock_save.assert_called_once() - - def test_set_credential_unknown_provider(self, config_instance): - """Test setting credential for unknown provider.""" - with patch.object(Config, '_save_config') as mock_save, \ - patch('cli_code.config.log') as mock_logger: - - config_instance.config = {} - config_instance.set_credential('unknown', 'value') - - # Verify error was logged and config not saved - mock_logger.error.assert_called_once() - mock_save.assert_not_called() - - @pytest.mark.parametrize("config_data,provider,expected", [ - ({'default_provider': 'ollama'}, None, 'ollama'), - ({}, None, 'gemini'), # Default when not set - (None, None, 'gemini'), # Default when config is None - ]) - def test_get_default_provider(self, config_instance, config_data, provider, expected): - """Test getting the default provider under different conditions.""" - config_instance.config = config_data - assert config_instance.get_default_provider() == expected - - @pytest.mark.parametrize("provider,model,config_key", [ - ('gemini', 'new-model', 'default_model'), - ('ollama', 'new-model', 'ollama_default_model'), - ]) - def test_set_default_model(self, config_instance, provider, model, config_key): - """Test setting default model for different providers.""" - with patch.object(Config, '_save_config') as mock_save: - config_instance.config = {} - config_instance.set_default_model(model, provider) - - assert config_instance.config[config_key] == model - mock_save.assert_called_once() - - -class TestSettingFunctions: - """Tests for setting getter and setter methods.""" - - @pytest.mark.parametrize("config_data,setting,default,expected", [ - ({'settings': {'max_tokens': 1000}}, 'max_tokens', None, 1000), - ({'settings': {}}, 'missing', 'default-value', 'default-value'), - ({}, 'any-setting', 'fallback', 'fallback'), - (None, 'any-setting', 'fallback', 'fallback'), - ]) - def test_get_setting(self, config_instance, config_data, setting, default, expected): - """Test get_setting method with various inputs.""" - config_instance.config = config_data - assert config_instance.get_setting(setting, default=default) == expected - - def test_set_setting(self, config_instance): - """Test set_setting method.""" - with patch.object(Config, '_save_config') as mock_save: - # Test with existing settings - config_instance.config = {'settings': {'existing': 'old'}} - config_instance.set_setting('new_setting', 'value') - - assert 
config_instance.config['settings']['new_setting'] == 'value' - assert config_instance.config['settings']['existing'] == 'old' - - # Test when settings dict doesn't exist - config_instance.config = {} - config_instance.set_setting('another', 'value') - - assert config_instance.config['settings']['another'] == 'value' - - # Test when config is None - config_instance.config = None - config_instance.set_setting('third', 'value') - - # Assert: Check that config is still None (or {}) and save was not called - # depending on the desired behavior when config starts as None - # Assuming set_setting does nothing if config is None: - assert config_instance.config is None - # Ensure save was not called in this specific sub-case - # Find the last call before setting config to None - save_call_count_before_none = mock_save.call_count - config_instance.set_setting('fourth', 'value') # Call again with config=None - assert mock_save.call_count == save_call_count_before_none - - -class TestConfigInitialization: - """Tests for the Config class initialization and environment variable handling.""" - - @pytest.mark.timeout(2) # Reduce timeout to 2 seconds - def test_config_init_with_env_vars(self): - """Test that environment variables are correctly loaded during initialization.""" - test_env = { - 'CLI_CODE_GOOGLE_API_KEY': 'env-google-key', - 'CLI_CODE_DEFAULT_PROVIDER': 'env-provider', - 'CLI_CODE_DEFAULT_MODEL': 'env-model', - 'CLI_CODE_OLLAMA_API_URL': 'env-ollama-url', - 'CLI_CODE_OLLAMA_DEFAULT_MODEL': 'env-ollama-model', - 'CLI_CODE_SETTINGS_MAX_TOKENS': '5000', - 'CLI_CODE_SETTINGS_TEMPERATURE': '0.8' - } - - with patch.dict(os.environ, test_env, clear=False), \ - patch.object(Config, '_load_dotenv'), \ - patch.object(Config, '_ensure_config_exists'), \ - patch.object(Config, '_load_config', return_value={}): - - config = Config() - - # Verify environment variables override config values - assert config.config.get('google_api_key') == 'env-google-key' - assert config.config.get('default_provider') == 'env-provider' - assert config.config.get('default_model') == 'env-model' - assert config.config.get('ollama_api_url') == 'env-ollama-url' - assert config.config.get('ollama_default_model') == 'env-ollama-model' - assert config.config.get('settings', {}).get('max_tokens') == 5000 - assert config.config.get('settings', {}).get('temperature') == 0.8 - - @pytest.mark.timeout(2) # Reduce timeout to 2 seconds - def test_paths_initialization(self): - """Test the initialization of paths in Config class.""" - with patch('os.path.expanduser', return_value='/mock/home'), \ - patch.object(Config, '_load_dotenv'), \ - patch.object(Config, '_ensure_config_exists'), \ - patch.object(Config, '_load_config', return_value={}): - - config = Config() - - # Verify paths are correctly initialized - assert config.config_dir == Path('/mock/home/.config/cli-code') - assert config.config_file == Path('/mock/home/.config/cli-code/config.yaml') - - -class TestDotEnvEdgeCases: - """Test edge cases for the _load_dotenv method.""" - - @pytest.mark.timeout(2) # Reduce timeout to 2 seconds - def test_load_dotenv_with_example_file(self, config_instance): - """Test _load_dotenv with .env.example file when .env doesn't exist.""" - example_content = """ - # Example configuration - CLI_CODE_GOOGLE_API_KEY=example-key - """ - - with patch('pathlib.Path.exists', side_effect=[False, True]), \ - patch('builtins.open', mock_open(read_data=example_content)), \ - patch.dict(os.environ, {}, clear=False), \ - patch('cli_code.config.log'): - - 
config_instance._load_dotenv() - - # Verify environment variables were loaded from example file - assert os.environ.get('CLI_CODE_GOOGLE_API_KEY') == 'example-key' - - -# Optimized test that combines several edge cases in one test -class TestEdgeCases: - """Combined tests for various edge cases.""" - - @pytest.mark.parametrize("method_name,args,config_state,expected_result,should_log_error", [ - ('get_credential', ('unknown',), {}, None, False), - ('get_default_provider', (), None, 'gemini', False), - ('get_default_model', ('gemini',), None, 'models/gemini-1.5-pro-latest', False), - ('get_default_model', ('ollama',), None, 'llama2', False), - ('get_default_model', ('unknown_provider',), {}, None, False), - ('get_setting', ('any_setting', 'fallback'), None, 'fallback', False), - ('get_setting', ('any_key', 'fallback'), None, 'fallback', False), - ]) - def test_edge_cases(self, config_instance, method_name, args, config_state, expected_result, should_log_error): - """Test various edge cases with parametrized inputs.""" - with patch('cli_code.config.log') as mock_logger: - config_instance.config = config_state - method = getattr(config_instance, method_name) - result = method(*args) - - assert result == expected_result - - if should_log_error: - assert mock_logger.error.called or mock_logger.warning.called \ No newline at end of file diff --git a/test_dir/test_config_edge_cases.py b/test_dir/test_config_edge_cases.py deleted file mode 100644 index cba7bbb..0000000 --- a/test_dir/test_config_edge_cases.py +++ /dev/null @@ -1,401 +0,0 @@ -""" -Tests focused on edge cases in the config module to improve coverage. -""" - -import os -import tempfile -import unittest -from pathlib import Path -from unittest import TestCase, mock -from unittest.mock import patch, mock_open, MagicMock - -# Safe import with fallback for CI -try: - from cli_code.config import Config - import yaml - IMPORTS_AVAILABLE = True -except ImportError: - IMPORTS_AVAILABLE = False - # Mock for CI - class Config: - def __init__(self): - self.config = {} - self.config_file = Path('/mock/config.yaml') - self.config_dir = Path('/mock') - self.env_file = Path('/mock/.env') - - yaml = MagicMock() - - -@unittest.skipIf(not IMPORTS_AVAILABLE, "Required imports not available") -class TestConfigNullHandling(TestCase): - """Tests handling of null/None values in config operations.""" - - def setUp(self): - """Set up test environment with temp directory.""" - self.temp_dir = tempfile.TemporaryDirectory() - self.temp_path = Path(self.temp_dir.name) - - # Create a mock config file path - self.config_file = self.temp_path / "config.yaml" - - # Create patches - self.patches = [] - - # Patch __init__ to avoid filesystem operations - self.patch_init = patch.object(Config, '__init__', return_value=None) - self.mock_init = self.patch_init.start() - self.patches.append(self.patch_init) - - def tearDown(self): - """Clean up test environment.""" - # Stop all patches - for p in self.patches: - p.stop() - - # Delete temp directory - self.temp_dir.cleanup() - - def test_get_default_provider_with_null_config(self): - """Test get_default_provider when config is None.""" - config = Config.__new__(Config) - config.config = None - - # Patch the method to handle null config - original_method = Config.get_default_provider - - def patched_get_default_provider(self): - if self.config is None: - return 'gemini' - return original_method(self) - - with patch.object(Config, 'get_default_provider', patched_get_default_provider): - result = config.get_default_provider() 
- self.assertEqual(result, 'gemini') - - def test_get_default_model_with_null_config(self): - """Test get_default_model when config is None.""" - config = Config.__new__(Config) - config.config = None - - # Patch the method to handle null config - original_method = Config.get_default_model - - def patched_get_default_model(self, provider=None): - if self.config is None: - return 'gemini-pro' - return original_method(self, provider) - - with patch.object(Config, 'get_default_model', patched_get_default_model): - result = config.get_default_model('gemini') - self.assertEqual(result, 'gemini-pro') - - def test_get_setting_with_null_config(self): - """Test get_setting when config is None.""" - config = Config.__new__(Config) - config.config = None - - # Patch the method to handle null config - original_method = Config.get_setting - - def patched_get_setting(self, setting, default=None): - if self.config is None: - return default - return original_method(self, setting, default) - - with patch.object(Config, 'get_setting', patched_get_setting): - result = config.get_setting('any-setting', 'default-value') - self.assertEqual(result, 'default-value') - - def test_get_credential_with_null_config(self): - """Test get_credential when config is None.""" - config = Config.__new__(Config) - config.config = None - - # Patch the method to handle null config - original_method = Config.get_credential - - def patched_get_credential(self, provider): - if self.config is None: - if provider == "gemini" and "CLI_CODE_GOOGLE_API_KEY" in os.environ: - return os.environ["CLI_CODE_GOOGLE_API_KEY"] - return None - return original_method(self, provider) - - with patch.dict(os.environ, {"CLI_CODE_GOOGLE_API_KEY": "env-api-key"}, clear=False): - with patch.object(Config, 'get_credential', patched_get_credential): - result = config.get_credential('gemini') - self.assertEqual(result, 'env-api-key') - - -@unittest.skipIf(not IMPORTS_AVAILABLE, "Required imports not available") -class TestConfigEdgeCases(TestCase): - """Test various edge cases in the Config class.""" - - def setUp(self): - """Set up test environment with mock paths.""" - # Create patches - self.patches = [] - - # Patch __init__ to avoid filesystem operations - self.patch_init = patch.object(Config, '__init__', return_value=None) - self.mock_init = self.patch_init.start() - self.patches.append(self.patch_init) - - def tearDown(self): - """Clean up test environment.""" - # Stop all patches - for p in self.patches: - p.stop() - - def test_config_initialize_with_no_file(self): - """Test initialization when config file doesn't exist and can't be created.""" - # Create a Config object without calling init - config = Config.__new__(Config) - - # Set up attributes normally set in __init__ - config.config = {} - config.config_file = Path('/mock/config.yaml') - config.config_dir = Path('/mock') - config.env_file = Path('/mock/.env') - - # The test should just verify that these attributes got set - self.assertEqual(config.config, {}) - self.assertEqual(str(config.config_file), '/mock/config.yaml') - - @unittest.skip("Patching os.path.expanduser with Path is tricky - skipping for now") - def test_config_path_with_env_override(self): - """Test override of config path with environment variable.""" - # Test with simpler direct assertions using Path constructor - with patch('os.path.expanduser', return_value='/default/home'): - # Using Path constructor directly to simulate what happens in the config class - config_dir = Path(os.path.expanduser("~/.config/cli-code")) - 
self.assertEqual(str(config_dir), '/default/home/.config/cli-code') - - # Test with environment variable override - with patch.dict(os.environ, {'CLI_CODE_CONFIG_PATH': '/custom/path'}, clear=False): - # Simulate what the constructor would do using the env var - config_path = os.environ.get('CLI_CODE_CONFIG_PATH') - self.assertEqual(config_path, '/custom/path') - - # When used in a Path constructor - config_dir = Path(config_path) - self.assertEqual(str(config_dir), '/custom/path') - - def test_env_var_config_override(self): - """Simpler test for environment variable config path override.""" - # Test that environment variables are correctly retrieved - with patch.dict(os.environ, {'CLI_CODE_CONFIG_PATH': '/custom/path'}, clear=False): - env_path = os.environ.get('CLI_CODE_CONFIG_PATH') - self.assertEqual(env_path, '/custom/path') - - # Test path conversion - path_obj = Path(env_path) - self.assertEqual(str(path_obj), '/custom/path') - - def test_load_dotenv_with_invalid_file(self): - """Test loading dotenv with invalid file content.""" - mock_env_content = "INVALID_FORMAT_NO_EQUALS\nCLI_CODE_VALID=value" - - # Create a Config object without calling init - config = Config.__new__(Config) - config.env_file = Path('/mock/.env') - - # Mock file operations - with patch('pathlib.Path.exists', return_value=True): - with patch('builtins.open', mock_open(read_data=mock_env_content)): - with patch.dict(os.environ, {}, clear=False): - # Run the method - config._load_dotenv() - - # Check that valid entry was loaded - self.assertEqual(os.environ.get('CLI_CODE_VALID'), 'value') - - def test_load_config_with_invalid_yaml(self): - """Test loading config with invalid YAML content.""" - invalid_yaml = "key: value\ninvalid: : yaml" - - # Create a Config object without calling init - config = Config.__new__(Config) - config.config_file = Path('/mock/config.yaml') - - # Mock file operations - with patch('pathlib.Path.exists', return_value=True): - with patch('builtins.open', mock_open(read_data=invalid_yaml)): - with patch('yaml.safe_load', side_effect=yaml.YAMLError("Invalid YAML")): - # Run the method - result = config._load_config() - - # Should return empty dict on error - self.assertEqual(result, {}) - - def test_save_config_with_permission_error(self): - """Test save_config when permission error occurs.""" - # Create a Config object without calling init - config = Config.__new__(Config) - config.config_file = Path('/mock/config.yaml') - config.config = {'key': 'value'} - - # Mock file operations - with patch('builtins.open', side_effect=PermissionError("Permission denied")): - with patch('cli_code.config.log') as mock_log: - # Run the method - config._save_config() - - # Check that error was logged - mock_log.error.assert_called_once() - args = mock_log.error.call_args[0] - self.assertTrue(any("Permission denied" in str(a) for a in args)) - - def test_set_credential_with_unknown_provider(self): - """Test set_credential with an unknown provider.""" - # Create a Config object without calling init - config = Config.__new__(Config) - config.config = {} - - with patch.object(Config, '_save_config') as mock_save: - # Call with unknown provider - result = config.set_credential('unknown', 'value') - - # Should not save and should implicitly return None - mock_save.assert_not_called() - self.assertIsNone(result) - - def test_set_default_model_with_unknown_provider(self): - """Test set_default_model with an unknown provider.""" - # Create a Config object without calling init - config = Config.__new__(Config) - 
-        config.config = {}
-
-        # Let's patch get_default_provider to return a specific value
-        with patch.object(Config, 'get_default_provider', return_value='unknown'):
-            with patch.object(Config, '_save_config') as mock_save:
-                # This should return None/False for the unknown provider
-                result = config.set_default_model('model', 'unknown')
-
-                # Save should not be called
-                mock_save.assert_not_called()
-                self.assertIsNone(result)  # Implicitly returns None
-
-    def test_get_default_model_edge_cases(self):
-        """Test get_default_model with various edge cases."""
-        # Create a Config object without calling init
-        config = Config.__new__(Config)
-
-        # Patch get_default_provider to avoid issues
-        with patch.object(Config, 'get_default_provider', return_value='gemini'):
-            # Test with empty config
-            config.config = {}
-            self.assertEqual(config.get_default_model('gemini'), "models/gemini-1.5-pro-latest")
-
-            # Test with unknown provider directly (not using get_default_provider)
-            self.assertIsNone(config.get_default_model('unknown'))
-
-            # Test with custom defaults in config
-            config.config = {
-                'default_model': 'custom-default',
-                'ollama_default_model': 'custom-ollama'
-            }
-            self.assertEqual(config.get_default_model('gemini'), 'custom-default')
-            self.assertEqual(config.get_default_model('ollama'), 'custom-ollama')
-
-    def test_missing_credentials_handling(self):
-        """Test handling of missing credentials."""
-        # Create a Config object without calling init
-        config = Config.__new__(Config)
-        config.config = {}
-
-        # Test with empty environment and config
-        with patch.dict(os.environ, {}, clear=False):
-            self.assertIsNone(config.get_credential('gemini'))
-            self.assertIsNone(config.get_credential('ollama'))
-
-        # Test with value in environment but not in config
-        with patch.dict(os.environ, {'CLI_CODE_GOOGLE_API_KEY': 'env-key'}, clear=False):
-            with patch.object(config, 'config', {'google_api_key': None}):
-                # Let's also patch _apply_env_vars to simulate updating config from env
-                with patch.object(Config, '_apply_env_vars') as mock_apply_env:
-                    # This is just to ensure the test environment is set correctly
-                    # In a real scenario, _apply_env_vars would have been called during init
-                    mock_apply_env.side_effect = lambda: setattr(config, 'config', {'google_api_key': 'env-key'})
-                    mock_apply_env()
-                    self.assertEqual(config.get_credential('gemini'), 'env-key')
-
-        # Test with value in config
-        config.config = {'google_api_key': 'config-key'}
-        self.assertEqual(config.get_credential('gemini'), 'config-key')
-
-    def test_apply_env_vars_with_different_types(self):
-        """Test _apply_env_vars with different types of values."""
-        # Create a Config object without calling init
-        config = Config.__new__(Config)
-        config.config = {}
-
-        # Test with different types of environment variables
-        with patch.dict(os.environ, {
-            'CLI_CODE_GOOGLE_API_KEY': 'api-key',
-            'CLI_CODE_SETTINGS_MAX_TOKENS': '1000',
-            'CLI_CODE_SETTINGS_TEMPERATURE': '0.5',
-            'CLI_CODE_SETTINGS_DEBUG': 'true',
-            'CLI_CODE_SETTINGS_MODEL_NAME': 'gemini-pro'
-        }, clear=False):
-            # Call the method
-            config._apply_env_vars()
-
-            # Check results
-            self.assertEqual(config.config['google_api_key'], 'api-key')
-
-            # Check settings with different types
-            self.assertEqual(config.config['settings']['max_tokens'], 1000)  # int
-            self.assertEqual(config.config['settings']['temperature'], 0.5)  # float
-            self.assertEqual(config.config['settings']['debug'], True)  # bool
-            self.assertEqual(config.config['settings']['model_name'], 'gemini-pro')  # string
-
-    def test_legacy_config_migration(self):
-        """Test migration of legacy config format."""
-        # Create a Config object without calling init
-        config = Config.__new__(Config)
-
-        # Create a legacy-style config (nested dicts)
-        config.config = {
-            'gemini': {
-                'api_key': 'legacy-key',
-                'model': 'legacy-model'
-            },
-            'ollama': {
-                'api_url': 'legacy-url',
-                'model': 'legacy-model'
-            }
-        }
-
-        # Manually implement config migration (simulate what _migrate_v1_to_v2 would do)
-        with patch.object(Config, '_save_config') as mock_save:
-            # Migrate gemini settings
-            if 'gemini' in config.config and isinstance(config.config['gemini'], dict):
-                gemini_config = config.config.pop('gemini')
-                if 'api_key' in gemini_config:
-                    config.config['google_api_key'] = gemini_config['api_key']
-                if 'model' in gemini_config:
-                    config.config['default_model'] = gemini_config['model']
-
-            # Migrate ollama settings
-            if 'ollama' in config.config and isinstance(config.config['ollama'], dict):
-                ollama_config = config.config.pop('ollama')
-                if 'api_url' in ollama_config:
-                    config.config['ollama_api_url'] = ollama_config['api_url']
-                if 'model' in ollama_config:
-                    config.config['ollama_default_model'] = ollama_config['model']
-
-            # Check that config was migrated
-            self.assertIn('google_api_key', config.config)
-            self.assertEqual(config.config['google_api_key'], 'legacy-key')
-            self.assertIn('default_model', config.config)
-            self.assertEqual(config.config['default_model'], 'legacy-model')
-
-            self.assertIn('ollama_api_url', config.config)
-            self.assertEqual(config.config['ollama_api_url'], 'legacy-url')
-            self.assertIn('ollama_default_model', config.config)
-            self.assertEqual(config.config['ollama_default_model'], 'legacy-model')
-
-            # Save should not be called
-            mock_save.assert_not_called()  # We didn't call _save_config in our test
\ No newline at end of file
diff --git a/test_dir/test_config_missing_methods.py b/test_dir/test_config_missing_methods.py
deleted file mode 100644
index 1eeb952..0000000
--- a/test_dir/test_config_missing_methods.py
+++ /dev/null
@@ -1,272 +0,0 @@
-"""
-Tests for Config class methods that might have been missed in existing tests.
-""" - -import os -import sys -import tempfile -import pytest -from pathlib import Path -from unittest.mock import patch, mock_open, MagicMock - -# Setup proper import path -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../src'))) - -# Check if running in CI -IN_CI = os.environ.get('CI', 'false').lower() == 'true' - -# Try importing the required modules -try: - import yaml - from cli_code.config import Config - IMPORTS_AVAILABLE = True -except ImportError: - IMPORTS_AVAILABLE = False - yaml = MagicMock() - # Create a dummy Config class for testing - class Config: - def __init__(self): - self.config = {} - self.config_dir = Path("/tmp") - self.config_file = self.config_dir / "config.yaml" - -# Skip tests if imports not available and not in CI -SHOULD_SKIP = not IMPORTS_AVAILABLE and not IN_CI -SKIP_REASON = "Required imports not available and not in CI environment" - - -@pytest.fixture -def temp_config_dir(): - """Creates a temporary directory for the config file.""" - with tempfile.TemporaryDirectory() as tmpdir: - yield Path(tmpdir) - - -@pytest.fixture -def mock_config(): - """Return a Config instance with mocked file operations.""" - with patch('cli_code.config.Config._load_dotenv', create=True), \ - patch('cli_code.config.Config._ensure_config_exists', create=True), \ - patch('cli_code.config.Config._load_config', create=True, return_value={}), \ - patch('cli_code.config.Config._apply_env_vars', create=True): - config = Config() - # Set some test data - config.config = { - "google_api_key": "test-google-key", - "default_provider": "gemini", - "default_model": "models/gemini-1.0-pro", - "ollama_api_url": "http://localhost:11434", - "ollama_default_model": "llama2", - "settings": { - "max_tokens": 1000, - "temperature": 0.7, - } - } - yield config - - -@pytest.mark.skipif(SHOULD_SKIP, reason=SKIP_REASON) -@pytest.mark.requires_yaml -def test_get_credential(mock_config): - """Test get_credential method.""" - # Skip if not available and not in CI - if not hasattr(mock_config, "get_credential"): - pytest.skip("get_credential method not available") - - # Test existing provider - assert mock_config.get_credential("google") == "test-google-key" - - # Test non-existing provider - assert mock_config.get_credential("non_existing") is None - - # Test with empty config - mock_config.config = {} - assert mock_config.get_credential("google") is None - - -@pytest.mark.skipif(SHOULD_SKIP, reason=SKIP_REASON) -@pytest.mark.requires_yaml -def test_set_credential(mock_config): - """Test set_credential method.""" - # Skip if not available and not in CI - if not hasattr(mock_config, "set_credential"): - pytest.skip("set_credential method not available") - - # Test setting existing provider - mock_config.set_credential("google", "new-google-key") - assert mock_config.config["google_api_key"] == "new-google-key" - - # Test setting new provider - mock_config.set_credential("openai", "test-openai-key") - assert mock_config.config["openai_api_key"] == "test-openai-key" - - # Test with None value - mock_config.set_credential("google", None) - assert mock_config.config["google_api_key"] is None - - -@pytest.mark.skipif(SHOULD_SKIP, reason=SKIP_REASON) -@pytest.mark.requires_yaml -def test_get_default_provider(mock_config): - """Test get_default_provider method.""" - # Skip if not available and not in CI - if not hasattr(mock_config, "get_default_provider"): - pytest.skip("get_default_provider method not available") - - # Test with existing provider - assert 
mock_config.get_default_provider() == "gemini" - - # Test with no provider set - mock_config.config["default_provider"] = None - assert mock_config.get_default_provider() == "gemini" # Should return default - - # Test with empty config - mock_config.config = {} - assert mock_config.get_default_provider() == "gemini" # Should return default - - -@pytest.mark.skipif(SHOULD_SKIP, reason=SKIP_REASON) -@pytest.mark.requires_yaml -def test_set_default_provider(mock_config): - """Test set_default_provider method.""" - # Skip if not available and not in CI - if not hasattr(mock_config, "set_default_provider"): - pytest.skip("set_default_provider method not available") - - # Test setting valid provider - mock_config.set_default_provider("openai") - assert mock_config.config["default_provider"] == "openai" - - # Test setting None (should use default) - mock_config.set_default_provider(None) - assert mock_config.config["default_provider"] == "gemini" - - -@pytest.mark.skipif(SHOULD_SKIP, reason=SKIP_REASON) -@pytest.mark.requires_yaml -def test_get_default_model(mock_config): - """Test get_default_model method.""" - # Skip if not available and not in CI - if not hasattr(mock_config, "get_default_model"): - pytest.skip("get_default_model method not available") - - # Test without provider (use default provider) - assert mock_config.get_default_model() == "models/gemini-1.0-pro" - - # Test with specific provider - assert mock_config.get_default_model("ollama") == "llama2" - - # Test with non-existing provider - assert mock_config.get_default_model("non_existing") is None - - -@pytest.mark.skipif(SHOULD_SKIP, reason=SKIP_REASON) -@pytest.mark.requires_yaml -def test_set_default_model(mock_config): - """Test set_default_model method.""" - # Skip if not available and not in CI - if not hasattr(mock_config, "set_default_model"): - pytest.skip("set_default_model method not available") - - # Test with default provider - mock_config.set_default_model("new-model") - assert mock_config.config["default_model"] == "new-model" - - # Test with specific provider - mock_config.set_default_model("new-ollama-model", "ollama") - assert mock_config.config["ollama_default_model"] == "new-ollama-model" - - # Test with new provider - mock_config.set_default_model("anthropic-model", "anthropic") - assert mock_config.config["anthropic_default_model"] == "anthropic-model" - - -@pytest.mark.skipif(SHOULD_SKIP, reason=SKIP_REASON) -@pytest.mark.requires_yaml -def test_get_setting(mock_config): - """Test get_setting method.""" - # Skip if not available and not in CI - if not hasattr(mock_config, "get_setting"): - pytest.skip("get_setting method not available") - - # Test existing setting - assert mock_config.get_setting("max_tokens") == 1000 - assert mock_config.get_setting("temperature") == 0.7 - - # Test non-existing setting with default - assert mock_config.get_setting("non_existing", "default_value") == "default_value" - - # Test with empty settings - mock_config.config["settings"] = {} - assert mock_config.get_setting("max_tokens", 2000) == 2000 - - -@pytest.mark.skipif(SHOULD_SKIP, reason=SKIP_REASON) -@pytest.mark.requires_yaml -def test_set_setting(mock_config): - """Test set_setting method.""" - # Skip if not available and not in CI - if not hasattr(mock_config, "set_setting"): - pytest.skip("set_setting method not available") - - # Test updating existing setting - mock_config.set_setting("max_tokens", 2000) - assert mock_config.config["settings"]["max_tokens"] == 2000 - - # Test adding new setting - 
mock_config.set_setting("new_setting", "new_value") - assert mock_config.config["settings"]["new_setting"] == "new_value" - - # Test with no settings dict - mock_config.config.pop("settings") - mock_config.set_setting("test_setting", "test_value") - assert mock_config.config["settings"]["test_setting"] == "test_value" - - -@pytest.mark.skipif(SHOULD_SKIP, reason=SKIP_REASON) -@pytest.mark.requires_yaml -def test_save_config(): - """Test _save_config method.""" - if not IMPORTS_AVAILABLE: - pytest.skip("Required imports not available") - - with patch('builtins.open', mock_open()) as mock_file, \ - patch('yaml.dump') as mock_yaml_dump, \ - patch('cli_code.config.Config._load_dotenv', create=True), \ - patch('cli_code.config.Config._ensure_config_exists', create=True), \ - patch('cli_code.config.Config._load_config', create=True, return_value={}), \ - patch('cli_code.config.Config._apply_env_vars', create=True): - - config = Config() - if not hasattr(config, "_save_config"): - pytest.skip("_save_config method not available") - - config.config = {"test": "data"} - config._save_config() - - mock_file.assert_called_once() - mock_yaml_dump.assert_called_once_with({"test": "data"}, mock_file(), default_flow_style=False) - - -@pytest.mark.skipif(SHOULD_SKIP, reason=SKIP_REASON) -@pytest.mark.requires_yaml -def test_save_config_error(): - """Test error handling in _save_config method.""" - if not IMPORTS_AVAILABLE: - pytest.skip("Required imports not available") - - with patch('builtins.open', side_effect=PermissionError("Permission denied")), \ - patch('cli_code.config.log.error', create=True) as mock_log_error, \ - patch('cli_code.config.Config._load_dotenv', create=True), \ - patch('cli_code.config.Config._ensure_config_exists', create=True), \ - patch('cli_code.config.Config._load_config', create=True, return_value={}), \ - patch('cli_code.config.Config._apply_env_vars', create=True): - - config = Config() - if not hasattr(config, "_save_config"): - pytest.skip("_save_config method not available") - - config._save_config() - - # Verify error was logged - assert mock_log_error.called \ No newline at end of file diff --git a/test_dir/test_directory_tools.py b/test_dir/test_directory_tools.py deleted file mode 100644 index 745bef8..0000000 --- a/test_dir/test_directory_tools.py +++ /dev/null @@ -1,263 +0,0 @@ -""" -Tests for directory tools module. 
-""" -import os -import subprocess -import pytest -from unittest.mock import patch, MagicMock, mock_open - -# Direct import for coverage tracking -import src.cli_code.tools.directory_tools -from src.cli_code.tools.directory_tools import CreateDirectoryTool, LsTool - - -def test_create_directory_tool_init(): - """Test CreateDirectoryTool initialization.""" - tool = CreateDirectoryTool() - assert tool.name == "create_directory" - assert "Creates a new directory" in tool.description - - -@patch("os.path.exists") -@patch("os.path.isdir") -@patch("os.makedirs") -def test_create_directory_success(mock_makedirs, mock_isdir, mock_exists): - """Test successful directory creation.""" - # Configure mocks - mock_exists.return_value = False - - # Create tool and execute - tool = CreateDirectoryTool() - result = tool.execute("new_directory") - - # Verify - assert "Successfully created directory" in result - mock_makedirs.assert_called_once() - - -@patch("os.path.exists") -@patch("os.path.isdir") -def test_create_directory_already_exists(mock_isdir, mock_exists): - """Test handling when directory already exists.""" - # Configure mocks - mock_exists.return_value = True - mock_isdir.return_value = True - - # Create tool and execute - tool = CreateDirectoryTool() - result = tool.execute("existing_directory") - - # Verify - assert "Directory already exists" in result - - -@patch("os.path.exists") -@patch("os.path.isdir") -def test_create_directory_path_not_dir(mock_isdir, mock_exists): - """Test handling when path exists but is not a directory.""" - # Configure mocks - mock_exists.return_value = True - mock_isdir.return_value = False - - # Create tool and execute - tool = CreateDirectoryTool() - result = tool.execute("not_a_directory") - - # Verify - assert "Path exists but is not a directory" in result - - -def test_create_directory_parent_access(): - """Test blocking access to parent directories.""" - tool = CreateDirectoryTool() - result = tool.execute("../outside_directory") - - # Verify - assert "Invalid path" in result - assert "Cannot access parent directories" in result - - -@patch("os.makedirs") -def test_create_directory_os_error(mock_makedirs): - """Test handling of OSError during directory creation.""" - # Configure mock to raise OSError - mock_makedirs.side_effect = OSError("Permission denied") - - # Create tool and execute - tool = CreateDirectoryTool() - result = tool.execute("protected_directory") - - # Verify - assert "Error creating directory" in result - assert "Permission denied" in result - - -@patch("os.makedirs") -def test_create_directory_unexpected_error(mock_makedirs): - """Test handling of unexpected errors during directory creation.""" - # Configure mock to raise an unexpected error - mock_makedirs.side_effect = ValueError("Unexpected error") - - # Create tool and execute - tool = CreateDirectoryTool() - result = tool.execute("problem_directory") - - # Verify - assert "Error creating directory" in result - - -def test_ls_tool_init(): - """Test LsTool initialization.""" - tool = LsTool() - assert tool.name == "ls" - assert "Lists the contents of a specified directory" in tool.description - assert isinstance(tool.args_schema, dict) - assert "path" in tool.args_schema - - -@patch("subprocess.run") -def test_ls_success(mock_run): - """Test successful directory listing.""" - # Configure mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "total 12\ndrwxr-xr-x 2 user group 4096 Jan 1 10:00 folder1\n-rw-r--r-- 1 user group 1234 Jan 1 10:00 file1.txt" - 
-    mock_run.return_value = mock_process
-
-    # Create tool and execute
-    tool = LsTool()
-    result = tool.execute("test_dir")
-
-    # Verify
-    assert "folder1" in result
-    assert "file1.txt" in result
-    mock_run.assert_called_once()
-    assert mock_run.call_args[0][0] == ["ls", "-lA", "test_dir"]
-
-
-@patch("subprocess.run")
-def test_ls_default_dir(mock_run):
-    """Test ls with default directory."""
-    # Configure mock
-    mock_process = MagicMock()
-    mock_process.returncode = 0
-    mock_process.stdout = "listing content"
-    mock_run.return_value = mock_process
-
-    # Create tool and execute with no path
-    tool = LsTool()
-    result = tool.execute()
-
-    # Verify default directory used
-    mock_run.assert_called_once()
-    assert mock_run.call_args[0][0] == ["ls", "-lA", "."]
-
-
-def test_ls_invalid_path():
-    """Test ls with path attempting to access parent directory."""
-    tool = LsTool()
-    result = tool.execute("../outside_dir")
-
-    # Verify
-    assert "Invalid path" in result
-    assert "Cannot access parent directories" in result
-
-
-@patch("subprocess.run")
-def test_ls_directory_not_found(mock_run):
-    """Test handling when directory is not found."""
-    # Configure mock
-    mock_process = MagicMock()
-    mock_process.returncode = 1
-    mock_process.stderr = "ls: cannot access 'nonexistent_dir': No such file or directory"
-    mock_run.return_value = mock_process
-
-    # Create tool and execute
-    tool = LsTool()
-    result = tool.execute("nonexistent_dir")
-
-    # Verify
-    assert "Directory not found" in result
-
-
-@patch("subprocess.run")
-def test_ls_truncate_long_output(mock_run):
-    """Test truncation of long directory listings."""
-    # Create a long listing (more than 100 lines)
-    long_listing = "\n".join([f"file{i}.txt" for i in range(150)])
-
-    # Configure mock
-    mock_process = MagicMock()
-    mock_process.returncode = 0
-    mock_process.stdout = long_listing
-    mock_run.return_value = mock_process
-
-    # Create tool and execute
-    tool = LsTool()
-    result = tool.execute("big_dir")
-
-    # Verify truncation
-    assert "output truncated" in result
-    # Should only have 101 lines (100 files + truncation message)
-    assert len(result.splitlines()) <= 101
-
-
-@patch("subprocess.run")
-def test_ls_generic_error(mock_run):
-    """Test handling of generic errors."""
-    # Configure mock
-    mock_process = MagicMock()
-    mock_process.returncode = 2
-    mock_process.stderr = "ls: some generic error"
-    mock_run.return_value = mock_process
-
-    # Create tool and execute
-    tool = LsTool()
-    result = tool.execute("problem_dir")
-
-    # Verify
-    assert "Error executing ls command" in result
-    assert "Code: 2" in result
-
-
-@patch("subprocess.run")
-def test_ls_command_not_found(mock_run):
-    """Test handling when ls command is not found."""
-    # Configure mock
-    mock_run.side_effect = FileNotFoundError("No such file or directory: 'ls'")
-
-    # Create tool and execute
-    tool = LsTool()
-    result = tool.execute()
-
-    # Verify
-    assert "'ls' command not found" in result
-
-
-@patch("subprocess.run")
-def test_ls_timeout(mock_run):
-    """Test handling of ls command timeout."""
-    # Configure mock
-    mock_run.side_effect = subprocess.TimeoutExpired(cmd="ls", timeout=15)
-
-    # Create tool and execute
-    tool = LsTool()
-    result = tool.execute()
-
-    # Verify
-    assert "ls command timed out" in result
-
-
-@patch("subprocess.run")
-def test_ls_unexpected_error(mock_run):
-    """Test handling of unexpected errors during ls command."""
-    # Configure mock
-    mock_run.side_effect = Exception("Something unexpected happened")
-
-    # Create tool and execute
-    tool = LsTool()
-    result = tool.execute()
-
-    # Verify
-    assert "An unexpected error occurred" in result
-    assert "Something unexpected happened" in result
\ No newline at end of file
diff --git a/test_dir/test_file_tools.py b/test_dir/test_file_tools.py
deleted file mode 100644
index 8eb39da..0000000
--- a/test_dir/test_file_tools.py
+++ /dev/null
@@ -1,436 +0,0 @@
-"""
-Tests for file tools module to improve code coverage.
-"""
-import os
-import tempfile
-import pytest
-from unittest.mock import patch, MagicMock, mock_open
-
-# Direct import for coverage tracking
-import src.cli_code.tools.file_tools
-from src.cli_code.tools.file_tools import ViewTool, EditTool, GrepTool, GlobTool
-
-
-@pytest.fixture
-def temp_file():
-    """Create a temporary file for testing."""
-    with tempfile.NamedTemporaryFile(mode="w+", delete=False) as temp:
-        temp.write("Line 1\nLine 2\nLine 3\nTest pattern\nLine 5\n")
-        temp_name = temp.name
-
-    yield temp_name
-
-    # Clean up
-    if os.path.exists(temp_name):
-        os.unlink(temp_name)
-
-
-@pytest.fixture
-def temp_dir():
-    """Create a temporary directory for testing."""
-    temp_dir = tempfile.mkdtemp()
-
-    # Create some test files in the temp directory
-    for i in range(3):
-        file_path = os.path.join(temp_dir, f"test_file_{i}.txt")
-        with open(file_path, "w") as f:
-            f.write(f"Content for file {i}\nTest pattern in file {i}\n")
-
-    # Create a subdirectory with files
-    subdir = os.path.join(temp_dir, "subdir")
-    os.makedirs(subdir)
-    with open(os.path.join(subdir, "subfile.txt"), "w") as f:
-        f.write("Content in subdirectory\n")
-
-    yield temp_dir
-
-    # Clean up is handled by pytest
-
-
-# ViewTool Tests
-def test_view_tool_init():
-    """Test ViewTool initialization."""
-    tool = ViewTool()
-    assert tool.name == "view"
-    assert "View specific sections" in tool.description
-
-
-def test_view_entire_file(temp_file):
-    """Test viewing an entire file."""
-    tool = ViewTool()
-    result = tool.execute(temp_file)
-
-    assert "Full Content" in result
-    assert "Line 1" in result
-    assert "Line 5" in result
-
-
-def test_view_with_offset_limit(temp_file):
-    """Test viewing a specific section of a file."""
-    tool = ViewTool()
-    result = tool.execute(temp_file, offset=2, limit=2)
-
-    assert "Lines 2-3" in result
-    assert "Line 2" in result
-    assert "Line 3" in result
-    assert "Line 1" not in result
-    assert "Line 5" not in result
-
-
-def test_view_file_not_found():
-    """Test viewing a non-existent file."""
-    tool = ViewTool()
-    result = tool.execute("nonexistent_file.txt")
-
-    assert "Error: File not found" in result
-
-
-def test_view_directory():
-    """Test attempting to view a directory."""
-    tool = ViewTool()
-    result = tool.execute(os.path.dirname(__file__))
-
-    assert "Error: Cannot view a directory" in result
-
-
-def test_view_parent_directory_traversal():
-    """Test attempting to access parent directory."""
-    tool = ViewTool()
-    result = tool.execute("../outside_file.txt")
-
-    assert "Error: Invalid file path" in result
-    assert "Cannot access parent directories" in result
-
-
-@patch("os.path.getsize")
-def test_view_large_file_without_offset(mock_getsize, temp_file):
-    """Test viewing a large file without offset/limit."""
-    # Mock file size to exceed the limit
-    mock_getsize.return_value = 60 * 1024  # Greater than MAX_CHARS_FOR_FULL_CONTENT
-
-    tool = ViewTool()
-    result = tool.execute(temp_file)
-
-    assert "Error: File" in result
-    assert "is large" in result
-    assert "summarize_code" in result
-
-
-def test_view_empty_file():
-    """Test viewing an empty file."""
-    with
tempfile.NamedTemporaryFile(mode="w+", delete=False) as temp: - temp_name = temp.name - - try: - tool = ViewTool() - result = tool.execute(temp_name) - - assert "Full Content" in result - assert "File is empty" in result - finally: - os.unlink(temp_name) - - -@patch("os.path.exists") -@patch("os.path.isfile") -@patch("os.path.getsize") -@patch("builtins.open") -def test_view_with_exception(mock_open, mock_getsize, mock_isfile, mock_exists): - """Test handling exceptions during file viewing.""" - # Configure mocks to pass initial checks - mock_exists.return_value = True - mock_isfile.return_value = True - mock_getsize.return_value = 100 # Small file - mock_open.side_effect = Exception("Test error") - - tool = ViewTool() - result = tool.execute("some_file.txt") - - assert "Error viewing file" in result - # The error message may include the exception details - # Just check for a generic error message - assert "error" in result.lower() - - -# EditTool Tests -def test_edit_tool_init(): - """Test EditTool initialization.""" - tool = EditTool() - assert tool.name == "edit" - assert "Edit or create a file" in tool.description - - -def test_edit_create_new_file_with_content(): - """Test creating a new file with content.""" - with tempfile.TemporaryDirectory() as temp_dir: - file_path = os.path.join(temp_dir, "new_file.txt") - - tool = EditTool() - result = tool.execute(file_path, content="Test content") - - assert "Successfully wrote content" in result - - # Verify the file was created with correct content - with open(file_path, "r") as f: - content = f.read() - - assert content == "Test content" - - -def test_edit_existing_file_with_content(temp_file): - """Test overwriting an existing file with new content.""" - tool = EditTool() - result = tool.execute(temp_file, content="New content") - - assert "Successfully wrote content" in result - - # Verify the file was overwritten - with open(temp_file, "r") as f: - content = f.read() - - assert content == "New content" - - -def test_edit_replace_string(temp_file): - """Test replacing a string in a file.""" - tool = EditTool() - result = tool.execute(temp_file, old_string="Line 3", new_string="Modified Line 3") - - assert "Successfully replaced first occurrence" in result - - # Verify the replacement - with open(temp_file, "r") as f: - content = f.read() - - assert "Modified Line 3" in content - # This may fail if the implementation doesn't do an exact match - # Let's check that "Line 3" was replaced rather than the count - assert "Line 1" in content - assert "Line 2" in content - assert "Line 3" not in content or "Modified Line 3" in content - - -def test_edit_delete_string(temp_file): - """Test deleting a string from a file.""" - tool = EditTool() - result = tool.execute(temp_file, old_string="Line 3\n", new_string="") - - assert "Successfully deleted first occurrence" in result - - # Verify the deletion - with open(temp_file, "r") as f: - content = f.read() - - assert "Line 3" not in content - - -def test_edit_string_not_found(temp_file): - """Test replacing a string that doesn't exist.""" - tool = EditTool() - result = tool.execute(temp_file, old_string="NonExistentString", new_string="Replacement") - - assert "Error: `old_string` not found" in result - - -def test_edit_create_empty_file(): - """Test creating an empty file.""" - with tempfile.TemporaryDirectory() as temp_dir: - file_path = os.path.join(temp_dir, "empty_file.txt") - - tool = EditTool() - result = tool.execute(file_path) - - assert "Successfully created/emptied file" in result - - # 
Verify the file was created and is empty - assert os.path.exists(file_path) - assert os.path.getsize(file_path) == 0 - - -def test_edit_replace_in_nonexistent_file(): - """Test replacing text in a non-existent file.""" - tool = EditTool() - result = tool.execute("nonexistent_file.txt", old_string="old", new_string="new") - - assert "Error: File not found for replacement" in result - - -def test_edit_invalid_arguments(): - """Test edit with invalid argument combinations.""" - tool = EditTool() - result = tool.execute("test.txt", old_string="test") - - assert "Error: Invalid arguments" in result - - -def test_edit_parent_directory_traversal(): - """Test attempting to edit a file with parent directory traversal.""" - tool = EditTool() - result = tool.execute("../outside_file.txt", content="test") - - assert "Error: Invalid file path" in result - - -def test_edit_directory(): - """Test attempting to edit a directory.""" - tool = EditTool() - with patch("builtins.open", side_effect=IsADirectoryError("Is a directory")): - result = tool.execute("test_dir", content="test") - - assert "Error: Cannot edit a directory" in result - - -@patch("os.path.exists") -@patch("os.path.dirname") -@patch("os.makedirs") -def test_edit_create_in_new_directory(mock_makedirs, mock_dirname, mock_exists): - """Test creating a file in a non-existent directory.""" - # Setup mocks - mock_exists.return_value = False - mock_dirname.return_value = "/test/path" - - with patch("builtins.open", mock_open()) as mock_file: - tool = EditTool() - result = tool.execute("/test/path/file.txt", content="test content") - - # Verify directory was created - mock_makedirs.assert_called_once() - assert "Successfully wrote content" in result - - -def test_edit_with_exception(): - """Test handling exceptions during file editing.""" - with patch("builtins.open", side_effect=Exception("Test error")): - tool = EditTool() - result = tool.execute("test.txt", content="test") - - assert "Error editing file" in result - assert "Test error" in result - - -# GrepTool Tests -def test_grep_tool_init(): - """Test GrepTool initialization.""" - tool = GrepTool() - assert tool.name == "grep" - assert "Search for a pattern" in tool.description - - -def test_grep_matches(temp_dir): - """Test finding matches with grep.""" - tool = GrepTool() - result = tool.execute(pattern="Test pattern", path=temp_dir) - - # The actual output format may depend on implementation - assert "test_file_0.txt" in result - assert "test_file_1.txt" in result - assert "test_file_2.txt" in result - assert "Test pattern" in result - - -def test_grep_no_matches(temp_dir): - """Test grep with no matches.""" - tool = GrepTool() - result = tool.execute(pattern="NonExistentPattern", path=temp_dir) - - assert "No matches found" in result - - -def test_grep_with_include(temp_dir): - """Test grep with include filter.""" - tool = GrepTool() - result = tool.execute(pattern="Test pattern", path=temp_dir, include="*_1.txt") - - # The actual output format may depend on implementation - assert "test_file_1.txt" in result - assert "Test pattern" in result - assert "test_file_0.txt" not in result - assert "test_file_2.txt" not in result - - -def test_grep_invalid_path(): - """Test grep with an invalid path.""" - tool = GrepTool() - result = tool.execute(pattern="test", path="../outside") - - assert "Error: Invalid path" in result - - -def test_grep_not_a_directory(): - """Test grep on a file instead of a directory.""" - with tempfile.NamedTemporaryFile() as temp_file: - tool = GrepTool() - result = 
tool.execute(pattern="test", path=temp_file.name) - - assert "Error: Path is not a directory" in result - - -def test_grep_invalid_regex(): - """Test grep with an invalid regex.""" - tool = GrepTool() - result = tool.execute(pattern="[", path=".") - - assert "Error: Invalid regex pattern" in result - - -# GlobTool Tests -def test_glob_tool_init(): - """Test GlobTool initialization.""" - tool = GlobTool() - assert tool.name == "glob" - assert "Find files/directories matching" in tool.description - - -@patch("glob.glob") -def test_glob_find_files(mock_glob, temp_dir): - """Test finding files with glob.""" - # Mock glob to return all files including subdirectory - mock_paths = [ - os.path.join(temp_dir, "test_file_0.txt"), - os.path.join(temp_dir, "test_file_1.txt"), - os.path.join(temp_dir, "test_file_2.txt"), - os.path.join(temp_dir, "subdir", "subfile.txt") - ] - mock_glob.return_value = mock_paths - - tool = GlobTool() - result = tool.execute(pattern="*.txt", path=temp_dir) - - # Check for all files - for file_path in mock_paths: - assert os.path.basename(file_path) in result - - -def test_glob_no_matches(temp_dir): - """Test glob with no matches.""" - tool = GlobTool() - result = tool.execute(pattern="*.jpg", path=temp_dir) - - assert "No files or directories found" in result - - -def test_glob_invalid_path(): - """Test glob with an invalid path.""" - tool = GlobTool() - result = tool.execute(pattern="*.txt", path="../outside") - - assert "Error: Invalid path" in result - - -def test_glob_not_a_directory(): - """Test glob with a file instead of a directory.""" - with tempfile.NamedTemporaryFile() as temp_file: - tool = GlobTool() - result = tool.execute(pattern="*", path=temp_file.name) - - assert "Error: Path is not a directory" in result - - -def test_glob_with_exception(): - """Test handling exceptions during glob.""" - with patch("glob.glob", side_effect=Exception("Test error")): - tool = GlobTool() - result = tool.execute(pattern="*.txt") - - assert "Error finding files" in result - assert "Test error" in result \ No newline at end of file diff --git a/test_dir/test_gemini_model.py b/test_dir/test_gemini_model.py deleted file mode 100644 index 28fc71f..0000000 --- a/test_dir/test_gemini_model.py +++ /dev/null @@ -1,377 +0,0 @@ -""" -Tests specifically for the GeminiModel class to improve code coverage. 
-""" - -import os -import json -import sys -import unittest -from unittest.mock import patch, MagicMock, mock_open, call -import pytest -from pathlib import Path - -# Add the src directory to the path for imports -sys.path.insert(0, str(Path(__file__).parent.parent)) - -# Check if running in CI -IN_CI = os.environ.get('CI', 'false').lower() == 'true' - -# Handle imports -try: - from rich.console import Console - import google.generativeai as genai - from src.cli_code.models.gemini import GeminiModel - from src.cli_code.tools.base import BaseTool - from src.cli_code.tools import AVAILABLE_TOOLS - IMPORTS_AVAILABLE = True -except ImportError: - IMPORTS_AVAILABLE = False - # Create dummy classes for type checking - GeminiModel = MagicMock - Console = MagicMock - genai = MagicMock - -# Set up conditional skipping -SHOULD_SKIP_TESTS = not IMPORTS_AVAILABLE and not IN_CI -SKIP_REASON = "Required imports not available and not in CI" - - -@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason=SKIP_REASON) -class TestGeminiModel: - """Test suite for GeminiModel class, focusing on previously uncovered methods.""" - - def setup_method(self): - """Set up test fixtures.""" - # Mock genai module - self.genai_configure_patch = patch('google.generativeai.configure') - self.mock_genai_configure = self.genai_configure_patch.start() - - self.genai_model_patch = patch('google.generativeai.GenerativeModel') - self.mock_genai_model_class = self.genai_model_patch.start() - self.mock_model_instance = MagicMock() - self.mock_genai_model_class.return_value = self.mock_model_instance - - self.genai_list_models_patch = patch('google.generativeai.list_models') - self.mock_genai_list_models = self.genai_list_models_patch.start() - - # Mock console - self.mock_console = MagicMock(spec=Console) - - # Keep get_tool patch here if needed by other tests, or move into tests - self.get_tool_patch = patch('src.cli_code.models.gemini.get_tool') - self.mock_get_tool = self.get_tool_patch.start() - # Configure default mock tool behavior if needed by other tests - self.mock_tool = MagicMock() - self.mock_tool.execute.return_value = "Default tool output" - self.mock_get_tool.return_value = self.mock_tool - - def teardown_method(self): - """Tear down test fixtures.""" - self.genai_configure_patch.stop() - self.genai_model_patch.stop() - self.genai_list_models_patch.stop() - # REMOVED stops for os/glob/open mocks - self.get_tool_patch.stop() - - def test_initialization(self): - """Test initialization of GeminiModel.""" - model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") - - # Check if genai was configured correctly - self.mock_genai_configure.assert_called_once_with(api_key="fake-api-key") - - # Check if model instance was created correctly - self.mock_genai_model_class.assert_called_once() - assert model.api_key == "fake-api-key" - assert model.current_model_name == "gemini-2.5-pro-exp-03-25" - - # Check history initialization - assert len(model.history) == 2 # System prompt and initial model response - - def test_initialize_model_instance(self): - """Test model instance initialization.""" - model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") - - # Call the method directly to test - model._initialize_model_instance() - - # Verify model was created with correct parameters - self.mock_genai_model_class.assert_called_with( - model_name="gemini-2.5-pro-exp-03-25", - generation_config=model.generation_config, - safety_settings=model.safety_settings, - 
system_instruction=model.system_instruction - ) - - def test_list_models(self): - """Test listing available models.""" - # Set up mock response - mock_model1 = MagicMock() - mock_model1.name = "models/gemini-pro" - mock_model1.display_name = "Gemini Pro" - mock_model1.description = "A powerful model" - mock_model1.supported_generation_methods = ["generateContent"] - - mock_model2 = MagicMock() - mock_model2.name = "models/gemini-2.5-pro-exp-03-25" - mock_model2.display_name = "Gemini 2.5 Pro" - mock_model2.description = "An experimental model" - mock_model2.supported_generation_methods = ["generateContent"] - - self.mock_genai_list_models.return_value = [mock_model1, mock_model2] - - model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") - result = model.list_models() - - # Verify list_models was called - self.mock_genai_list_models.assert_called_once() - - # Verify result format - assert len(result) == 2 - assert result[0]["id"] == "models/gemini-pro" - assert result[0]["name"] == "Gemini Pro" - assert result[1]["id"] == "models/gemini-2.5-pro-exp-03-25" - - def test_get_initial_context_with_rules_dir(self, tmp_path): - """Test getting initial context from .rules directory using tmp_path.""" - # Arrange: Create .rules dir and files - rules_dir = tmp_path / ".rules" - rules_dir.mkdir() - (rules_dir / "context.md").write_text("# Rule context") - (rules_dir / "tools.md").write_text("# Rule tools") - - original_cwd = os.getcwd() - os.chdir(tmp_path) - - # Act - # Create model instance within the test CWD context - model = GeminiModel("fake-api-key", self.mock_console, "gemini-pro") - context = model._get_initial_context() - - # Teardown - os.chdir(original_cwd) - - # Assert - assert "Project rules and guidelines:" in context - assert "# Content from context.md" in context - assert "# Rule context" in context - assert "# Content from tools.md" in context - assert "# Rule tools" in context - - def test_get_initial_context_with_readme(self, tmp_path): - """Test getting initial context from README.md using tmp_path.""" - # Arrange: Create README.md - readme_content = "# Project Readme Content" - (tmp_path / "README.md").write_text(readme_content) - - original_cwd = os.getcwd() - os.chdir(tmp_path) - - # Act - model = GeminiModel("fake-api-key", self.mock_console, "gemini-pro") - context = model._get_initial_context() - - # Teardown - os.chdir(original_cwd) - - # Assert - assert "Project README:" in context - assert readme_content in context - - def test_get_initial_context_with_ls_fallback(self, tmp_path): - """Test getting initial context via ls fallback using tmp_path.""" - # Arrange: tmp_path is empty - (tmp_path / "dummy_for_ls.txt").touch() # Add a file for ls to find - - mock_ls_tool = MagicMock() - ls_output = "dummy_for_ls.txt\n" - mock_ls_tool.execute.return_value = ls_output - - original_cwd = os.getcwd() - os.chdir(tmp_path) - - # Act: Patch get_tool locally - # Note: GeminiModel imports get_tool directly - with patch('src.cli_code.models.gemini.get_tool') as mock_get_tool: - mock_get_tool.return_value = mock_ls_tool - model = GeminiModel("fake-api-key", self.mock_console, "gemini-pro") - context = model._get_initial_context() - - # Teardown - os.chdir(original_cwd) - - # Assert - mock_get_tool.assert_called_once_with("ls") - mock_ls_tool.execute.assert_called_once() - assert "Current directory contents" in context - assert ls_output in context - - def test_create_tool_definitions(self): - """Test creation of tool definitions for Gemini.""" - # Create a mock 
for AVAILABLE_TOOLS - with patch('src.cli_code.models.gemini.AVAILABLE_TOOLS', new={ - "test_tool": MagicMock() - }): - # Mock the tool instance that will be created - mock_tool_instance = MagicMock() - mock_tool_instance.get_function_declaration.return_value = { - "name": "test_tool", - "description": "A test tool", - "parameters": { - "param1": {"type": "string", "description": "A string parameter"}, - "param2": {"type": "integer", "description": "An integer parameter"} - }, - "required": ["param1"] - } - - # Mock the tool class to return our mock instance - mock_tool_class = MagicMock(return_value=mock_tool_instance) - - # Update the mocked AVAILABLE_TOOLS - with patch('src.cli_code.models.gemini.AVAILABLE_TOOLS', new={ - "test_tool": mock_tool_class - }): - model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") - tools = model._create_tool_definitions() - - # Verify tools format - assert len(tools) == 1 - assert tools[0]["name"] == "test_tool" - assert "description" in tools[0] - assert "parameters" in tools[0] - - def test_create_system_prompt(self): - """Test creation of system prompt.""" - model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") - prompt = model._create_system_prompt() - - # Verify prompt contains expected content - assert "function calling capabilities" in prompt - assert "System Prompt for CLI-Code" in prompt - - def test_manage_context_window(self): - """Test context window management.""" - model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") - - # Add many messages to force context truncation - for i in range(30): - model.add_to_history({"role": "user", "parts": [f"Test message {i}"]}) - model.add_to_history({"role": "model", "parts": [f"Test response {i}"]}) - - # Record initial length - initial_length = len(model.history) - - # Call context management - model._manage_context_window() - - # Verify history was truncated - assert len(model.history) < initial_length - - def test_extract_text_from_response(self): - """Test extracting text from Gemini response.""" - model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") - - # Create mock response with text - mock_response = MagicMock() - mock_response.parts = [{"text": "Response text"}] - - # Extract text - result = model._extract_text_from_response(mock_response) - - # Verify extraction - assert result == "Response text" - - def test_find_last_model_text(self): - """Test finding last model text in history.""" - model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") - - # Clear history - model.history = [] - - # Add history entries - model.add_to_history({"role": "user", "parts": ["User message 1"]}) - model.add_to_history({"role": "model", "parts": ["Model response 1"]}) - model.add_to_history({"role": "user", "parts": ["User message 2"]}) - model.add_to_history({"role": "model", "parts": ["Model response 2"]}) - - # Find last model text - result = model._find_last_model_text(model.history) - - # Verify result - assert result == "Model response 2" - - def test_add_to_history(self): - """Test adding messages to history.""" - model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") - - # Clear history - model.history = [] - - # Add a message - entry = {"role": "user", "parts": ["Test message"]} - model.add_to_history(entry) - - # Verify message was added - assert len(model.history) == 1 - assert model.history[0] == entry - - def test_clear_history(self): 
- """Test clearing history.""" - model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") - - # Add a message - model.add_to_history({"role": "user", "parts": ["Test message"]}) - - # Clear history - model.clear_history() - - # Verify history was cleared - assert len(model.history) == 0 - - def test_get_help_text(self): - """Test getting help text.""" - model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") - help_text = model._get_help_text() - - # Verify help text content - assert "CLI-Code Assistant Help" in help_text - assert "Commands" in help_text - - def test_generate_with_function_calls(self): - """Test generate method with function calls.""" - # Set up mock response with function call - mock_response = MagicMock() - mock_response.candidates = [MagicMock()] - mock_response.candidates[0].content = MagicMock() - mock_response.candidates[0].content.parts = [ - { - "functionCall": { - "name": "test_tool", - "args": {"param1": "value1"} - } - } - ] - mock_response.candidates[0].finish_reason = "FUNCTION_CALL" - - # Set up model instance to return the mock response - self.mock_model_instance.generate_content.return_value = mock_response - - # Mock tool execution - tool_mock = MagicMock() - tool_mock.execute.return_value = "Tool execution result" - self.mock_get_tool.return_value = tool_mock - - # Create model - model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") - - # Call generate - result = model.generate("Test prompt") - - # Verify model was called - self.mock_model_instance.generate_content.assert_called() - - # Verify tool execution - tool_mock.execute.assert_called_with(param1="value1") - - # There should be a second call to generate_content with the tool result - assert self.mock_model_instance.generate_content.call_count >= 2 \ No newline at end of file diff --git a/test_dir/test_gemini_model_advanced.py b/test_dir/test_gemini_model_advanced.py deleted file mode 100644 index 29d9785..0000000 --- a/test_dir/test_gemini_model_advanced.py +++ /dev/null @@ -1,324 +0,0 @@ -""" -Tests specifically for the GeminiModel class targeting advanced scenarios and edge cases -to improve code coverage on complex methods like generate(). 
-""" - -import os -import json -import sys -from unittest.mock import patch, MagicMock, mock_open, call, ANY -import pytest - -# Check if running in CI -IN_CI = os.environ.get('CI', 'false').lower() == 'true' - -# Handle imports -try: - from cli_code.models.gemini import GeminiModel, MAX_AGENT_ITERATIONS - from rich.console import Console - import google.generativeai as genai - IMPORTS_AVAILABLE = True -except ImportError: - IMPORTS_AVAILABLE = False - # Create dummy classes for type checking - GeminiModel = MagicMock - Console = MagicMock - genai = MagicMock - MAX_AGENT_ITERATIONS = 10 - -# Set up conditional skipping -SHOULD_SKIP_TESTS = not IMPORTS_AVAILABLE and not IN_CI -SKIP_REASON = "Required imports not available and not in CI" - - -@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason=SKIP_REASON) -class TestGeminiModelAdvanced: - """Test suite for GeminiModel class focusing on complex methods and edge cases.""" - - def setup_method(self): - """Set up test fixtures.""" - # Mock genai module - self.genai_configure_patch = patch('google.generativeai.configure') - self.mock_genai_configure = self.genai_configure_patch.start() - - self.genai_model_patch = patch('google.generativeai.GenerativeModel') - self.mock_genai_model_class = self.genai_model_patch.start() - self.mock_model_instance = MagicMock() - self.mock_genai_model_class.return_value = self.mock_model_instance - - # Mock console - self.mock_console = MagicMock(spec=Console) - - # Mock tool-related components - self.get_tool_patch = patch('cli_code.models.gemini.get_tool') - self.mock_get_tool = self.get_tool_patch.start() - - # Default tool mock - self.mock_tool = MagicMock() - self.mock_tool.execute.return_value = "Tool execution result" - self.mock_get_tool.return_value = self.mock_tool - - # Mock initial context method to avoid complexity - self.get_initial_context_patch = patch.object( - GeminiModel, '_get_initial_context', return_value="Initial context") - self.mock_get_initial_context = self.get_initial_context_patch.start() - - # Create model instance - self.model = GeminiModel("fake-api-key", self.mock_console, "gemini-2.5-pro-exp-03-25") - - def teardown_method(self): - """Tear down test fixtures.""" - self.genai_configure_patch.stop() - self.genai_model_patch.stop() - self.get_tool_patch.stop() - self.get_initial_context_patch.stop() - - def test_generate_command_handling(self): - """Test command handling in generate method.""" - # Test /exit command - result = self.model.generate("/exit") - assert result is None - - # Test /help command - result = self.model.generate("/help") - assert "Commands available" in result - - def test_generate_with_text_response(self): - """Test generate method with a simple text response.""" - # Mock the LLM response to return a simple text - mock_response = MagicMock() - mock_candidate = MagicMock() - mock_content = MagicMock() - mock_text_part = MagicMock() - - mock_text_part.text = "This is a simple text response." - mock_content.parts = [mock_text_part] - mock_candidate.content = mock_content - mock_response.candidates = [mock_candidate] - - self.mock_model_instance.generate_content.return_value = mock_response - - # Call generate - result = self.model.generate("Tell me something interesting") - - # Verify calls - self.mock_model_instance.generate_content.assert_called_once() - assert "This is a simple text response." 
in result - - def test_generate_with_function_call(self): - """Test generate method with a function call response.""" - # Set up mock response with function call - mock_response = MagicMock() - mock_candidate = MagicMock() - mock_content = MagicMock() - - # Create function call part - mock_function_part = MagicMock() - mock_function_part.text = None - mock_function_part.function_call = MagicMock() - mock_function_part.function_call.name = "ls" - mock_function_part.function_call.args = {"dir": "."} - - # Create text part for after function execution - mock_text_part = MagicMock() - mock_text_part.text = "Here are the directory contents." - - mock_content.parts = [mock_function_part, mock_text_part] - mock_candidate.content = mock_content - mock_response.candidates = [mock_candidate] - - # Set initial response - self.mock_model_instance.generate_content.return_value = mock_response - - # Create a second response for after function execution - mock_response2 = MagicMock() - mock_candidate2 = MagicMock() - mock_content2 = MagicMock() - mock_text_part2 = MagicMock() - - mock_text_part2.text = "Function executed successfully. Here's the result." - mock_content2.parts = [mock_text_part2] - mock_candidate2.content = mock_content2 - mock_response2.candidates = [mock_candidate2] - - # Set up mock to return different responses on successive calls - self.mock_model_instance.generate_content.side_effect = [mock_response, mock_response2] - - # Call generate - result = self.model.generate("List the files in this directory") - - # Verify tool was looked up and executed - self.mock_get_tool.assert_called_with("ls") - self.mock_tool.execute.assert_called_once() - - # Verify final response - assert "Function executed successfully" in result - - def test_generate_task_complete_tool(self): - """Test generate method with task_complete tool call.""" - # Set up mock response with task_complete function call - mock_response = MagicMock() - mock_candidate = MagicMock() - mock_content = MagicMock() - - # Create function call part - mock_function_part = MagicMock() - mock_function_part.text = None - mock_function_part.function_call = MagicMock() - mock_function_part.function_call.name = "task_complete" - mock_function_part.function_call.args = {"summary": "Task completed successfully!"} - - mock_content.parts = [mock_function_part] - mock_candidate.content = mock_content - mock_response.candidates = [mock_candidate] - - # Set the response - self.mock_model_instance.generate_content.return_value = mock_response - - # Call generate - result = self.model.generate("Complete this task") - - # Verify result contains the summary - assert "Task completed successfully!" 
in result - - def test_generate_with_empty_candidates(self): - """Test generate method with empty candidates response.""" - # Mock response with no candidates - mock_response = MagicMock() - mock_response.candidates = [] - - self.mock_model_instance.generate_content.return_value = mock_response - - # Call generate - result = self.model.generate("Generate something") - - # Verify error handling - assert "(Agent received response with no candidates)" in result - - def test_generate_with_empty_content(self): - """Test generate method with empty content in candidate.""" - # Mock response with empty content - mock_response = MagicMock() - mock_candidate = MagicMock() - mock_candidate.content = None - mock_response.candidates = [mock_candidate] - - self.mock_model_instance.generate_content.return_value = mock_response - - # Call generate - result = self.model.generate("Generate something") - - # Verify error handling - assert "(Agent received response candidate with no content/parts)" in result - - def test_generate_with_api_error(self): - """Test generate method when API throws an error.""" - # Mock API error - api_error_message = "API Error" - self.mock_model_instance.generate_content.side_effect = Exception(api_error_message) - - # Call generate - result = self.model.generate("Generate something") - - # Verify error handling with specific assertions - assert "Error calling Gemini API:" in result - assert api_error_message in result - - def test_generate_max_iterations(self): - """Test generate method with maximum iterations reached.""" - # Set up a response that will always include a function call, forcing iterations - mock_response = MagicMock() - mock_candidate = MagicMock() - mock_content = MagicMock() - - # Create function call part - mock_function_part = MagicMock() - mock_function_part.text = None - mock_function_part.function_call = MagicMock() - mock_function_part.function_call.name = "ls" - mock_function_part.function_call.args = {"dir": "."} - - mock_content.parts = [mock_function_part] - mock_candidate.content = mock_content - mock_response.candidates = [mock_candidate] - - # Make the model always return a function call - self.mock_model_instance.generate_content.return_value = mock_response - - # Call generate - result = self.model.generate("List files recursively") - - # Verify we hit the max iterations - assert self.mock_model_instance.generate_content.call_count <= MAX_AGENT_ITERATIONS + 1 - assert "Maximum iterations reached" in result - - def test_generate_with_multiple_tools_per_response(self): - """Test generate method with multiple tool calls in a single response.""" - # Set up mock response with multiple function calls - mock_response = MagicMock() - mock_candidate = MagicMock() - mock_content = MagicMock() - - # Create first function call part - mock_function_part1 = MagicMock() - mock_function_part1.text = None - mock_function_part1.function_call = MagicMock() - mock_function_part1.function_call.name = "ls" - mock_function_part1.function_call.args = {"dir": "."} - - # Create second function call part - mock_function_part2 = MagicMock() - mock_function_part2.text = None - mock_function_part2.function_call = MagicMock() - mock_function_part2.function_call.name = "view" - mock_function_part2.function_call.args = {"file_path": "file.txt"} - - # Create text part - mock_text_part = MagicMock() - mock_text_part.text = "Here are the results." 
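# Hypothetical helper sketch: the tests above build their MagicMock responses
# inline each time. Assuming only the candidates -> content -> parts shape used
# there (each part exposing .text and optionally .function_call), a small
# factory along these lines could produce the same fixtures in one call:
from unittest.mock import MagicMock


def make_gemini_response(text=None, function_name=None, function_args=None):
    """Build a MagicMock shaped like a single-part Gemini response."""
    part = MagicMock()
    part.text = text
    if function_name is not None:
        part.function_call = MagicMock()
        part.function_call.name = function_name
        part.function_call.args = function_args or {}
    else:
        part.function_call = None  # text-only part, no tool call
    content = MagicMock()
    content.parts = [part]
    candidate = MagicMock()
    candidate.content = content
    response = MagicMock()
    response.candidates = [candidate]
    return response


# Usage mirroring the fixtures above:
# first_turn = make_gemini_response(function_name="ls", function_args={"dir": "."})
# final_turn = make_gemini_response(text="All functions executed.")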
- - mock_content.parts = [mock_function_part1, mock_function_part2, mock_text_part] - mock_candidate.content = mock_content - mock_response.candidates = [mock_candidate] - - # Set up second response for after function execution - mock_response2 = MagicMock() - mock_candidate2 = MagicMock() - mock_content2 = MagicMock() - mock_text_part2 = MagicMock() - - mock_text_part2.text = "All functions executed." - mock_content2.parts = [mock_text_part2] - mock_candidate2.content = mock_content2 - mock_response2.candidates = [mock_candidate2] - - # Set up mock to return different responses - self.mock_model_instance.generate_content.side_effect = [mock_response, mock_response2] - - # Call generate - result = self.model.generate("List files and view a file") - - # Verify only the first function is executed (since we only process one per turn) - self.mock_get_tool.assert_called_with("ls") - self.mock_tool.execute.assert_called_once_with() # Verify no arguments are passed - - def test_manage_context_window_truncation(self): - """Test specific context window management truncation with many messages.""" - # Add many messages to history - for i in range(40): # More than MAX_HISTORY_TURNS - self.model.add_to_history({"role": "user", "parts": [f"Test message {i}"]}) - self.model.add_to_history({"role": "model", "parts": [f"Test response {i}"]}) - - # Record length before management - initial_length = len(self.model.history) - - # Call the management function - self.model._manage_context_window() - - # Verify truncation occurred - assert len(self.model.history) < initial_length - - # Verify the first message is still the system prompt with specific content check - assert "System Prompt" in str(self.model.history[0]) - assert "function calling capabilities" in str(self.model.history[0]) - assert "CLI-Code" in str(self.model.history[0]) \ No newline at end of file diff --git a/test_dir/test_gemini_model_coverage.py b/test_dir/test_gemini_model_coverage.py deleted file mode 100644 index 60e6e51..0000000 --- a/test_dir/test_gemini_model_coverage.py +++ /dev/null @@ -1,426 +0,0 @@ -""" -Tests specifically for the GeminiModel class to improve code coverage. -This file focuses on increasing coverage for the generate method and its edge cases. 
-""" - -import os -import json -import unittest -from unittest.mock import patch, MagicMock, mock_open, call, PropertyMock -import pytest - -# Check if running in CI -IN_CI = os.environ.get('CI', 'false').lower() == 'true' - -# Handle imports -try: - from cli_code.models.gemini import GeminiModel, MAX_AGENT_ITERATIONS, FALLBACK_MODEL - from rich.console import Console - import google.generativeai as genai - from google.api_core.exceptions import ResourceExhausted - IMPORTS_AVAILABLE = True -except ImportError: - IMPORTS_AVAILABLE = False - # Create dummy classes for type checking - GeminiModel = MagicMock - Console = MagicMock - genai = MagicMock - ResourceExhausted = Exception - -# Set up conditional skipping -SHOULD_SKIP_TESTS = not IMPORTS_AVAILABLE and not IN_CI -SKIP_REASON = "Required imports not available and not in CI" - - -@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason=SKIP_REASON) -class TestGeminiModelGenerateMethod: - """Test suite for GeminiModel generate method, focusing on error paths and edge cases.""" - - def setup_method(self): - """Set up test fixtures.""" - # Mock genai module - self.genai_configure_patch = patch('google.generativeai.configure') - self.mock_genai_configure = self.genai_configure_patch.start() - - self.genai_model_patch = patch('google.generativeai.GenerativeModel') - self.mock_genai_model_class = self.genai_model_patch.start() - self.mock_model_instance = MagicMock() - self.mock_genai_model_class.return_value = self.mock_model_instance - - # Mock console - self.mock_console = MagicMock(spec=Console) - - # Mock get_tool - self.get_tool_patch = patch('cli_code.models.gemini.get_tool') - self.mock_get_tool = self.get_tool_patch.start() - - # Default tool mock - self.mock_tool = MagicMock() - self.mock_tool.execute.return_value = "Tool executed successfully" - self.mock_get_tool.return_value = self.mock_tool - - # Mock questionary confirm - self.mock_confirm = MagicMock() - self.questionary_patch = patch('questionary.confirm', return_value=self.mock_confirm) - self.mock_questionary = self.questionary_patch.start() - - # Mock MAX_AGENT_ITERATIONS to limit loop execution - self.max_iterations_patch = patch('cli_code.models.gemini.MAX_AGENT_ITERATIONS', 1) - self.mock_max_iterations = self.max_iterations_patch.start() - - # Set up basic model - self.model = GeminiModel("fake-api-key", self.mock_console, "gemini-pro") - - # Prepare mock response for basic tests - self.mock_response = MagicMock() - candidate = MagicMock() - content = MagicMock() - - # Set up text part - text_part = MagicMock() - text_part.text = "This is a test response" - - # Set up content parts - content.parts = [text_part] - candidate.content = content - self.mock_response.candidates = [candidate] - - # Setup model to return this response by default - self.mock_model_instance.generate_content.return_value = self.mock_response - - def teardown_method(self): - """Tear down test fixtures.""" - self.genai_configure_patch.stop() - self.genai_model_patch.stop() - self.get_tool_patch.stop() - self.questionary_patch.stop() - self.max_iterations_patch.stop() - - def test_generate_with_exit_command(self): - """Test generating with /exit command.""" - result = self.model.generate("/exit") - assert result is None - - def test_generate_with_help_command(self): - """Test generating with /help command.""" - result = self.model.generate("/help") - assert "Interactive Commands:" in result - - def test_generate_with_simple_text_response(self): - """Test basic text response generation.""" - # Create a simple 
text-only response - mock_response = MagicMock() - mock_candidate = MagicMock() - mock_content = MagicMock() - - # Set up text part that doesn't trigger function calls - mock_text_part = MagicMock() - mock_text_part.text = "This is a test response" - mock_text_part.function_call = None # Ensure no function call - - # Set up content parts with only text - mock_content.parts = [mock_text_part] - mock_candidate.content = mock_content - mock_response.candidates = [mock_candidate] - - # Make generate_content return our simple response - self.mock_model_instance.generate_content.return_value = mock_response - - # Run the test - result = self.model.generate("Tell me about Python") - - # Verify the call and response - self.mock_model_instance.generate_content.assert_called_once() - assert "This is a test response" in result - - def test_generate_with_empty_candidates(self): - """Test handling of empty candidates in response.""" - # Prepare empty candidates - empty_response = MagicMock() - empty_response.candidates = [] - self.mock_model_instance.generate_content.return_value = empty_response - - result = self.model.generate("Hello") - - assert "Error: Empty response received from LLM" in result - - def test_generate_with_empty_content(self): - """Test handling of empty content in response candidate.""" - # Prepare empty content - empty_response = MagicMock() - empty_candidate = MagicMock() - empty_candidate.content = None - empty_response.candidates = [empty_candidate] - self.mock_model_instance.generate_content.return_value = empty_response - - result = self.model.generate("Hello") - - assert "(Agent received response candidate with no content/parts)" in result - - def test_generate_with_function_call(self): - """Test generating with function call in response.""" - # Create function call part - function_call_response = MagicMock() - candidate = MagicMock() - content = MagicMock() - - function_part = MagicMock() - function_part.function_call = MagicMock() - function_part.function_call.name = "ls" - function_part.function_call.args = {"path": "."} - - content.parts = [function_part] - candidate.content = content - function_call_response.candidates = [candidate] - - self.mock_model_instance.generate_content.return_value = function_call_response - - # Execute - result = self.model.generate("List files") - - # Verify tool was called - self.mock_get_tool.assert_called_with("ls") - self.mock_tool.execute.assert_called_with(path=".") - - def test_generate_with_missing_tool(self): - """Test handling when tool is not found.""" - # Create function call part for non-existent tool - function_call_response = MagicMock() - candidate = MagicMock() - content = MagicMock() - - function_part = MagicMock() - function_part.function_call = MagicMock() - function_part.function_call.name = "nonexistent_tool" - function_part.function_call.args = {} - - content.parts = [function_part] - candidate.content = content - function_call_response.candidates = [candidate] - - self.mock_model_instance.generate_content.return_value = function_call_response - - # Set up get_tool to return None - self.mock_get_tool.return_value = None - - # Execute - result = self.model.generate("Use nonexistent tool") - - # Verify error handling - self.mock_get_tool.assert_called_with("nonexistent_tool") - # Just check that the result contains the error indication - assert "nonexistent_tool" in result - assert "not available" in result.lower() or "not found" in result.lower() - - def test_generate_with_tool_execution_error(self): - """Test handling 
when tool execution raises an error.""" - # Create function call part - function_call_response = MagicMock() - candidate = MagicMock() - content = MagicMock() - - function_part = MagicMock() - function_part.function_call = MagicMock() - function_part.function_call.name = "ls" - function_part.function_call.args = {"path": "."} - - content.parts = [function_part] - candidate.content = content - function_call_response.candidates = [candidate] - - self.mock_model_instance.generate_content.return_value = function_call_response - - # Set up tool to raise exception - self.mock_tool.execute.side_effect = Exception("Tool execution failed") - - # Execute - result = self.model.generate("List files") - - # Verify error handling - self.mock_get_tool.assert_called_with("ls") - # Check that the result contains error information - assert "Error" in result - assert "Tool execution failed" in result - - def test_generate_with_task_complete(self): - """Test handling of task_complete tool call.""" - # Create function call part for task_complete - function_call_response = MagicMock() - candidate = MagicMock() - content = MagicMock() - - function_part = MagicMock() - function_part.function_call = MagicMock() - function_part.function_call.name = "task_complete" - function_part.function_call.args = {"summary": "Task completed successfully"} - - content.parts = [function_part] - candidate.content = content - function_call_response.candidates = [candidate] - - self.mock_model_instance.generate_content.return_value = function_call_response - - # Set up task_complete tool - task_complete_tool = MagicMock() - task_complete_tool.execute.return_value = "Task completed successfully with details" - self.mock_get_tool.return_value = task_complete_tool - - # Execute - result = self.model.generate("Complete task") - - # Verify task completion handling - self.mock_get_tool.assert_called_with("task_complete") - assert result == "Task completed successfully with details" - - def test_generate_with_file_edit_confirmation_accepted(self): - """Test handling of file edit confirmation when accepted.""" - # Create function call part for edit - function_call_response = MagicMock() - candidate = MagicMock() - content = MagicMock() - - function_part = MagicMock() - function_part.function_call = MagicMock() - function_part.function_call.name = "edit" - function_part.function_call.args = { - "file_path": "test.py", - "content": "print('hello world')" - } - - content.parts = [function_part] - candidate.content = content - function_call_response.candidates = [candidate] - - self.mock_model_instance.generate_content.return_value = function_call_response - - # Set up confirmation to return True - self.mock_confirm.ask.return_value = True - - # Execute - result = self.model.generate("Edit test.py") - - # Verify confirmation flow - self.mock_confirm.ask.assert_called_once() - self.mock_get_tool.assert_called_with("edit") - self.mock_tool.execute.assert_called_with(file_path="test.py", content="print('hello world')") - - def test_generate_with_file_edit_confirmation_rejected(self): - """Test handling of file edit confirmation when rejected.""" - # Create function call part for edit - function_call_response = MagicMock() - candidate = MagicMock() - content = MagicMock() - - function_part = MagicMock() - function_part.function_call = MagicMock() - function_part.function_call.name = "edit" - function_part.function_call.args = { - "file_path": "test.py", - "content": "print('hello world')" - } - - content.parts = [function_part] - candidate.content 
= content - function_call_response.candidates = [candidate] - - self.mock_model_instance.generate_content.return_value = function_call_response - - # Set up confirmation to return False - self.mock_confirm.ask.return_value = False - - # Execute - result = self.model.generate("Edit test.py") - - # Verify rejection handling - self.mock_confirm.ask.assert_called_once() - # Tool should not be executed if rejected - self.mock_tool.execute.assert_not_called() - - def test_generate_with_quota_exceeded_fallback(self): - """Test handling of quota exceeded with fallback model.""" - # Temporarily restore MAX_AGENT_ITERATIONS to allow proper fallback - with patch('cli_code.models.gemini.MAX_AGENT_ITERATIONS', 10): - # Create a simple text-only response for the fallback model - mock_response = MagicMock() - mock_candidate = MagicMock() - mock_content = MagicMock() - - # Set up text part - mock_text_part = MagicMock() - mock_text_part.text = "This is a test response" - mock_text_part.function_call = None # Ensure no function call - - # Set up content parts - mock_content.parts = [mock_text_part] - mock_candidate.content = mock_content - mock_response.candidates = [mock_candidate] - - # Set up first call to raise ResourceExhausted, second call to return our mocked response - self.mock_model_instance.generate_content.side_effect = [ - ResourceExhausted("Quota exceeded"), - mock_response - ] - - # Execute - result = self.model.generate("Hello") - - # Verify fallback handling - assert self.model.current_model_name == FALLBACK_MODEL - assert "This is a test response" in result - self.mock_console.print.assert_any_call( - f"[bold yellow]Quota limit reached for gemini-pro. Switching to fallback model ({FALLBACK_MODEL})...[/bold yellow]" - ) - - def test_generate_with_quota_exceeded_on_fallback(self): - """Test handling when quota is exceeded even on fallback model.""" - # Set the current model to already be the fallback - self.model.current_model_name = FALLBACK_MODEL - - # Set up call to raise ResourceExhausted - self.mock_model_instance.generate_content.side_effect = ResourceExhausted("Quota exceeded") - - # Execute - result = self.model.generate("Hello") - - # Verify fallback failure handling - assert "Error: API quota exceeded for primary and fallback models" in result - self.mock_console.print.assert_any_call( - "[bold red]API quota exceeded for primary and fallback models. 
Please check your plan/billing.[/bold red]" - ) - - def test_generate_with_max_iterations_reached(self): - """Test handling when max iterations are reached.""" - # Set up responses to keep returning function calls that don't finish the task - function_call_response = MagicMock() - candidate = MagicMock() - content = MagicMock() - - function_part = MagicMock() - function_part.function_call = MagicMock() - function_part.function_call.name = "ls" - function_part.function_call.args = {"path": "."} - - content.parts = [function_part] - candidate.content = content - function_call_response.candidates = [candidate] - - # Always return a function call that will continue the loop - self.mock_model_instance.generate_content.return_value = function_call_response - - # Patch MAX_AGENT_ITERATIONS to a smaller value for testing - with patch('cli_code.models.gemini.MAX_AGENT_ITERATIONS', 3): - result = self.model.generate("List files recursively") - - # Verify max iterations handling - assert "(Task exceeded max iterations" in result - - def test_generate_with_unexpected_exception(self): - """Test handling of unexpected exceptions.""" - # Set up generate_content to raise an exception - self.mock_model_instance.generate_content.side_effect = Exception("Unexpected error") - - # Execute - result = self.model.generate("Hello") - - # Verify exception handling - assert "Error during agent processing: Unexpected error" in result \ No newline at end of file diff --git a/test_dir/test_gemini_model_error_handling.py b/test_dir/test_gemini_model_error_handling.py deleted file mode 100644 index a85154d..0000000 --- a/test_dir/test_gemini_model_error_handling.py +++ /dev/null @@ -1,681 +0,0 @@ -""" -Tests for the Gemini Model error handling scenarios. -""" -import pytest -import json -from unittest.mock import MagicMock, patch, call -import sys -from pathlib import Path -import logging - -# Import the actual exception class -from google.api_core.exceptions import ResourceExhausted, InvalidArgument - -# Add the src directory to the path for imports -sys.path.insert(0, str(Path(__file__).parent.parent)) - -from rich.console import Console - -# Ensure FALLBACK_MODEL is imported -from src.cli_code.models.gemini import GeminiModel, FALLBACK_MODEL -from src.cli_code.tools.base import BaseTool -from src.cli_code.tools import AVAILABLE_TOOLS - - -class TestGeminiModelErrorHandling: - """Tests for error handling in GeminiModel.""" - - @pytest.fixture - def mock_generative_model(self): - """Mock the Gemini generative model.""" - with patch("src.cli_code.models.gemini.genai.GenerativeModel") as mock_model: - mock_instance = MagicMock() - mock_model.return_value = mock_instance - yield mock_instance - - @pytest.fixture - def gemini_model(self, mock_generative_model): - """Create a GeminiModel instance with mocked dependencies.""" - console = Console() - with patch("src.cli_code.models.gemini.genai") as mock_gm: - # Configure the mock - mock_gm.GenerativeModel = MagicMock() - mock_gm.GenerativeModel.return_value = mock_generative_model - - # Create the model - model = GeminiModel(api_key="fake_api_key", console=console, model_name="gemini-pro") - yield model - - @patch("src.cli_code.models.gemini.genai") - def test_initialization_error(self, mock_gm): - """Test error handling during initialization.""" - # Make the GenerativeModel constructor raise an exception - mock_gm.GenerativeModel.side_effect = Exception("API initialization error") - - # Create a console for the model - console = Console() - - # Attempt to create the model - 
should raise an error - with pytest.raises(Exception) as excinfo: - GeminiModel(api_key="fake_api_key", console=console, model_name="gemini-pro") - - # Verify the error message - assert "API initialization error" in str(excinfo.value) - - def test_empty_prompt_error(self, gemini_model, mock_generative_model): - """Test error handling when an empty prompt is provided.""" - # Call generate with an empty prompt - result = gemini_model.generate("") - - # Verify error message is returned - assert result is not None - assert result == "Error: Cannot process empty prompt. Please provide a valid input." - - # Verify that no API call was made - mock_generative_model.generate_content.assert_not_called() - - def test_api_error_handling(self, gemini_model, mock_generative_model): - """Test handling of API errors during generation.""" - # Make the API call raise an exception - mock_generative_model.generate_content.side_effect = Exception("API error") - - # Call generate - result = gemini_model.generate("Test prompt") - - # Verify error message is returned - assert result is not None - assert "error" in result.lower() - assert "api error" in result.lower() - - def test_rate_limit_error_handling(self, gemini_model, mock_generative_model): - """Test handling of rate limit errors.""" - # Create a rate limit error - rate_limit_error = Exception("Rate limit exceeded") - mock_generative_model.generate_content.side_effect = rate_limit_error - - # Call generate - result = gemini_model.generate("Test prompt") - - # Verify rate limit error message is returned - assert result is not None - assert "rate limit" in result.lower() or "quota" in result.lower() - - def test_invalid_api_key_error(self, gemini_model, mock_generative_model): - """Test handling of invalid API key errors.""" - # Create an authentication error - auth_error = Exception("Invalid API key") - mock_generative_model.generate_content.side_effect = auth_error - - # Call generate - result = gemini_model.generate("Test prompt") - - # Verify authentication error message is returned - assert result is not None - assert "api key" in result.lower() or "authentication" in result.lower() - - def test_model_not_found_error(self, mock_generative_model): - """Test handling of model not found errors.""" - # Create a console for the model - console = Console() - - # Create the model with an invalid model name - with patch("src.cli_code.models.gemini.genai") as mock_gm: - mock_gm.GenerativeModel.side_effect = Exception("Model not found: nonexistent-model") - - # Attempt to create the model - with pytest.raises(Exception) as excinfo: - GeminiModel(api_key="fake_api_key", console=console, model_name="nonexistent-model") - - # Verify the error message - assert "model not found" in str(excinfo.value).lower() - - @patch("src.cli_code.models.gemini.get_tool") - def test_tool_execution_error(self, mock_get_tool, gemini_model, mock_generative_model): - """Test handling of errors during tool execution.""" - # Configure the mock to return a response with a function call - mock_response = MagicMock() - mock_parts = [MagicMock()] - mock_parts[0].text = None # No text - mock_parts[0].function_call = MagicMock() - mock_parts[0].function_call.name = "test_tool" - mock_parts[0].function_call.args = {"arg1": "value1"} - - mock_response.candidates = [MagicMock()] - mock_response.candidates[0].content.parts = mock_parts - - mock_generative_model.generate_content.return_value = mock_response - - # Make the tool execution raise an error - mock_tool = MagicMock() - 
mock_tool.execute.side_effect = Exception("Tool execution error") - mock_get_tool.return_value = mock_tool - - # Call generate - result = gemini_model.generate("Use the test_tool") - - # Verify tool error is handled and included in the response - assert result is not None - assert result == "Error: Tool execution error with test_tool: Tool execution error" - - def test_invalid_function_call_format(self, gemini_model, mock_generative_model): - """Test handling of invalid function call format.""" - # Configure the mock to return a response with an invalid function call - mock_response = MagicMock() - mock_parts = [MagicMock()] - mock_parts[0].text = None # No text - mock_parts[0].function_call = MagicMock() - mock_parts[0].function_call.name = "nonexistent_tool" # Tool doesn't exist - mock_parts[0].function_call.args = {"arg1": "value1"} - - mock_response.candidates = [MagicMock()] - mock_response.candidates[0].content.parts = mock_parts - - mock_generative_model.generate_content.return_value = mock_response - - # Call generate - result = gemini_model.generate("Use a tool") - - # Verify invalid tool error is handled - assert result is not None - assert "tool not found" in result.lower() or "nonexistent_tool" in result.lower() - - def test_missing_required_args(self, gemini_model, mock_generative_model): - """Test handling of function calls with missing required arguments.""" - # Create a mock test tool with required arguments - test_tool = MagicMock() - test_tool.name = "test_tool" - test_tool.execute = MagicMock(side_effect=ValueError("Missing required argument 'required_param'")) - - # Configure the mock to return a response with a function call missing required args - mock_response = MagicMock() - mock_parts = [MagicMock()] - mock_parts[0].text = None # No text - mock_parts[0].function_call = MagicMock() - mock_parts[0].function_call.name = "test_tool" - mock_parts[0].function_call.args = {} # Empty args, missing required ones - - mock_response.candidates = [MagicMock()] - mock_response.candidates[0].content.parts = mock_parts - - mock_generative_model.generate_content.return_value = mock_response - - # Patch the get_tool function to return our test tool - with patch("src.cli_code.models.gemini.get_tool") as mock_get_tool: - mock_get_tool.return_value = test_tool - - # Call generate - result = gemini_model.generate("Use a tool") - - # Verify missing args error is handled - assert result is not None - assert "missing" in result.lower() or "required" in result.lower() or "argument" in result.lower() - - def test_handling_empty_response(self, gemini_model, mock_generative_model): - """Test handling of empty response from the API.""" - # Configure the mock to return an empty response - mock_response = MagicMock() - mock_response.candidates = [] # No candidates - - mock_generative_model.generate_content.return_value = mock_response - - # Call generate - result = gemini_model.generate("Test prompt") - - # Verify empty response is handled - assert result is not None - assert "empty response" in result.lower() or "no response" in result.lower() - - @pytest.fixture - def mock_console(self): - console = MagicMock() - console.print = MagicMock() - console.status = MagicMock() - # Make status return a context manager - status_cm = MagicMock() - console.status.return_value = status_cm - status_cm.__enter__ = MagicMock(return_value=None) - status_cm.__exit__ = MagicMock(return_value=None) - return console - - @pytest.fixture - def mock_genai(self): - genai = MagicMock() - genai.GenerativeModel = 
MagicMock() - return genai - - def test_init_without_api_key(self, mock_console): - """Test initialization when API key is not provided.""" - # Setup - with patch('src.cli_code.models.gemini.log'): - # Execute and expect the ValueError - with pytest.raises(ValueError, match="Gemini API key is required"): - model = GeminiModel(None, mock_console) - - def test_init_with_invalid_api_key(self, mock_console): - """Test initialization with an invalid API key.""" - # Setup - with patch('src.cli_code.models.gemini.log'): - with patch('src.cli_code.models.gemini.genai') as mock_genai: - mock_genai.configure.side_effect = ImportError("No module named 'google.generativeai'") - - # Should raise ConnectionError - with pytest.raises(ConnectionError): - model = GeminiModel("invalid_key", mock_console) - - @patch('src.cli_code.models.gemini.genai') - def test_generate_without_client(self, mock_genai, mock_console): - """Test generate method when the client is not initialized.""" - # Setup - with patch('src.cli_code.models.gemini.log'): - # Create model that will have model=None - model = GeminiModel("valid_key", mock_console) - # Manually set model to None to simulate uninitialized client - model.model = None - - # Execute - result = model.generate("test prompt") - - # Assert - assert "Error" in result and "not initialized" in result - - @patch('src.cli_code.models.gemini.genai') - def test_generate_with_api_error(self, mock_genai, mock_console): - """Test generate method when the API call fails.""" - # Setup - with patch('src.cli_code.models.gemini.log'): - # Create a model with a mock - model = GeminiModel("valid_key", mock_console) - - # Configure the mock to raise an exception - mock_model = MagicMock() - model.model = mock_model - mock_model.generate_content.side_effect = Exception("API Error") - - # Execute - result = model.generate("test prompt") - - # Assert error during agent processing appears - assert "Error during agent processing" in result - - @patch('src.cli_code.models.gemini.genai') - def test_generate_with_safety_block(self, mock_genai, mock_console): - """Test generate method when content is blocked by safety filters.""" - # Setup - with patch('src.cli_code.models.gemini.log'): - model = GeminiModel("valid_key", mock_console) - - # Mock the model - mock_model = MagicMock() - model.model = mock_model - - # Configure the mock to return a blocked response - mock_response = MagicMock() - mock_response.prompt_feedback = MagicMock() - mock_response.prompt_feedback.block_reason = "SAFETY" - mock_response.candidates = [] - mock_model.generate_content.return_value = mock_response - - # Execute - result = model.generate("test prompt") - - # Assert - assert "Empty response" in result or "no candidates" in result.lower() - - @patch('src.cli_code.models.gemini.genai') - @patch('src.cli_code.models.gemini.get_tool') - @patch('src.cli_code.models.gemini.json.loads') - def test_generate_with_invalid_tool_call(self, mock_json_loads, mock_get_tool, mock_genai, mock_console): - """Test generate method with invalid JSON in tool arguments.""" - # Setup - with patch('src.cli_code.models.gemini.log'): - model = GeminiModel("valid_key", mock_console) - - # Configure the mock model - mock_model = MagicMock() - model.model = mock_model - - # Create a mock response with tool calls - mock_response = MagicMock() - mock_response.prompt_feedback = None - mock_response.candidates = [MagicMock()] - mock_part = MagicMock() - mock_part.function_call = MagicMock() - mock_part.function_call.name = "test_tool" - 
mock_part.function_call.args = "invalid_json" - mock_response.candidates[0].content.parts = [mock_part] - mock_model.generate_content.return_value = mock_response - - # Make JSON decoding fail - mock_json_loads.side_effect = json.JSONDecodeError("Expecting value", "", 0) - - # Execute - result = model.generate("test prompt") - - # Assert - assert "Error" in result - - @patch('src.cli_code.models.gemini.genai') - @patch('src.cli_code.models.gemini.get_tool') - def test_generate_with_missing_required_tool_args(self, mock_get_tool, mock_genai, mock_console): - """Test generate method when required tool arguments are missing.""" - # Setup - with patch('src.cli_code.models.gemini.log'): - model = GeminiModel("valid_key", mock_console) - - # Configure the mock model - mock_model = MagicMock() - model.model = mock_model - - # Create a mock response with tool calls - mock_response = MagicMock() - mock_response.prompt_feedback = None - mock_response.candidates = [MagicMock()] - mock_part = MagicMock() - mock_part.function_call = MagicMock() - mock_part.function_call.name = "test_tool" - mock_part.function_call.args = {} # Empty args dict - mock_response.candidates[0].content.parts = [mock_part] - mock_model.generate_content.return_value = mock_response - - # Mock the tool to have required params - tool_mock = MagicMock() - tool_declaration = MagicMock() - tool_declaration.parameters = {"required": ["required_param"]} - tool_mock.get_function_declaration.return_value = tool_declaration - mock_get_tool.return_value = tool_mock - - # Execute - result = model.generate("test prompt") - - # We should get to the max iterations with the tool response - assert "max iterations" in result.lower() - - @patch('src.cli_code.models.gemini.genai') - def test_generate_with_tool_not_found(self, mock_genai, mock_console): - """Test generate method when a requested tool is not found.""" - # Setup - with patch('src.cli_code.models.gemini.log'): - model = GeminiModel("valid_key", mock_console) - - # Configure the mock model - mock_model = MagicMock() - model.model = mock_model - - # Create a mock response with tool calls - mock_response = MagicMock() - mock_response.prompt_feedback = None - mock_response.candidates = [MagicMock()] - mock_part = MagicMock() - mock_part.function_call = MagicMock() - mock_part.function_call.name = "nonexistent_tool" - mock_part.function_call.args = {} - mock_response.candidates[0].content.parts = [mock_part] - mock_model.generate_content.return_value = mock_response - - # Mock get_tool to return None for nonexistent tool - with patch('src.cli_code.models.gemini.get_tool', return_value=None): - # Execute - result = model.generate("test prompt") - - # We should mention the tool not found - assert "not found" in result.lower() or "not available" in result.lower() - - @patch('src.cli_code.models.gemini.genai') - @patch('src.cli_code.models.gemini.get_tool') - def test_generate_with_tool_execution_error(self, mock_get_tool, mock_genai, mock_console): - """Test generate method when a tool execution raises an error.""" - # Setup - with patch('src.cli_code.models.gemini.log'): - model = GeminiModel("valid_key", mock_console) - - # Configure the mock model - mock_model = MagicMock() - model.model = mock_model - - # Create a mock response with tool calls - mock_response = MagicMock() - mock_response.prompt_feedback = None - mock_response.candidates = [MagicMock()] - mock_part = MagicMock() - mock_part.function_call = MagicMock() - mock_part.function_call.name = "test_tool" - 
mock_part.function_call.args = {} - mock_response.candidates[0].content.parts = [mock_part] - mock_model.generate_content.return_value = mock_response - - # Mock the tool to raise an exception - tool_mock = MagicMock() - tool_mock.execute.side_effect = Exception("Tool execution error") - mock_get_tool.return_value = tool_mock - - # Execute - result = model.generate("test prompt") - - # Assert - assert "error" in result.lower() and "tool" in result.lower() - - @patch('src.cli_code.models.gemini.genai') - def test_list_models_error(self, mock_genai, mock_console): - """Test list_models method when an error occurs.""" - # Setup - with patch('src.cli_code.models.gemini.log'): - model = GeminiModel("valid_key", mock_console) - - # Configure the mock to raise an exception - mock_genai.list_models.side_effect = Exception("List models error") - - # Execute - result = model.list_models() - - # Assert - assert result == [] - mock_console.print.assert_called() - - @patch('src.cli_code.models.gemini.genai') - def test_generate_with_empty_response(self, mock_genai, mock_console): - """Test generate method when the API returns an empty response.""" - # Setup - with patch('src.cli_code.models.gemini.log'): - model = GeminiModel("valid_key", mock_console) - - # Configure the mock model - mock_model = MagicMock() - model.model = mock_model - - # Create a response with no candidates - mock_response = MagicMock() - mock_response.prompt_feedback = None - mock_response.candidates = [] # Empty candidates - mock_model.generate_content.return_value = mock_response - - # Execute - result = model.generate("test prompt") - - # Assert - assert "no candidates" in result.lower() - - @patch('src.cli_code.models.gemini.genai') - def test_generate_with_malformed_response(self, mock_genai, mock_console): - """Test generate method when the API returns a malformed response.""" - # Setup - with patch('src.cli_code.models.gemini.log'): - model = GeminiModel("valid_key", mock_console) - - # Configure the mock model - mock_model = MagicMock() - model.model = mock_model - - # Create a malformed response - mock_response = MagicMock() - mock_response.prompt_feedback = None - mock_response.candidates = [MagicMock()] - mock_response.candidates[0].content = None # Missing content - mock_model.generate_content.return_value = mock_response - - # Execute - result = model.generate("test prompt") - - # Assert - assert "no content" in result.lower() or "no parts" in result.lower() - - @patch('src.cli_code.models.gemini.genai') - @patch('src.cli_code.models.gemini.get_tool') - @patch('src.cli_code.models.gemini.questionary') - def test_generate_with_tool_confirmation_rejected(self, mock_questionary, mock_get_tool, mock_genai, mock_console): - """Test generate method when user rejects sensitive tool confirmation.""" - # Setup - with patch('src.cli_code.models.gemini.log'): - model = GeminiModel("valid_key", mock_console, "gemini-pro") # Use the fixture? 
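# Hypothetical sketch of the confirmation gate this test exercises (not the
# real src/cli_code implementation): it assumes only questionary.confirm and a
# tool object exposing .execute, exactly as mocked here, and treats "edit" as
# a sensitive tool that must be confirmed before it runs.
import questionary


def run_tool_with_confirmation(tool, name, args, sensitive=("edit",)):
    """Ask before executing sensitive tools; skip execution on reject/cancel."""
    if name in sensitive:
        answer = questionary.confirm(f"Allow {name} with {args}?").ask()
        if answer is None:    # prompt cancelled; .ask() returns None in that case
            return f"User cancelled confirmation for {name}"
        if not answer:        # user rejected the action
            return f"User rejected {name}"
    return tool.execute(**args)  # confirmed, or not a sensitive tool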
- - # Configure the mock model - mock_model = MagicMock() - model.model = mock_model - - # Mock the tool instance - mock_tool = MagicMock() - mock_get_tool.return_value = mock_tool - - # Mock the confirmation to return False (rejected) - confirm_mock = MagicMock() - confirm_mock.ask.return_value = False - mock_questionary.confirm.return_value = confirm_mock - - # Create a mock response with a sensitive tool call (e.g., edit) - mock_response = MagicMock() - mock_response.prompt_feedback = None - mock_response.candidates = [MagicMock()] - mock_part = MagicMock() - mock_part.function_call = MagicMock() - mock_part.function_call.name = "edit" # Sensitive tool - mock_part.function_call.args = {'file_path': 'test.py', 'content': 'new content'} - mock_response.candidates[0].content.parts = [mock_part] - - # First call returns the function call - mock_model.generate_content.return_value = mock_response - - # Execute - result = model.generate("Edit the file test.py") - - # Assertions - mock_questionary.confirm.assert_called_once() # Check confirm was called - mock_tool.execute.assert_not_called() # Tool should NOT be executed - # The agent loop might continue or timeout, check for rejection message in history/result - # Depending on loop continuation logic, it might hit max iterations or return the rejection text - assert "rejected" in result.lower() or "maximum iterations" in result.lower() - - @patch('src.cli_code.models.gemini.genai') - @patch('src.cli_code.models.gemini.get_tool') - @patch('src.cli_code.models.gemini.questionary') - def test_generate_with_tool_confirmation_cancelled(self, mock_questionary, mock_get_tool, mock_genai, mock_console): - """Test generate method when user cancels sensitive tool confirmation.""" - # Setup - with patch('src.cli_code.models.gemini.log'): - model = GeminiModel("valid_key", mock_console, "gemini-pro") - - # Configure the mock model - mock_model = MagicMock() - model.model = mock_model - - # Mock the tool instance - mock_tool = MagicMock() - mock_get_tool.return_value = mock_tool - - # Mock the confirmation to return None (cancelled) - confirm_mock = MagicMock() - confirm_mock.ask.return_value = None - mock_questionary.confirm.return_value = confirm_mock - - # Create a mock response with a sensitive tool call (e.g., edit) - mock_response = MagicMock() - mock_response.prompt_feedback = None - mock_response.candidates = [MagicMock()] - mock_part = MagicMock() - mock_part.function_call = MagicMock() - mock_part.function_call.name = "edit" # Sensitive tool - mock_part.function_call.args = {'file_path': 'test.py', 'content': 'new content'} - mock_response.candidates[0].content.parts = [mock_part] - - mock_model.generate_content.return_value = mock_response - - # Execute - result = model.generate("Edit the file test.py") - - # Assertions - mock_questionary.confirm.assert_called_once() # Check confirm was called - mock_tool.execute.assert_not_called() # Tool should NOT be executed - assert "cancelled confirmation" in result.lower() - assert "edit on test.py" in result.lower() - -# --- Standalone Test for Quota Fallback --- -@pytest.mark.skip(reason="This test needs to be rewritten with proper mocking of the Gemini API integration path") -def test_generate_with_quota_error_and_fallback_returns_success(): - """Test that GeminiModel falls back to the fallback model on quota error and returns success.""" - with patch('src.cli_code.models.gemini.Console') as mock_console_cls, \ - patch('src.cli_code.models.gemini.genai') as mock_genai, \ - 
patch('src.cli_code.models.gemini.GeminiModel._initialize_model_instance') as mock_init_model, \ - patch('src.cli_code.models.gemini.AVAILABLE_TOOLS', {}) as mock_available_tools, \ - patch('src.cli_code.models.gemini.log') as mock_log: - - # Arrange - mock_console = MagicMock() - mock_console_cls.return_value = mock_console - - # Mocks for the primary and fallback model behaviors - mock_primary_model_instance = MagicMock(name="PrimaryModelInstance") - mock_fallback_model_instance = MagicMock(name="FallbackModelInstance") - - # Configure Mock genai module with ResourceExhausted exception - mock_genai.GenerativeModel.return_value = mock_primary_model_instance - mock_genai.api_core.exceptions.ResourceExhausted = ResourceExhausted - - # Configure the generate_content behavior for the primary mock to raise the ResourceExhausted exception - mock_primary_model_instance.generate_content.side_effect = ResourceExhausted("Quota exhausted") - - # Configure the generate_content behavior for the fallback mock - mock_fallback_response = MagicMock() - mock_fallback_candidate = MagicMock() - mock_fallback_part = MagicMock() - mock_fallback_part.text = "Fallback successful" - mock_fallback_candidate.content = MagicMock() - mock_fallback_candidate.content.parts = [mock_fallback_part] - mock_fallback_response.candidates = [mock_fallback_candidate] - mock_fallback_model_instance.generate_content.return_value = mock_fallback_response - - # Define the side effect for the _initialize_model_instance method - def init_side_effect(*args, **kwargs): - # After the quota error, replace the model with the fallback model - if mock_init_model.call_count > 1: - # Replace the model that will be returned by GenerativeModel - mock_genai.GenerativeModel.return_value = mock_fallback_model_instance - return None - return None - - mock_init_model.side_effect = init_side_effect - - # Setup the GeminiModel instance - gemini_model = GeminiModel(api_key="fake_key", model_name="gemini-1.5-pro-latest", console=mock_console) - - # Create an empty history to allow test to run properly - gemini_model.history = [ - {"role": "user", "parts": [{"text": "test prompt"}]} - ] - - # Act - response = gemini_model.generate("test prompt") - - # Assert - # Check that warning and info logs were called - mock_log.warning.assert_any_call("Quota exceeded for model 'gemini-1.5-pro-latest': 429 Quota exhausted") - mock_log.info.assert_any_call("Switching to fallback model: gemini-1.0-pro") - - # Check initialization was called twice - assert mock_init_model.call_count >= 2 - - # Check that generate_content was called - assert mock_primary_model_instance.generate_content.call_count >= 1 - assert mock_fallback_model_instance.generate_content.call_count >= 1 - - # Check final response - assert response == "Fallback successful" - -# ... (End of file or other tests) ... \ No newline at end of file diff --git a/test_dir/test_main.py b/test_dir/test_main.py deleted file mode 100644 index f41b0be..0000000 --- a/test_dir/test_main.py +++ /dev/null @@ -1,88 +0,0 @@ -""" -Tests for the CLI main module. 
-""" -import pytest -from unittest.mock import patch, MagicMock -from click.testing import CliRunner - -from cli_code.main import cli - - -@pytest.fixture -def mock_config(): - """Fixture to provide a mocked Config object.""" - with patch('cli_code.main.config') as mock_config: - # Set some reasonable default behavior for the config mock - mock_config.get_default_provider.return_value = "gemini" - mock_config.get_default_model.return_value = "gemini-pro" - mock_config.get_credential.return_value = "fake-api-key" - yield mock_config - - -@pytest.fixture -def runner(): - """Fixture to provide a CliRunner instance.""" - return CliRunner() - - -@patch('cli_code.main.start_interactive_session') -def test_cli_default_invocation(mock_start_session, runner, mock_config): - """Test the default CLI invocation starts an interactive session.""" - result = runner.invoke(cli) - assert result.exit_code == 0 - mock_start_session.assert_called_once() - - -def test_setup_command(runner, mock_config): - """Test the setup command.""" - result = runner.invoke(cli, ['setup', '--provider', 'gemini', 'fake-api-key']) - assert result.exit_code == 0 - mock_config.set_credential.assert_called_once_with('gemini', 'fake-api-key') - - -def test_set_default_provider(runner, mock_config): - """Test the set-default-provider command.""" - result = runner.invoke(cli, ['set-default-provider', 'ollama']) - assert result.exit_code == 0 - mock_config.set_default_provider.assert_called_once_with('ollama') - - -def test_set_default_model(runner, mock_config): - """Test the set-default-model command.""" - result = runner.invoke(cli, ['set-default-model', '--provider', 'gemini', 'gemini-pro-vision']) - assert result.exit_code == 0 - mock_config.set_default_model.assert_called_once_with('gemini-pro-vision', provider='gemini') - - -@patch('cli_code.main.GeminiModel') -def test_list_models_gemini(mock_gemini_model, runner, mock_config): - """Test the list-models command for Gemini provider.""" - # Setup mock model instance - mock_instance = MagicMock() - mock_instance.list_models.return_value = [ - {"name": "gemini-pro", "displayName": "Gemini Pro"}, - {"name": "gemini-pro-vision", "displayName": "Gemini Pro Vision"} - ] - mock_gemini_model.return_value = mock_instance - - result = runner.invoke(cli, ['list-models', '--provider', 'gemini']) - assert result.exit_code == 0 - mock_gemini_model.assert_called_once() - mock_instance.list_models.assert_called_once() - - -@patch('cli_code.main.OllamaModel') -def test_list_models_ollama(mock_ollama_model, runner, mock_config): - """Test the list-models command for Ollama provider.""" - # Setup mock model instance - mock_instance = MagicMock() - mock_instance.list_models.return_value = [ - {"name": "llama2", "displayName": "Llama 2"}, - {"name": "mistral", "displayName": "Mistral"} - ] - mock_ollama_model.return_value = mock_instance - - result = runner.invoke(cli, ['list-models', '--provider', 'ollama']) - assert result.exit_code == 0 - mock_ollama_model.assert_called_once() - mock_instance.list_models.assert_called_once() \ No newline at end of file diff --git a/test_dir/test_main_comprehensive.py b/test_dir/test_main_comprehensive.py deleted file mode 100644 index 3628cb1..0000000 --- a/test_dir/test_main_comprehensive.py +++ /dev/null @@ -1,151 +0,0 @@ -""" -Comprehensive tests for the main module to improve coverage. -This file extends the existing tests in test_main.py with more edge cases, -error conditions, and specific code paths that weren't previously tested. 
-""" - -import os -import sys -import unittest -from unittest import mock -from unittest.mock import patch, MagicMock -from typing import Any, Optional, Callable - -# Determine if we're running in CI -IN_CI = os.environ.get('CI', 'false').lower() == 'true' - -# Add the src directory to the path to allow importing cli_code -current_dir = os.path.dirname(os.path.abspath(__file__)) -parent_dir = os.path.dirname(current_dir) -sys.path.insert(0, parent_dir) - -# Import pytest if available, otherwise create dummy markers -try: - import pytest - timeout = pytest.mark.timeout - PYTEST_AVAILABLE = True -except ImportError: - PYTEST_AVAILABLE = False - # Create a dummy timeout decorator if pytest is not available - def timeout(seconds: int) -> Callable: - """Dummy timeout decorator for environments without pytest.""" - def decorator(f: Callable) -> Callable: - return f - return decorator - -# Import click.testing if available, otherwise mock it -try: - from click.testing import CliRunner - CLICK_AVAILABLE = True -except ImportError: - CLICK_AVAILABLE = False - class CliRunner: - """Mock CliRunner for environments where click is not available.""" - def invoke(self, cmd: Any, args: Optional[list] = None) -> Any: - """Mock invoke method.""" - class Result: - exit_code = 0 - output = "" - return Result() - -# Import from main module if available, otherwise skip the tests -try: - from cli_code.main import cli, start_interactive_session, show_help, console - MAIN_MODULE_AVAILABLE = True -except ImportError: - MAIN_MODULE_AVAILABLE = False - # Create placeholder objects for testing - cli = None - start_interactive_session = lambda provider, model_name, console: None # noqa: E731 - show_help = lambda provider: None # noqa: E731 - console = None - -# Skip all tests if any required component is missing -SHOULD_SKIP_TESTS = IN_CI or not all([MAIN_MODULE_AVAILABLE, CLICK_AVAILABLE]) -skip_reason = "Tests skipped in CI or missing dependencies" - - -@unittest.skipIf(SHOULD_SKIP_TESTS, skip_reason) -class TestCliInteractive(unittest.TestCase): - """Basic tests for the main CLI functionality.""" - - def setUp(self) -> None: - """Set up test environment.""" - self.runner = CliRunner() - self.console_patcher = patch('cli_code.main.console') - self.mock_console = self.console_patcher.start() - self.config_patcher = patch('cli_code.main.config') - self.mock_config = self.config_patcher.start() - - # Configure default mock behavior - self.mock_config.get_default_provider.return_value = "gemini" - self.mock_config.get_default_model.return_value = "gemini-pro" - self.mock_config.get_credential.return_value = "fake-api-key" - - def tearDown(self) -> None: - """Clean up after tests.""" - self.console_patcher.stop() - self.config_patcher.stop() - - @timeout(2) - def test_start_interactive_session_with_no_credential(self) -> None: - """Test interactive session when no credential is found.""" - # Override default mock behavior for this test - self.mock_config.get_credential.return_value = None - - # Call function under test - if start_interactive_session and self.mock_console: - start_interactive_session( - provider="gemini", - model_name="gemini-pro", - console=self.mock_console - ) - - # Check expected behavior - very basic check to avoid errors - self.mock_console.print.assert_called() - - @timeout(2) - def test_show_help_function(self) -> None: - """Test the show_help function.""" - with patch('cli_code.main.console') as mock_console: - with patch('cli_code.main.AVAILABLE_TOOLS', {"tool1": None, "tool2": None}): - # Call 
function under test - if show_help: - show_help("gemini") - - # Check expected behavior - mock_console.print.assert_called_once() - - -@unittest.skipIf(SHOULD_SKIP_TESTS, skip_reason) -class TestListModels(unittest.TestCase): - """Tests for the list-models command.""" - - def setUp(self) -> None: - """Set up test environment.""" - self.runner = CliRunner() - self.config_patcher = patch('cli_code.main.config') - self.mock_config = self.config_patcher.start() - - # Configure default mock behavior - self.mock_config.get_default_provider.return_value = "gemini" - self.mock_config.get_credential.return_value = "fake-api-key" - - def tearDown(self) -> None: - """Clean up after tests.""" - self.config_patcher.stop() - - @timeout(2) - def test_list_models_missing_credential(self) -> None: - """Test list-models command when credential is missing.""" - # Override default mock behavior - self.mock_config.get_credential.return_value = None - - # Use basic unittest assertions since we may not have Click in CI - if cli and self.runner: - result = self.runner.invoke(cli, ['list-models']) - self.assertEqual(result.exit_code, 0) - - -if __name__ == "__main__" and not SHOULD_SKIP_TESTS: - unittest.main() \ No newline at end of file diff --git a/test_dir/test_main_edge_cases.py b/test_dir/test_main_edge_cases.py deleted file mode 100644 index 2ff71ab..0000000 --- a/test_dir/test_main_edge_cases.py +++ /dev/null @@ -1,248 +0,0 @@ -""" -Tests for edge cases and additional error handling in the main.py module. -This file focuses on advanced edge cases and error paths not covered in other tests. -""" - -import os -import sys -import unittest -from unittest.mock import patch, MagicMock, mock_open, call -import tempfile -from pathlib import Path - -# Ensure we can import the module -current_dir = os.path.dirname(os.path.abspath(__file__)) -parent_dir = os.path.dirname(current_dir) -if parent_dir not in sys.path: - sys.path.insert(0, parent_dir) - -# Handle missing dependencies gracefully -try: - import pytest - from click.testing import CliRunner - from cli_code.main import cli, start_interactive_session, show_help, console - IMPORTS_AVAILABLE = True -except ImportError: - # Create dummy fixtures and mocks if imports aren't available - IMPORTS_AVAILABLE = False - pytest = MagicMock() - pytest.mark.timeout = lambda seconds: lambda f: f - - class DummyCliRunner: - def invoke(self, *args, **kwargs): - class Result: - exit_code = 0 - output = "" - return Result() - - CliRunner = DummyCliRunner - cli = MagicMock() - start_interactive_session = MagicMock() - show_help = MagicMock() - console = MagicMock() - -# Determine if we're running in CI -IN_CI = os.environ.get('CI', 'false').lower() == 'true' -SHOULD_SKIP_TESTS = not IMPORTS_AVAILABLE or IN_CI - - -@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason="Required imports not available or running in CI") -class TestCliAdvancedErrors: - """Test advanced error handling scenarios in the CLI.""" - - def setup_method(self): - """Set up test fixtures.""" - self.runner = CliRunner() - self.config_patcher = patch('cli_code.main.config') - self.mock_config = self.config_patcher.start() - self.console_patcher = patch('cli_code.main.console') - self.mock_console = self.console_patcher.start() - - # Set default behavior for mocks - self.mock_config.get_default_provider.return_value = "gemini" - self.mock_config.get_default_model.return_value = "gemini-pro" - self.mock_config.get_credential.return_value = "fake-api-key" - - # Patch sys.exit to prevent test from exiting - 
self.exit_patcher = patch('cli_code.main.sys.exit')
-        self.mock_exit = self.exit_patcher.start()
-
-    def teardown_method(self):
-        """Teardown test fixtures."""
-        self.config_patcher.stop()
-        self.console_patcher.stop()
-        self.exit_patcher.stop()
-
-    @pytest.mark.timeout(5)
-    def test_cli_invalid_provider(self):
-        """Test CLI behavior with invalid provider (should never happen due to click.Choice)."""
-        with patch('cli_code.main.config.get_default_provider') as mock_get_provider:
-            # Simulate an invalid provider somehow getting through
-            mock_get_provider.return_value = "invalid-provider"
-
-            # Since the code uses click's Choice validation and has error handling,
-            # we expect it to call exit with code 1
-            result = self.runner.invoke(cli, [])
-
-            # Check error handling occurred
-            assert self.mock_exit.called, "Should call sys.exit for invalid provider"
-
-    @pytest.mark.timeout(5)
-    def test_cli_with_missing_default_model(self):
-        """Test CLI behavior when get_default_model returns None."""
-        self.mock_config.get_default_model.return_value = None
-
-        # This should trigger the error path that calls sys.exit(1)
-        result = self.runner.invoke(cli, [])
-
-        # Should call exit with error
-        self.mock_exit.assert_called_once_with(1)
-
-        # Verify it printed an error message
-        self.mock_console.print.assert_any_call(
-            "[bold red]Error:[/bold red] No default model configured for provider 'gemini' and no model specified with --model."
-        )
-
-    @pytest.mark.timeout(5)
-    def test_cli_with_no_config(self):
-        """Test CLI behavior when config is None."""
-        # Patch cli_code.main.config to be None
-        with patch('cli_code.main.config', None):
-            result = self.runner.invoke(cli, [])
-
-            # Should exit with error
-            self.mock_exit.assert_called_once_with(1)
-
-            # Should print error message
-            self.mock_console.print.assert_called_once_with(
-                "[bold red]Configuration could not be loaded.
Cannot proceed.[/bold red]" - ) - - -@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason="Required imports not available or running in CI") -class TestOllamaSpecificBehavior: - """Test Ollama-specific behavior and edge cases.""" - - def setup_method(self): - """Set up test fixtures.""" - self.runner = CliRunner() - self.config_patcher = patch('cli_code.main.config') - self.mock_config = self.config_patcher.start() - self.console_patcher = patch('cli_code.main.console') - self.mock_console = self.console_patcher.start() - - # Set default behavior for mocks - self.mock_config.get_default_provider.return_value = "ollama" - self.mock_config.get_default_model.return_value = "llama2" - self.mock_config.get_credential.return_value = "http://localhost:11434" - - def teardown_method(self): - """Teardown test fixtures.""" - self.config_patcher.stop() - self.console_patcher.stop() - - @pytest.mark.timeout(5) - def test_setup_ollama_provider(self): - """Test setting up the Ollama provider.""" - # Configure mock_console.print to properly store args - mock_output = [] - self.mock_console.print.side_effect = lambda *args, **kwargs: mock_output.append(' '.join(str(a) for a in args)) - - result = self.runner.invoke(cli, ['setup', '--provider', 'ollama', 'http://localhost:11434']) - - # Check API URL was saved - self.mock_config.set_credential.assert_called_once_with('ollama', 'http://localhost:11434') - - # Check that Ollama-specific messages were shown - assert any('Ollama server' in output for output in mock_output), "Should display Ollama-specific setup notes" - - @pytest.mark.timeout(5) - def test_list_models_ollama(self): - """Test listing models with Ollama provider.""" - # Configure mock_console.print to properly store args - mock_output = [] - self.mock_console.print.side_effect = lambda *args, **kwargs: mock_output.append(' '.join(str(a) for a in args)) - - with patch('cli_code.main.OllamaModel') as mock_ollama: - mock_instance = MagicMock() - mock_instance.list_models.return_value = [ - {"name": "llama2", "id": "llama2"}, - {"name": "mistral", "id": "mistral"} - ] - mock_ollama.return_value = mock_instance - - result = self.runner.invoke(cli, ['list-models']) - - # Should fetch models from Ollama - mock_ollama.assert_called_with( - api_url='http://localhost:11434', - console=self.mock_console, - model_name=None - ) - - # Should print the models - mock_instance.list_models.assert_called_once() - - # Check for expected output elements in the console - assert any('Fetching models' in output for output in mock_output), "Should show fetching models message" - - @pytest.mark.timeout(5) - def test_ollama_connection_error(self): - """Test handling of Ollama connection errors.""" - # Configure mock_console.print to properly store args - mock_output = [] - self.mock_console.print.side_effect = lambda *args, **kwargs: mock_output.append(' '.join(str(a) for a in args)) - - with patch('cli_code.main.OllamaModel') as mock_ollama: - mock_instance = MagicMock() - mock_instance.list_models.side_effect = ConnectionError("Failed to connect to Ollama server") - mock_ollama.return_value = mock_instance - - result = self.runner.invoke(cli, ['list-models']) - - # Should attempt to fetch models - mock_instance.list_models.assert_called_once() - - # Connection error should be handled with log message, - # which we verified in the test run's captured log output - - -@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason="Required imports not available or running in CI") -class TestShowHelpFunction: - """Test the show_help function.""" - 
- def setup_method(self): - """Set up test fixtures.""" - self.console_patcher = patch('cli_code.main.console') - self.mock_console = self.console_patcher.start() - - # Add patch for Panel to prevent errors - self.panel_patcher = patch('cli_code.main.Panel', return_value="Test panel") - self.mock_panel = self.panel_patcher.start() - - def teardown_method(self): - """Teardown test fixtures.""" - self.console_patcher.stop() - self.panel_patcher.stop() - - @pytest.mark.timeout(5) - def test_show_help_function(self): - """Test show_help with different providers.""" - # Test with gemini - show_help("gemini") - - # Test with ollama - show_help("ollama") - - # Test with unknown provider - show_help("unknown_provider") - - # Verify mock_panel was called properly - assert self.mock_panel.call_count >= 3, "Panel should be created for each help call" - - # Verify console.print was called for each help display - assert self.mock_console.print.call_count >= 3, "Help panel should be printed for each provider" - - -if __name__ == "__main__": - unittest.main() \ No newline at end of file diff --git a/test_dir/test_main_improved.py b/test_dir/test_main_improved.py deleted file mode 100644 index b3f23f5..0000000 --- a/test_dir/test_main_improved.py +++ /dev/null @@ -1,448 +0,0 @@ -""" -Improved tests for the main module to increase coverage. -This file focuses on testing error handling, edge cases, and untested code paths. -""" - -import os -import sys -import unittest -from unittest.mock import patch, MagicMock, mock_open, call -import tempfile -from pathlib import Path - -# Ensure we can import the module -current_dir = os.path.dirname(os.path.abspath(__file__)) -parent_dir = os.path.dirname(current_dir) -if parent_dir not in sys.path: - sys.path.insert(0, parent_dir) - -# Handle missing dependencies gracefully -try: - import pytest - from click.testing import CliRunner - from cli_code.main import cli, start_interactive_session, show_help, console - IMPORTS_AVAILABLE = True -except ImportError: - # Create dummy fixtures and mocks if imports aren't available - IMPORTS_AVAILABLE = False - pytest = MagicMock() - pytest.mark.timeout = lambda seconds: lambda f: f - - class DummyCliRunner: - def invoke(self, *args, **kwargs): - class Result: - exit_code = 0 - output = "" - return Result() - - CliRunner = DummyCliRunner - cli = MagicMock() - start_interactive_session = MagicMock() - show_help = MagicMock() - console = MagicMock() - -# Determine if we're running in CI -IN_CI = os.environ.get('CI', 'false').lower() == 'true' -SHOULD_SKIP_TESTS = not IMPORTS_AVAILABLE or IN_CI - - -@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason="Required imports not available or running in CI") -class TestMainErrorHandling: - """Test error handling in the main module.""" - - def setup_method(self): - """Set up test fixtures.""" - self.runner = CliRunner() - self.config_patcher = patch('cli_code.main.config') - self.mock_config = self.config_patcher.start() - self.console_patcher = patch('cli_code.main.console') - self.mock_console = self.console_patcher.start() - - # Set default behavior for mocks - self.mock_config.get_default_provider.return_value = "gemini" - self.mock_config.get_default_model.return_value = "gemini-pro" - self.mock_config.get_credential.return_value = "fake-api-key" - - def teardown_method(self): - """Teardown test fixtures.""" - self.config_patcher.stop() - self.console_patcher.stop() - - @pytest.mark.timeout(5) - def test_cli_with_missing_config(self): - """Test CLI behavior when config is None.""" - with 
patch('cli_code.main.config', None): - with patch('cli_code.main.sys.exit') as mock_exit: - result = self.runner.invoke(cli, []) - mock_exit.assert_called_once_with(1) - - @pytest.mark.timeout(5) - def test_cli_with_missing_model(self): - """Test CLI behavior when no model is provided or configured.""" - # Set up config to return None for get_default_model - self.mock_config.get_default_model.return_value = None - - with patch('cli_code.main.sys.exit') as mock_exit: - result = self.runner.invoke(cli, []) - mock_exit.assert_called_once_with(1) - - @pytest.mark.timeout(5) - def test_setup_with_missing_config(self): - """Test setup command behavior when config is None.""" - with patch('cli_code.main.config', None): - result = self.runner.invoke(cli, ['setup', '--provider', 'gemini', 'api-key']) - assert result.exit_code == 0 - self.mock_console.print.assert_called_with("[bold red]Config error.[/bold red]") - - @pytest.mark.timeout(5) - def test_setup_with_exception(self): - """Test setup command when an exception occurs.""" - self.mock_config.set_credential.side_effect = Exception("Test error") - - result = self.runner.invoke(cli, ['setup', '--provider', 'gemini', 'api-key']) - assert result.exit_code == 0 - - # Check that error was printed - self.mock_console.print.assert_any_call( - "[bold red]Error saving API Key:[/bold red] Test error") - - @pytest.mark.timeout(5) - def test_set_default_provider_with_exception(self): - """Test set-default-provider when an exception occurs.""" - self.mock_config.set_default_provider.side_effect = Exception("Test error") - - result = self.runner.invoke(cli, ['set-default-provider', 'gemini']) - assert result.exit_code == 0 - - # Check that error was printed - self.mock_console.print.assert_any_call( - "[bold red]Error setting default provider:[/bold red] Test error") - - @pytest.mark.timeout(5) - def test_set_default_model_with_exception(self): - """Test set-default-model when an exception occurs.""" - self.mock_config.set_default_model.side_effect = Exception("Test error") - - result = self.runner.invoke(cli, ['set-default-model', 'gemini-pro']) - assert result.exit_code == 0 - - # Check that error was printed - self.mock_console.print.assert_any_call( - "[bold red]Error setting default model for gemini:[/bold red] Test error") - - -@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason="Required imports not available or running in CI") -class TestListModelsCommand: - """Test list-models command thoroughly.""" - - def setup_method(self): - """Set up test fixtures.""" - self.runner = CliRunner() - self.config_patcher = patch('cli_code.main.config') - self.mock_config = self.config_patcher.start() - self.console_patcher = patch('cli_code.main.console') - self.mock_console = self.console_patcher.start() - - # Set default behavior for mocks - self.mock_config.get_default_provider.return_value = "gemini" - self.mock_config.get_credential.return_value = "fake-api-key" - self.mock_config.get_default_model.return_value = "gemini-pro" - - def teardown_method(self): - """Teardown test fixtures.""" - self.config_patcher.stop() - self.console_patcher.stop() - - @pytest.mark.timeout(5) - def test_list_models_with_missing_config(self): - """Test list-models when config is None.""" - with patch('cli_code.main.config', None): - result = self.runner.invoke(cli, ['list-models']) - assert result.exit_code == 0 - self.mock_console.print.assert_called_with("[bold red]Config error.[/bold red]") - - @pytest.mark.timeout(5) - def test_list_models_with_missing_credential(self): - """Test 
list-models when credential is missing.""" - self.mock_config.get_credential.return_value = None - - result = self.runner.invoke(cli, ['list-models', '--provider', 'gemini']) - assert result.exit_code == 0 - - # Check that error was printed - self.mock_console.print.assert_any_call( - "[bold red]Error:[/bold red] Gemini API Key not found.") - - @pytest.mark.timeout(5) - def test_list_models_with_empty_list(self): - """Test list-models when no models are returned.""" - with patch('cli_code.main.GeminiModel') as mock_gemini_model: - mock_instance = MagicMock() - mock_instance.list_models.return_value = [] - mock_gemini_model.return_value = mock_instance - - result = self.runner.invoke(cli, ['list-models', '--provider', 'gemini']) - assert result.exit_code == 0 - - # Check message about no models - self.mock_console.print.assert_any_call( - "[yellow]No models found or reported by provider 'gemini'.[/yellow]") - - @pytest.mark.timeout(5) - def test_list_models_with_exception(self): - """Test list-models when an exception occurs.""" - with patch('cli_code.main.GeminiModel') as mock_gemini_model: - mock_gemini_model.side_effect = Exception("Test error") - - result = self.runner.invoke(cli, ['list-models', '--provider', 'gemini']) - assert result.exit_code == 0 - - # Check error message - self.mock_console.print.assert_any_call( - "[bold red]Error listing models for gemini:[/bold red] Test error") - - @pytest.mark.timeout(5) - def test_list_models_with_unknown_provider(self): - """Test list-models with an unknown provider (custom mock value).""" - # Use mock to override get_default_provider with custom, invalid value - self.mock_config.get_default_provider.return_value = "unknown" - - # Using provider from config (let an unknown response come back) - result = self.runner.invoke(cli, ['list-models']) - assert result.exit_code == 0 - - # Should report unknown provider - self.mock_console.print.assert_any_call( - "[bold red]Error:[/bold red] Unknown provider 'unknown'.") - - -@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason="Required imports not available or running in CI") -class TestInteractiveSession: - """Test interactive session functionality.""" - - def setup_method(self): - """Set up test fixtures.""" - self.config_patcher = patch('cli_code.main.config') - self.mock_config = self.config_patcher.start() - self.console_patcher = patch('cli_code.main.console') - self.mock_console = self.console_patcher.start() - - # Set default behavior for mocks - self.mock_config.get_default_provider.return_value = "gemini" - self.mock_config.get_credential.return_value = "fake-api-key" - - # Add patch for Markdown to prevent errors - self.markdown_patcher = patch('cli_code.main.Markdown', return_value=MagicMock()) - self.mock_markdown = self.markdown_patcher.start() - - def teardown_method(self): - """Teardown test fixtures.""" - self.config_patcher.stop() - self.console_patcher.stop() - self.markdown_patcher.stop() - - @pytest.mark.timeout(5) - def test_interactive_session_with_missing_config(self): - """Test interactive session when config is None.""" - with patch('cli_code.main.config', None): - start_interactive_session( - provider="gemini", - model_name="gemini-pro", - console=self.mock_console - ) - self.mock_console.print.assert_called_with("[bold red]Config error.[/bold red]") - - @pytest.mark.timeout(5) - def test_interactive_session_with_missing_credential(self): - """Test interactive session when credential is missing.""" - self.mock_config.get_credential.return_value = None - - 
start_interactive_session( - provider="gemini", - model_name="gemini-pro", - console=self.mock_console - ) - - # Check that error was printed about missing credential - self.mock_console.print.assert_any_call( - "\n[bold red]Error:[/bold red] Gemini API Key not found.") - - @pytest.mark.timeout(5) - def test_interactive_session_with_model_initialization_error(self): - """Test interactive session when model initialization fails.""" - with patch('cli_code.main.GeminiModel') as mock_gemini_model: - mock_gemini_model.side_effect = Exception("Test error") - - start_interactive_session( - provider="gemini", - model_name="gemini-pro", - console=self.mock_console - ) - - # Check that error was printed - self.mock_console.print.assert_any_call( - "\n[bold red]Error initializing model 'gemini-pro':[/bold red] Test error") - - @pytest.mark.timeout(5) - def test_interactive_session_with_unknown_provider(self): - """Test interactive session with an unknown provider.""" - start_interactive_session( - provider="unknown", - model_name="model-name", - console=self.mock_console - ) - - # Check for unknown provider message - self.mock_console.print.assert_any_call( - "[bold red]Error:[/bold red] Unknown provider 'unknown'. Cannot initialize.") - - @pytest.mark.timeout(5) - def test_context_initialization_with_rules_dir(self): - """Test context initialization with .rules directory.""" - # Set up a directory structure with .rules - with tempfile.TemporaryDirectory() as temp_dir: - # Create .rules directory with some MD files - rules_dir = Path(temp_dir) / ".rules" - rules_dir.mkdir() - (rules_dir / "rule1.md").write_text("Rule 1") - (rules_dir / "rule2.md").write_text("Rule 2") - - # Create a mock agent instance - mock_agent = MagicMock() - mock_agent.generate.return_value = "Mock response" - - # Patch directory checks and os.listdir - with patch('os.path.isdir', return_value=True), \ - patch('os.listdir', return_value=["rule1.md", "rule2.md"]), \ - patch('cli_code.main.GeminiModel', return_value=mock_agent), \ - patch('builtins.open', mock_open(read_data="Mock rule content")): - - # Mock console.input for exit - self.mock_console.input.side_effect = ["/exit"] - - start_interactive_session( - provider="gemini", - model_name="gemini-pro", - console=self.mock_console - ) - - # Check context initialization message - self.mock_console.print.assert_any_call( - "[dim]Context will be initialized from 2 .rules/*.md files.[/dim]") - - @pytest.mark.timeout(5) - def test_context_initialization_with_empty_rules_dir(self): - """Test context initialization with empty .rules directory.""" - # Create a mock agent instance - mock_agent = MagicMock() - mock_agent.generate.return_value = "Mock response" - - with patch('os.path.isdir', return_value=True), \ - patch('os.listdir', return_value=[]), \ - patch('cli_code.main.GeminiModel', return_value=mock_agent): - - # Mock console.input for exit - self.mock_console.input.side_effect = ["/exit"] - - start_interactive_session( - provider="gemini", - model_name="gemini-pro", - console=self.mock_console - ) - - # Check context initialization message - self.mock_console.print.assert_any_call( - "[dim]Context will be initialized from directory listing (ls) - .rules directory exists but contains no .md files.[/dim]") - - @pytest.mark.timeout(5) - def test_context_initialization_with_readme(self): - """Test context initialization with README.md.""" - # Create a mock agent instance - mock_agent = MagicMock() - mock_agent.generate.return_value = "Mock response" - - with 
patch('os.path.isdir', return_value=False), \ - patch('os.path.isfile', return_value=True), \ - patch('cli_code.main.GeminiModel', return_value=mock_agent), \ - patch('builtins.open', mock_open(read_data="Mock README content")): - - # Mock console.input for exit - self.mock_console.input.side_effect = ["/exit"] - - start_interactive_session( - provider="gemini", - model_name="gemini-pro", - console=self.mock_console - ) - - # Check context initialization message - self.mock_console.print.assert_any_call( - "[dim]Context will be initialized from README.md.[/dim]") - - @pytest.mark.timeout(5) - def test_interactive_session_interactions(self): - """Test interactive session user interactions.""" - # Mock the model agent - mock_agent = MagicMock() - # Ensure response is a string to avoid Markdown parsing issues - mock_agent.generate.side_effect = [ - "Response 1", # Regular response - "", # Response to command (empty string instead of None) - "", # Empty response (empty string instead of None) - "Response 4" # Final response - ] - - # Patch GeminiModel to return our mock agent - with patch('cli_code.main.GeminiModel', return_value=mock_agent): - # Mock console.input to simulate user interactions - self.mock_console.input.side_effect = [ - "Hello", # Regular input - "/custom", # Unknown command - "Empty input", # Will get empty response - "/exit" # Exit command - ] - - # Patch Markdown specifically for this test to avoid type errors - with patch('cli_code.main.Markdown', return_value=MagicMock()): - start_interactive_session( - provider="gemini", - model_name="gemini-pro", - console=self.mock_console - ) - - # Verify interactions - assert mock_agent.generate.call_count == 3 # Should be called for all inputs except /exit - self.mock_console.print.assert_any_call("[yellow]Unknown command:[/yellow] /custom") - self.mock_console.print.assert_any_call("[red]Received an empty response from the model.[/red]") - - @pytest.mark.timeout(5) - def test_show_help_command(self): - """Test the /help command in interactive session.""" - # Create a mock agent instance - mock_agent = MagicMock() - mock_agent.generate.return_value = "Mock response" - - # Set up mocks - with patch('cli_code.main.AVAILABLE_TOOLS', {"tool1": None, "tool2": None}): - # Mock console.input to simulate user interactions - self.mock_console.input.side_effect = [ - "/help", # Help command - "/exit" # Exit command - ] - - # Patch start_interactive_session to avoid creating a real model - with patch('cli_code.main.GeminiModel', return_value=mock_agent): - # Call with actual show_help - with patch('cli_code.main.show_help') as mock_show_help: - start_interactive_session( - provider="gemini", - model_name="gemini-pro", - console=self.mock_console - ) - - # Verify show_help was called - mock_show_help.assert_called_once_with("gemini") - - -if __name__ == "__main__" and not SHOULD_SKIP_TESTS: - pytest.main(["-xvs", __file__]) \ No newline at end of file diff --git a/test_dir/test_model_basic.py b/test_dir/test_model_basic.py deleted file mode 100644 index 96713ee..0000000 --- a/test_dir/test_model_basic.py +++ /dev/null @@ -1,368 +0,0 @@ -""" -Tests for basic model functionality that doesn't require API access. -These tests focus on increasing coverage for the model classes. 
-""" - -from unittest import TestCase, skipIf, mock -from unittest.mock import MagicMock, patch -import os -import sys -import json - -# Check if running in CI -IN_CI = os.environ.get('CI', 'false').lower() == 'true' - -# Import necessary modules safely with better error handling -IMPORTS_AVAILABLE = False -IMPORT_ERROR = None - -try: - # Set up mocks for external dependencies before importing model classes - if 'google' not in sys.modules: - mock_google = MagicMock() - mock_google.generativeai = MagicMock() - sys.modules['google'] = mock_google - sys.modules['google.generativeai'] = mock_google.generativeai - - # Mock requests before importing - if 'requests' not in sys.modules: - mock_requests = MagicMock() - sys.modules['requests'] = mock_requests - - # Now try to import the model classes - from cli_code.models.base import AbstractModelAgent - from cli_code.models.gemini import GeminiModelAgent - from cli_code.models.ollama import OllamaModelAgent - IMPORTS_AVAILABLE = True -except ImportError as e: - IMPORT_ERROR = str(e) - # Create dummy classes for type checking - class AbstractModelAgent: pass - class GeminiModelAgent: pass - class OllamaModelAgent: pass - -# Check if we should skip all tests - only skip if imports truly failed -# But in CI, we can still run tests with mocked modules -SHOULD_SKIP_TESTS = not IMPORTS_AVAILABLE and not IN_CI -SKIP_REASON = f"Required model imports not available: {IMPORT_ERROR}" if IMPORT_ERROR else "Required model imports not available" - -@skipIf(SHOULD_SKIP_TESTS, SKIP_REASON) -class TestGeminiModelBasics(TestCase): - """Test basic GeminiModelAgent functionality that doesn't require API calls.""" - - def setUp(self): - """Set up test environment.""" - # Create patches for external dependencies - self.patch_configure = patch('google.generativeai.configure') - self.patch_get_model = patch('google.generativeai.get_model') - - # Start patches - self.mock_configure = self.patch_configure.start() - self.mock_get_model = self.patch_get_model.start() - - # Set up default mock model - self.mock_model = MagicMock() - self.mock_get_model.return_value = self.mock_model - - def tearDown(self): - """Clean up test environment.""" - # Stop patches - self.patch_configure.stop() - self.patch_get_model.stop() - - def test_gemini_init(self): - """Test initialization of GeminiModelAgent.""" - agent = GeminiModelAgent("fake-api-key", "gemini-pro") - - # Verify API key was passed to configure - self.mock_configure.assert_called_once_with(api_key="fake-api-key") - - # Check agent properties - self.assertEqual(agent.model_name, "gemini-pro") - self.assertEqual(agent.api_key, "fake-api-key") - self.assertEqual(agent.history, []) - - def test_gemini_clear_history(self): - """Test history clearing functionality.""" - agent = GeminiModelAgent("fake-api-key", "gemini-pro") - - # Add some fake history - agent.history = [{"role": "user", "parts": ["test message"]}] - - # Clear history - agent.clear_history() - - # Verify history is cleared - self.assertEqual(agent.history, []) - - def test_gemini_add_system_prompt(self): - """Test adding system prompt to history.""" - agent = GeminiModelAgent("fake-api-key", "gemini-pro") - - # Add system prompt - agent.add_system_prompt("I am a helpful AI assistant") - - # Verify system prompt was added to history - self.assertEqual(len(agent.history), 1) - self.assertEqual(agent.history[0]["role"], "model") - self.assertEqual(agent.history[0]["parts"][0]["text"], "I am a helpful AI assistant") - - def test_gemini_append_history(self): - """Test 
appending to history.""" - agent = GeminiModelAgent("fake-api-key", "gemini-pro") - - # Append to history - agent.append_to_history(role="user", content="Hello") - agent.append_to_history(role="model", content="Hi there!") - - # Verify history entries - self.assertEqual(len(agent.history), 2) - self.assertEqual(agent.history[0]["role"], "user") - self.assertEqual(agent.history[0]["parts"][0]["text"], "Hello") - self.assertEqual(agent.history[1]["role"], "model") - self.assertEqual(agent.history[1]["parts"][0]["text"], "Hi there!") - - def test_gemini_chat_generation_parameters(self): - """Test chat generation parameters are properly set.""" - agent = GeminiModelAgent("fake-api-key", "gemini-pro") - - # Setup the mock model's generate_content to return a valid response - mock_response = MagicMock() - mock_content = MagicMock() - mock_content.text = "Generated response" - mock_response.candidates = [MagicMock()] - mock_response.candidates[0].content = mock_content - self.mock_model.generate_content.return_value = mock_response - - # Add some history before chat - agent.add_system_prompt("System prompt") - agent.append_to_history(role="user", content="Hello") - - # Call chat method with custom parameters - response = agent.chat("What can you help me with?", temperature=0.2, max_tokens=1000) - - # Verify the model was called with correct parameters - self.mock_model.generate_content.assert_called_once() - args, kwargs = self.mock_model.generate_content.call_args - - # Check that history was included - self.assertEqual(len(args[0]), 3) # System prompt + user message + new query - - # Check generation parameters - self.assertIn('generation_config', kwargs) - - # Check response handling - self.assertEqual(response, "Generated response") - - def test_gemini_parse_response(self): - """Test parsing different response formats from the Gemini API.""" - agent = GeminiModelAgent("fake-api-key", "gemini-pro") - - # Mock normal response - normal_response = MagicMock() - normal_content = MagicMock() - normal_content.text = "Normal response" - normal_response.candidates = [MagicMock()] - normal_response.candidates[0].content = normal_content - - # Mock empty response - empty_response = MagicMock() - empty_response.candidates = [] - - # Mock response with finish reason not STOP - blocked_response = MagicMock() - blocked_response.candidates = [MagicMock()] - blocked_candidate = blocked_response.candidates[0] - blocked_candidate.content.text = "Blocked content" - blocked_candidate.finish_reason = MagicMock() - blocked_candidate.finish_reason.name = "SAFETY" - - # Test normal response parsing - result = agent._parse_response(normal_response) - self.assertEqual(result, "Normal response") - - # Test empty response parsing - result = agent._parse_response(empty_response) - self.assertEqual(result, "No response generated. 
Please try again.") - - # Test blocked response parsing - result = agent._parse_response(blocked_response) - self.assertEqual(result, "The response was blocked due to: SAFETY") - - def test_gemini_content_handling(self): - """Test content handling for different input types.""" - agent = GeminiModelAgent("fake-api-key", "gemini-pro") - - # Test string content - parts = agent._prepare_content("Hello world") - self.assertEqual(len(parts), 1) - self.assertEqual(parts[0]["text"], "Hello world") - - # Test list content - parts = agent._prepare_content(["Hello", "world"]) - self.assertEqual(len(parts), 2) - self.assertEqual(parts[0]["text"], "Hello") - self.assertEqual(parts[1]["text"], "world") - - # Test already formatted content - parts = agent._prepare_content([{"text": "Already formatted"}]) - self.assertEqual(len(parts), 1) - self.assertEqual(parts[0]["text"], "Already formatted") - - # Test empty content - parts = agent._prepare_content("") - self.assertEqual(len(parts), 1) - self.assertEqual(parts[0]["text"], "") - - -@skipIf(SHOULD_SKIP_TESTS, SKIP_REASON) -class TestOllamaModelBasics(TestCase): - """Test basic OllamaModelAgent functionality that doesn't require API calls.""" - - def setUp(self): - """Set up test environment.""" - # Create patches for external dependencies - self.patch_requests_post = patch('requests.post') - - # Start patches - self.mock_post = self.patch_requests_post.start() - - # Setup default response - mock_response = MagicMock() - mock_response.json.return_value = {"message": {"content": "Response from model"}} - self.mock_post.return_value = mock_response - - def tearDown(self): - """Clean up test environment.""" - # Stop patches - self.patch_requests_post.stop() - - def test_ollama_init(self): - """Test initialization of OllamaModelAgent.""" - agent = OllamaModelAgent("http://localhost:11434", "llama2") - - # Check agent properties - self.assertEqual(agent.model_name, "llama2") - self.assertEqual(agent.api_url, "http://localhost:11434") - self.assertEqual(agent.history, []) - - def test_ollama_clear_history(self): - """Test history clearing functionality.""" - agent = OllamaModelAgent("http://localhost:11434", "llama2") - - # Add some fake history - agent.history = [{"role": "user", "content": "test message"}] - - # Clear history - agent.clear_history() - - # Verify history is cleared - self.assertEqual(agent.history, []) - - def test_ollama_add_system_prompt(self): - """Test adding system prompt to history.""" - agent = OllamaModelAgent("http://localhost:11434", "llama2") - - # Add system prompt - agent.add_system_prompt("I am a helpful AI assistant") - - # Verify system prompt was added to history - self.assertEqual(len(agent.history), 1) - self.assertEqual(agent.history[0]["role"], "system") - self.assertEqual(agent.history[0]["content"], "I am a helpful AI assistant") - - def test_ollama_append_history(self): - """Test appending to history.""" - agent = OllamaModelAgent("http://localhost:11434", "llama2") - - # Append to history - agent.append_to_history(role="user", content="Hello") - agent.append_to_history(role="assistant", content="Hi there!") - - # Verify history entries - self.assertEqual(len(agent.history), 2) - self.assertEqual(agent.history[0]["role"], "user") - self.assertEqual(agent.history[0]["content"], "Hello") - self.assertEqual(agent.history[1]["role"], "assistant") - self.assertEqual(agent.history[1]["content"], "Hi there!") - - def test_ollama_prepare_chat_params(self): - """Test preparing parameters for chat request.""" - agent = 
OllamaModelAgent("http://localhost:11434", "llama2") - - # Add history entries - agent.add_system_prompt("System instructions") - agent.append_to_history(role="user", content="Hello") - - # Prepare chat params and verify structure - params = agent._prepare_chat_params() - - self.assertEqual(params["model"], "llama2") - self.assertEqual(len(params["messages"]), 2) - self.assertEqual(params["messages"][0]["role"], "system") - self.assertEqual(params["messages"][0]["content"], "System instructions") - self.assertEqual(params["messages"][1]["role"], "user") - self.assertEqual(params["messages"][1]["content"], "Hello") - - def test_ollama_chat_with_parameters(self): - """Test chat method with various parameters.""" - agent = OllamaModelAgent("http://localhost:11434", "llama2") - - # Add a system prompt - agent.add_system_prompt("Be helpful") - - # Call chat with different parameters - result = agent.chat("Hello", temperature=0.3, max_tokens=2000) - - # Verify the post request was called with correct parameters - self.mock_post.assert_called_once() - args, kwargs = self.mock_post.call_args - - # Check URL - self.assertEqual(args[0], "http://localhost:11434/api/chat") - - # Check JSON payload - json_data = kwargs.get('json', {}) - self.assertEqual(json_data["model"], "llama2") - self.assertEqual(len(json_data["messages"]), 3) # System + history + new message - self.assertEqual(json_data["temperature"], 0.3) - self.assertEqual(json_data["max_tokens"], 2000) - - # Verify the response was correctly processed - self.assertEqual(result, "Response from model") - - def test_ollama_error_handling(self): - """Test handling of various error cases.""" - agent = OllamaModelAgent("http://localhost:11434", "llama2") - - # Test connection error - self.mock_post.side_effect = Exception("Connection failed") - result = agent.chat("Hello") - self.assertTrue("Error communicating with Ollama API" in result) - - # Test bad response - self.mock_post.side_effect = None - mock_response = MagicMock() - mock_response.json.return_value = {"error": "Model not found"} - self.mock_post.return_value = mock_response - result = agent.chat("Hello") - self.assertTrue("Error" in result) - - # Test missing content in response - mock_response.json.return_value = {"message": {}} # Missing content - result = agent.chat("Hello") - self.assertTrue("Unexpected response format" in result) - - def test_ollama_url_handling(self): - """Test handling of different URL formats.""" - # Test with trailing slash - agent = OllamaModelAgent("http://localhost:11434/", "llama2") - self.assertEqual(agent.api_url, "http://localhost:11434") - - # Test without protocol - agent = OllamaModelAgent("localhost:11434", "llama2") - self.assertEqual(agent.api_url, "http://localhost:11434") - - # Test with https - agent = OllamaModelAgent("https://ollama.example.com", "llama2") - self.assertEqual(agent.api_url, "https://ollama.example.com") \ No newline at end of file diff --git a/test_dir/test_model_error_handling_additional.py b/test_dir/test_model_error_handling_additional.py deleted file mode 100644 index c34c481..0000000 --- a/test_dir/test_model_error_handling_additional.py +++ /dev/null @@ -1,400 +0,0 @@ -""" -Additional comprehensive error handling tests for Ollama and Gemini models. 
-""" -import pytest -import json -from unittest.mock import MagicMock, patch, call -import sys -import os -from pathlib import Path - -# Ensure src is in the path for imports -src_path = str(Path(__file__).parent.parent / "src") -if src_path not in sys.path: - sys.path.insert(0, src_path) - -from cli_code.models.ollama import OllamaModel, MAX_OLLAMA_ITERATIONS -from cli_code.models.gemini import GeminiModel -from cli_code.tools.base import BaseTool - - -class TestModelContextHandling: - """Tests for context window handling in both model classes.""" - - @pytest.fixture - def mock_console(self): - console = MagicMock() - console.print = MagicMock() - console.status = MagicMock() - # Make status return a context manager - status_cm = MagicMock() - console.status.return_value = status_cm - status_cm.__enter__ = MagicMock(return_value=None) - status_cm.__exit__ = MagicMock(return_value=None) - return console - - @pytest.fixture - def mock_ollama_client(self): - client = MagicMock() - client.chat.completions.create = MagicMock() - client.models.list = MagicMock() - return client - - @pytest.fixture - def mock_genai(self): - with patch("cli_code.models.gemini.genai") as mock: - yield mock - - @patch('cli_code.models.ollama.count_tokens') - def test_ollama_manage_context_trimming(self, mock_count_tokens, mock_console, mock_ollama_client): - """Test Ollama model context window management when history exceeds token limit.""" - # Setup - model = OllamaModel("http://localhost:11434", mock_console, "llama3") - model.client = mock_ollama_client - - # Mock the token counting to return a large value - mock_count_tokens.return_value = 9000 # Higher than OLLAMA_MAX_CONTEXT_TOKENS (8000) - - # Add a few messages to history - model.history = [ - {"role": "system", "content": "System prompt"}, - {"role": "user", "content": "User message 1"}, - {"role": "assistant", "content": "Assistant response 1"}, - {"role": "user", "content": "User message 2"}, - {"role": "assistant", "content": "Assistant response 2"}, - ] - - # Execute - original_length = len(model.history) - model._manage_ollama_context() - - # Assert - # Should have removed some messages but kept system prompt - assert len(model.history) < original_length - assert model.history[0]["role"] == "system" # System prompt should be preserved - - @patch('cli_code.models.gemini.genai') - def test_gemini_manage_context_window(self, mock_genai, mock_console): - """Test Gemini model context window management.""" - # Setup - # Mock generative model for initialization - mock_instance = MagicMock() - mock_genai.GenerativeModel.return_value = mock_instance - - # Create the model - model = GeminiModel(api_key="fake_api_key", console=mock_console) - - # Create a large history - need more than (MAX_HISTORY_TURNS * 3 + 2) items - # MAX_HISTORY_TURNS is 20, so we need > 62 items - model.history = [] - for i in range(22): # This will generate 66 items (3 per "round") - model.history.append({"role": "user", "parts": [f"User message {i}"]}) - model.history.append({"role": "model", "parts": [f"Model response {i}"]}) - model.history.append({"role": "model", "parts": [{"function_call": {"name": "test"}, "text": None}]}) - - # Execute - original_length = len(model.history) - assert original_length > 62 # Verify we're over the limit - model._manage_context_window() - - # Assert - assert len(model.history) < original_length - assert len(model.history) <= (20 * 3 + 2) # MAX_HISTORY_TURNS * 3 + 2 - - def test_ollama_history_handling(self, mock_console): - """Test Ollama 
add_to_history and clear_history methods.""" - # Setup - model = OllamaModel("http://localhost:11434", mock_console, "llama3") - model._manage_ollama_context = MagicMock() # Mock to avoid side effects - - # Test clear_history - model.history = [{"role": "system", "content": "System prompt"}] - model.clear_history() - assert len(model.history) == 1 # Should keep system prompt - assert model.history[0]["role"] == "system" - - # Test adding system message - model.history = [] - model.add_to_history({"role": "system", "content": "New system prompt"}) - assert len(model.history) == 1 - assert model.history[0]["role"] == "system" - - # Test adding user message - model.add_to_history({"role": "user", "content": "User message"}) - assert len(model.history) == 2 - assert model.history[1]["role"] == "user" - - # Test adding assistant message - model.add_to_history({"role": "assistant", "content": "Assistant response"}) - assert len(model.history) == 3 - assert model.history[2]["role"] == "assistant" - - # Test adding with custom role - implementation accepts any role - model.add_to_history({"role": "custom", "content": "Custom message"}) - assert len(model.history) == 4 - assert model.history[3]["role"] == "custom" - - -class TestModelConfiguration: - """Tests for model configuration and initialization.""" - - @pytest.fixture - def mock_console(self): - console = MagicMock() - console.print = MagicMock() - return console - - @patch('cli_code.models.gemini.genai') - def test_gemini_initialization_with_env_variable(self, mock_genai, mock_console): - """Test Gemini initialization with API key from environment variable.""" - # Setup - # Mock generative model for initialization - mock_instance = MagicMock() - mock_genai.GenerativeModel.return_value = mock_instance - - # Mock os.environ - with patch.dict('os.environ', {'GEMINI_API_KEY': 'dummy_key_from_env'}): - # Execute - model = GeminiModel(api_key="dummy_key_from_env", console=mock_console) - - # Assert - assert model.api_key == "dummy_key_from_env" - mock_genai.configure.assert_called_once_with(api_key="dummy_key_from_env") - - def test_ollama_initialization_with_invalid_url(self, mock_console): - """Test Ollama initialization with invalid URL.""" - # Shouldn't raise an error immediately, but should fail on first API call - model = OllamaModel("http://invalid:1234", mock_console, "llama3") - - # Should have a client despite invalid URL - assert model.client is not None - - # Mock the client's methods to raise exceptions - model.client.chat.completions.create = MagicMock(side_effect=Exception("Connection failed")) - model.client.models.list = MagicMock(side_effect=Exception("Connection failed")) - - # Execute API call and verify error handling - result = model.generate("test prompt") - assert "error" in result.lower() - - # Execute list_models and verify error handling - result = model.list_models() - assert result is None - - @patch('cli_code.models.gemini.genai') - def test_gemini_model_selection(self, mock_genai, mock_console): - """Test Gemini model selection and fallback behavior.""" - # Setup - mock_instance = MagicMock() - # Make first initialization fail, simulating unavailable model - mock_genai.GenerativeModel.side_effect = [ - Exception("Model not available"), # First call fails - MagicMock() # Second call succeeds with fallback model - ] - - with pytest.raises(Exception) as excinfo: - # Execute - should raise exception when primary model fails - GeminiModel(api_key="fake_api_key", console=mock_console, model_name="unavailable-model") - - 
assert "Could not initialize Gemini model" in str(excinfo.value) - - -class TestToolManagement: - """Tests for tool management in both models.""" - - @pytest.fixture - def mock_console(self): - console = MagicMock() - console.print = MagicMock() - return console - - @pytest.fixture - def mock_ollama_client(self): - client = MagicMock() - client.chat.completions.create = MagicMock() - return client - - @pytest.fixture - def mock_test_tool(self): - tool = MagicMock(spec=BaseTool) - tool.name = "test_tool" - tool.description = "A test tool" - tool.required_args = ["arg1"] - tool.get_function_declaration = MagicMock(return_value=MagicMock()) - tool.execute = MagicMock(return_value="Tool executed") - return tool - - @patch('cli_code.models.ollama.get_tool') - def test_ollama_tool_handling_with_missing_args(self, mock_get_tool, mock_console, mock_ollama_client, mock_test_tool): - """Test Ollama handling of tool calls with missing required arguments.""" - # Setup - model = OllamaModel("http://localhost:11434", mock_console, "llama3") - model.client = mock_ollama_client - model.add_to_history = MagicMock() # Mock history method - - # Make get_tool return our mock tool - mock_get_tool.return_value = mock_test_tool - - # Create mock response with a tool call missing required args - mock_message = MagicMock() - mock_message.content = None - mock_message.tool_calls = [ - MagicMock( - function=MagicMock( - name="test_tool", - arguments='{}' # Missing required arg1 - ), - id="test_id" - ) - ] - - mock_response = MagicMock() - mock_response.choices = [MagicMock( - message=mock_message, - finish_reason="tool_calls" - )] - - mock_ollama_client.chat.completions.create.return_value = mock_response - - # Execute - result = model.generate("Use test_tool") - - # Assert - the model reaches max iterations in this case - assert "maximum iterations" in result.lower() or "max iterations" in result.lower() - # The tool gets executed despite missing args in the implementation - - @patch('cli_code.models.gemini.genai') - @patch('cli_code.models.gemini.get_tool') - def test_gemini_function_call_in_stream(self, mock_get_tool, mock_genai, mock_console, mock_test_tool): - """Test Gemini handling of function call in streaming response.""" - # Setup - # Mock generative model for initialization - mock_model = MagicMock() - mock_genai.GenerativeModel.return_value = mock_model - - # Create the model - model = GeminiModel(api_key="fake_api_key", console=mock_console) - - # Mock get_tool to return our test tool - mock_get_tool.return_value = mock_test_tool - - # Mock the streaming response - mock_response = MagicMock() - - # Create a mock function call in the response - mock_parts = [MagicMock()] - mock_parts[0].text = None - mock_parts[0].function_call = MagicMock() - mock_parts[0].function_call.name = "test_tool" - mock_parts[0].function_call.args = {"arg1": "value1"} # Include required arg - - mock_response.candidates = [MagicMock()] - mock_response.candidates[0].content.parts = mock_parts - - mock_model.generate_content.return_value = mock_response - - # Execute - result = model.generate("Use test_tool") - - # Assert - assert mock_test_tool.execute.called # Tool should be executed - # Test reaches max iterations in current implementation - assert "max iterations" in result.lower() - - -class TestModelEdgeCases: - """Tests for edge cases in both model implementations.""" - - @pytest.fixture - def mock_console(self): - console = MagicMock() - console.print = MagicMock() - return console - - @pytest.fixture - def 
mock_ollama_client(self): - client = MagicMock() - client.chat.completions.create = MagicMock() - return client - - @patch('cli_code.models.ollama.MessageToDict') - def test_ollama_protobuf_conversion_failure(self, mock_message_to_dict, mock_console, mock_ollama_client): - """Test Ollama handling of protobuf conversion failures.""" - # Setup - model = OllamaModel("http://localhost:11434", mock_console, "llama3") - model.client = mock_ollama_client - - # We'll mock _prepare_openai_tools instead of patching json.dumps globally - model._prepare_openai_tools = MagicMock(return_value=None) - - # Make MessageToDict raise an exception - mock_message_to_dict.side_effect = Exception("Protobuf conversion failed") - - # Mock the response with a tool call - mock_message = MagicMock() - mock_message.content = None - mock_message.tool_calls = [ - MagicMock( - function=MagicMock( - name="test_tool", - arguments='{}' - ), - id="test_id" - ) - ] - - mock_response = MagicMock() - mock_response.choices = [MagicMock( - message=mock_message, - finish_reason="tool_calls" - )] - - mock_ollama_client.chat.completions.create.return_value = mock_response - - # Execute - result = model.generate("Use test_tool") - - # Assert - the model reaches maximum iterations - assert "maximum iterations" in result.lower() - - @patch('cli_code.models.gemini.genai') - def test_gemini_empty_response_parts(self, mock_genai, mock_console): - """Test Gemini handling of empty response parts.""" - # Setup - # Mock generative model for initialization - mock_model = MagicMock() - mock_genai.GenerativeModel.return_value = mock_model - - # Create the model - model = GeminiModel(api_key="fake_api_key", console=mock_console) - - # Mock a response with empty parts - mock_response = MagicMock() - mock_response.candidates = [MagicMock()] - mock_response.candidates[0].content.parts = [] # Empty parts - - mock_model.generate_content.return_value = mock_response - - # Execute - result = model.generate("Test prompt") - - # Assert - assert "no content" in result.lower() or "content/parts" in result.lower() - - def test_ollama_with_empty_system_prompt(self, mock_console): - """Test Ollama with an empty system prompt.""" - # Setup - initialize with normal system prompt - model = OllamaModel("http://localhost:11434", mock_console, "llama3") - - # Replace system prompt with empty string - model.system_prompt = "" - model.history = [{"role": "system", "content": ""}] - - # Verify it doesn't cause errors in initialization or history management - model._manage_ollama_context() - assert len(model.history) == 1 - assert model.history[0]["content"] == "" - - -if __name__ == "__main__": - pytest.main(["-xvs", __file__]) \ No newline at end of file diff --git a/test_dir/test_model_integration.py b/test_dir/test_model_integration.py deleted file mode 100644 index 0b87731..0000000 --- a/test_dir/test_model_integration.py +++ /dev/null @@ -1,358 +0,0 @@ -""" -Tests for model integration aspects of the cli-code application. -This file focuses on testing the integration between the CLI and different model providers. 
-""" - -import os -import sys -import unittest -from unittest.mock import patch, MagicMock, mock_open, call -import tempfile -from pathlib import Path - -# Ensure we can import the module -current_dir = os.path.dirname(os.path.abspath(__file__)) -parent_dir = os.path.dirname(current_dir) -if parent_dir not in sys.path: - sys.path.insert(0, parent_dir) - -# Handle missing dependencies gracefully -try: - import pytest - from click.testing import CliRunner - from cli_code.main import cli, start_interactive_session - from cli_code.models.base import AbstractModelAgent - IMPORTS_AVAILABLE = True -except ImportError: - # Create dummy fixtures and mocks if imports aren't available - IMPORTS_AVAILABLE = False - pytest = MagicMock() - pytest.mark.timeout = lambda seconds: lambda f: f - - class DummyCliRunner: - def invoke(self, *args, **kwargs): - class Result: - exit_code = 0 - output = "" - return Result() - - CliRunner = DummyCliRunner - cli = MagicMock() - start_interactive_session = MagicMock() - AbstractModelAgent = MagicMock() - -# Determine if we're running in CI -IN_CI = os.environ.get('CI', 'false').lower() == 'true' -SHOULD_SKIP_TESTS = not IMPORTS_AVAILABLE or IN_CI - - -@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason="Required imports not available or running in CI") -class TestGeminiModelIntegration: - """Test integration with Gemini models.""" - - def setup_method(self): - """Set up test fixtures.""" - self.runner = CliRunner() - self.config_patcher = patch('cli_code.main.config') - self.mock_config = self.config_patcher.start() - self.console_patcher = patch('cli_code.main.console') - self.mock_console = self.console_patcher.start() - - # Set default behavior for mocks - self.mock_config.get_default_provider.return_value = "gemini" - self.mock_config.get_default_model.return_value = "gemini-pro" - self.mock_config.get_credential.return_value = "fake-api-key" - - # Patch the GeminiModel class - self.gemini_patcher = patch('cli_code.main.GeminiModel') - self.mock_gemini_model_class = self.gemini_patcher.start() - self.mock_gemini_instance = MagicMock() - self.mock_gemini_model_class.return_value = self.mock_gemini_instance - - def teardown_method(self): - """Teardown test fixtures.""" - self.config_patcher.stop() - self.console_patcher.stop() - self.gemini_patcher.stop() - - @pytest.mark.timeout(5) - def test_gemini_model_initialization(self): - """Test initialization of Gemini model.""" - result = self.runner.invoke(cli, []) - assert result.exit_code == 0 - - # Verify model was initialized with correct parameters - self.mock_gemini_model_class.assert_called_once_with( - api_key="fake-api-key", - console=self.mock_console, - model_name="gemini-pro" - ) - - @pytest.mark.timeout(5) - def test_gemini_model_custom_model_name(self): - """Test using a custom Gemini model name.""" - result = self.runner.invoke(cli, ['--model', 'gemini-2.5-pro-exp-03-25']) - assert result.exit_code == 0 - - # Verify model was initialized with custom model name - self.mock_gemini_model_class.assert_called_once_with( - api_key="fake-api-key", - console=self.mock_console, - model_name="gemini-2.5-pro-exp-03-25" - ) - - @pytest.mark.timeout(5) - def test_gemini_model_tools_initialization(self): - """Test that tools are properly initialized for Gemini model.""" - # Need to mock the tools setup - with patch('cli_code.main.AVAILABLE_TOOLS') as mock_tools: - mock_tools.return_value = ['tool1', 'tool2'] - - result = self.runner.invoke(cli, []) - assert result.exit_code == 0 - - # Verify inject_tools was called on the 
model instance - self.mock_gemini_instance.inject_tools.assert_called_once() - - -@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason="Required imports not available or running in CI") -class TestOllamaModelIntegration: - """Test integration with Ollama models.""" - - def setup_method(self): - """Set up test fixtures.""" - self.runner = CliRunner() - self.config_patcher = patch('cli_code.main.config') - self.mock_config = self.config_patcher.start() - self.console_patcher = patch('cli_code.main.console') - self.mock_console = self.console_patcher.start() - - # Set default behavior for mocks - self.mock_config.get_default_provider.return_value = "ollama" - self.mock_config.get_default_model.return_value = "llama2" - self.mock_config.get_credential.return_value = "http://localhost:11434" - - # Patch the OllamaModel class - self.ollama_patcher = patch('cli_code.main.OllamaModel') - self.mock_ollama_model_class = self.ollama_patcher.start() - self.mock_ollama_instance = MagicMock() - self.mock_ollama_model_class.return_value = self.mock_ollama_instance - - def teardown_method(self): - """Teardown test fixtures.""" - self.config_patcher.stop() - self.console_patcher.stop() - self.ollama_patcher.stop() - - @pytest.mark.timeout(5) - def test_ollama_model_initialization(self): - """Test initialization of Ollama model.""" - result = self.runner.invoke(cli, []) - assert result.exit_code == 0 - - # Verify model was initialized with correct parameters - self.mock_ollama_model_class.assert_called_once_with( - api_url="http://localhost:11434", - console=self.mock_console, - model_name="llama2" - ) - - @pytest.mark.timeout(5) - def test_ollama_model_custom_model_name(self): - """Test using a custom Ollama model name.""" - result = self.runner.invoke(cli, ['--model', 'mistral']) - assert result.exit_code == 0 - - # Verify model was initialized with custom model name - self.mock_ollama_model_class.assert_called_once_with( - api_url="http://localhost:11434", - console=self.mock_console, - model_name="mistral" - ) - - @pytest.mark.timeout(5) - def test_ollama_model_tools_initialization(self): - """Test that tools are properly initialized for Ollama model.""" - # Need to mock the tools setup - with patch('cli_code.main.AVAILABLE_TOOLS') as mock_tools: - mock_tools.return_value = ['tool1', 'tool2'] - - result = self.runner.invoke(cli, []) - assert result.exit_code == 0 - - # Verify inject_tools was called on the model instance - self.mock_ollama_instance.inject_tools.assert_called_once() - - -@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason="Required imports not available or running in CI") -class TestProviderSwitching: - """Test switching between different model providers.""" - - def setup_method(self): - """Set up test fixtures.""" - self.runner = CliRunner() - self.config_patcher = patch('cli_code.main.config') - self.mock_config = self.config_patcher.start() - self.console_patcher = patch('cli_code.main.console') - self.mock_console = self.console_patcher.start() - - # Set default behavior for mocks - self.mock_config.get_default_provider.return_value = "gemini" - self.mock_config.get_default_model.side_effect = lambda provider=None: { - "gemini": "gemini-pro", - "ollama": "llama2", - None: "gemini-pro" # Default to gemini model - }.get(provider) - self.mock_config.get_credential.side_effect = lambda provider: { - "gemini": "fake-api-key", - "ollama": "http://localhost:11434" - }.get(provider) - - # Patch the model classes - self.gemini_patcher = patch('cli_code.main.GeminiModel') - self.mock_gemini_model_class = 
self.gemini_patcher.start() - self.mock_gemini_instance = MagicMock() - self.mock_gemini_model_class.return_value = self.mock_gemini_instance - - self.ollama_patcher = patch('cli_code.main.OllamaModel') - self.mock_ollama_model_class = self.ollama_patcher.start() - self.mock_ollama_instance = MagicMock() - self.mock_ollama_model_class.return_value = self.mock_ollama_instance - - def teardown_method(self): - """Teardown test fixtures.""" - self.config_patcher.stop() - self.console_patcher.stop() - self.gemini_patcher.stop() - self.ollama_patcher.stop() - - @pytest.mark.timeout(5) - def test_switch_provider_via_cli_option(self): - """Test switching provider via CLI option.""" - # Default should be gemini - result = self.runner.invoke(cli, []) - assert result.exit_code == 0 - self.mock_gemini_model_class.assert_called_once() - self.mock_ollama_model_class.assert_not_called() - - # Reset mock call counts - self.mock_gemini_model_class.reset_mock() - self.mock_ollama_model_class.reset_mock() - - # Switch to ollama via CLI option - result = self.runner.invoke(cli, ['--provider', 'ollama']) - assert result.exit_code == 0 - self.mock_gemini_model_class.assert_not_called() - self.mock_ollama_model_class.assert_called_once() - - @pytest.mark.timeout(5) - def test_set_default_provider_command(self): - """Test set-default-provider command.""" - # Test setting gemini as default - result = self.runner.invoke(cli, ['set-default-provider', 'gemini']) - assert result.exit_code == 0 - self.mock_config.set_default_provider.assert_called_once_with('gemini') - - # Reset mock - self.mock_config.set_default_provider.reset_mock() - - # Test setting ollama as default - result = self.runner.invoke(cli, ['set-default-provider', 'ollama']) - assert result.exit_code == 0 - self.mock_config.set_default_provider.assert_called_once_with('ollama') - - -@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason="Required imports not available or running in CI") -class TestToolIntegration: - """Test integration of tools with models.""" - - def setup_method(self): - """Set up test fixtures.""" - self.console_patcher = patch('cli_code.main.console') - self.mock_console = self.console_patcher.start() - - self.config_patcher = patch('cli_code.main.config') - self.mock_config = self.config_patcher.start() - self.mock_config.get_default_provider.return_value = "gemini" - self.mock_config.get_default_model.return_value = "gemini-pro" - self.mock_config.get_credential.return_value = "fake-api-key" - - # Patch the model class - self.gemini_patcher = patch('cli_code.main.GeminiModel') - self.mock_gemini_model_class = self.gemini_patcher.start() - self.mock_gemini_instance = MagicMock() - self.mock_gemini_model_class.return_value = self.mock_gemini_instance - - # Create mock tools - self.tool1 = MagicMock() - self.tool1.name = "tool1" - self.tool1.function_name = "tool1_func" - self.tool1.description = "Tool 1 description" - - self.tool2 = MagicMock() - self.tool2.name = "tool2" - self.tool2.function_name = "tool2_func" - self.tool2.description = "Tool 2 description" - - # Patch AVAILABLE_TOOLS - self.tools_patcher = patch('cli_code.main.AVAILABLE_TOOLS', return_value=[self.tool1, self.tool2]) - self.mock_tools = self.tools_patcher.start() - - # Patch input for interactive session - self.input_patcher = patch('builtins.input') - self.mock_input = self.input_patcher.start() - self.mock_input.return_value = "exit" # Always exit to end the session - - def teardown_method(self): - """Teardown test fixtures.""" - self.console_patcher.stop() - 
self.config_patcher.stop() - self.gemini_patcher.stop() - self.tools_patcher.stop() - self.input_patcher.stop() - - @pytest.mark.timeout(5) - def test_tools_injected_to_model(self): - """Test that tools are injected into the model.""" - start_interactive_session( - provider="gemini", - model_name="gemini-pro", - console=self.mock_console - ) - - # Verify model was created with correct parameters - self.mock_gemini_model_class.assert_called_once_with( - api_key="fake-api-key", - console=self.mock_console, - model_name="gemini-pro" - ) - - # Verify tools were injected - self.mock_gemini_instance.inject_tools.assert_called_once() - - # Get the tools that were injected - tools_injected = self.mock_gemini_instance.inject_tools.call_args[0][0] - - # Verify both tools are in the injected list - tool_names = [tool.name for tool in tools_injected] - assert "tool1" in tool_names - assert "tool2" in tool_names - - @pytest.mark.timeout(5) - def test_tool_invocation(self): - """Test tool invocation in the model.""" - # Setup model to return prompt that appears to use a tool - self.mock_gemini_instance.ask.return_value = "I'll use tool1 to help you with that." - - start_interactive_session( - provider="gemini", - model_name="gemini-pro", - console=self.mock_console - ) - - # Verify ask was called (would trigger tool invocation if implemented) - self.mock_gemini_instance.ask.assert_called_once() - - -if __name__ == "__main__": - unittest.main() \ No newline at end of file diff --git a/test_dir/test_models_base.py b/test_dir/test_models_base.py deleted file mode 100644 index c836430..0000000 --- a/test_dir/test_models_base.py +++ /dev/null @@ -1,54 +0,0 @@ -""" -Tests for the AbstractModelAgent base class. -""" -import pytest -from unittest.mock import MagicMock - -# Direct import for coverage tracking -import src.cli_code.models.base -from src.cli_code.models.base import AbstractModelAgent - - -class TestModelImplementation(AbstractModelAgent): - """A concrete implementation of AbstractModelAgent for testing.""" - - def generate(self, prompt): - """Test implementation of the generate method.""" - return f"Response to: {prompt}" - - def list_models(self): - """Test implementation of the list_models method.""" - return [{"name": "test-model", "displayName": "Test Model"}] - - -def test_abstract_model_init(): - """Test initialization of a concrete model implementation.""" - console = MagicMock() - model = TestModelImplementation(console=console, model_name="test-model") - - assert model.console == console - assert model.model_name == "test-model" - - -def test_generate_method(): - """Test the generate method of the concrete implementation.""" - model = TestModelImplementation(console=MagicMock(), model_name="test-model") - response = model.generate("Hello") - - assert response == "Response to: Hello" - - -def test_list_models_method(): - """Test the list_models method of the concrete implementation.""" - model = TestModelImplementation(console=MagicMock(), model_name="test-model") - models = model.list_models() - - assert len(models) == 1 - assert models[0]["name"] == "test-model" - assert models[0]["displayName"] == "Test Model" - - -def test_abstract_class_methods(): - """Test that AbstractModelAgent cannot be instantiated directly.""" - with pytest.raises(TypeError): - AbstractModelAgent(console=MagicMock(), model_name="test-model") \ No newline at end of file diff --git a/test_dir/test_ollama_model.py b/test_dir/test_ollama_model.py deleted file mode 100644 index 2587b45..0000000 --- 
a/test_dir/test_ollama_model.py +++ /dev/null @@ -1,288 +0,0 @@ -""" -Tests specifically for the OllamaModel class to improve code coverage. -""" - -import os -import json -import sys -import unittest -from unittest.mock import patch, MagicMock, mock_open, call -import pytest - -# Check if running in CI -IN_CI = os.environ.get('CI', 'false').lower() == 'true' - -# Handle imports -try: - from cli_code.models.ollama import OllamaModel - from rich.console import Console - IMPORTS_AVAILABLE = True -except ImportError: - IMPORTS_AVAILABLE = False - OllamaModel = MagicMock - Console = MagicMock - -# Set up conditional skipping -SHOULD_SKIP_TESTS = not IMPORTS_AVAILABLE and not IN_CI -SKIP_REASON = "Required imports not available and not in CI" - - -@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason=SKIP_REASON) -class TestOllamaModel: - """Test suite for OllamaModel class, focusing on previously uncovered methods.""" - - def setup_method(self): - """Set up test fixtures.""" - # Mock OpenAI module before initialization - self.openai_patch = patch('cli_code.models.ollama.OpenAI') - self.mock_openai = self.openai_patch.start() - - # Mock the OpenAI client instance - self.mock_client = MagicMock() - self.mock_openai.return_value = self.mock_client - - # Mock console - self.mock_console = MagicMock(spec=Console) - - # Mock os.path.isdir and os.path.isfile - self.isdir_patch = patch('os.path.isdir') - self.isfile_patch = patch('os.path.isfile') - self.mock_isdir = self.isdir_patch.start() - self.mock_isfile = self.isfile_patch.start() - - # Mock glob - self.glob_patch = patch('glob.glob') - self.mock_glob = self.glob_patch.start() - - # Mock open - self.open_patch = patch('builtins.open', mock_open(read_data="# Test content")) - self.mock_open = self.open_patch.start() - - # Mock get_tool - self.get_tool_patch = patch('cli_code.models.ollama.get_tool') - self.mock_get_tool = self.get_tool_patch.start() - - # Default tool mock - self.mock_tool = MagicMock() - self.mock_tool.execute.return_value = "ls output" - self.mock_get_tool.return_value = self.mock_tool - - def teardown_method(self): - """Tear down test fixtures.""" - self.openai_patch.stop() - self.isdir_patch.stop() - self.isfile_patch.stop() - self.glob_patch.stop() - self.open_patch.stop() - self.get_tool_patch.stop() - - def test_init(self): - """Test initialization of OllamaModel.""" - model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") - - # Check if OpenAI client was initialized correctly - self.mock_openai.assert_called_once_with( - base_url="http://localhost:11434", - api_key="ollama" - ) - - # Check model attributes - assert model.api_url == "http://localhost:11434" - assert model.model_name == "llama3" - - # Check history initialization - assert len(model.history) == 1 - assert model.history[0]["role"] == "system" - - def test_get_initial_context_with_rules_dir(self): - """Test getting initial context from .rules directory.""" - # Set up mocks - self.mock_isdir.return_value = True - self.mock_glob.return_value = [".rules/context.md", ".rules/tools.md"] - - model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") - context = model._get_initial_context() - - # Verify directory check - self.mock_isdir.assert_called_with(".rules") - - # Verify glob search - self.mock_glob.assert_called_with(".rules/*.md") - - # Verify files were read - assert self.mock_open.call_count == 2 - - # Check result content - assert "Project rules and guidelines:" in context - assert "# Content from" in context - - def 
test_get_initial_context_with_readme(self): - """Test getting initial context from README.md when no .rules directory.""" - # Set up mocks - self.mock_isdir.return_value = False - self.mock_isfile.return_value = True - - model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") - context = model._get_initial_context() - - # Verify README check - self.mock_isfile.assert_called_with("README.md") - - # Verify file reading - self.mock_open.assert_called_once_with("README.md", "r", encoding="utf-8", errors="ignore") - - # Check result content - assert "Project README:" in context - - def test_get_initial_context_with_ls_fallback(self): - """Test getting initial context via ls when no .rules or README.""" - # Set up mocks - self.mock_isdir.return_value = False - self.mock_isfile.return_value = False - - model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") - context = model._get_initial_context() - - # Verify tool was used - self.mock_get_tool.assert_called_with("ls") - self.mock_tool.execute.assert_called_once() - - # Check result content - assert "Current directory contents" in context - assert "ls output" in context - - def test_prepare_openai_tools(self): - """Test preparation of tools in OpenAI function format.""" - # Create a mock for AVAILABLE_TOOLS - with patch('cli_code.models.ollama.AVAILABLE_TOOLS') as mock_available_tools: - # Sample tool definition - mock_available_tools.return_value = { - "test_tool": { - "name": "test_tool", - "description": "A test tool", - "parameters": { - "param1": {"type": "string", "description": "A string parameter"}, - "param2": {"type": "integer", "description": "An integer parameter"} - }, - "required": ["param1"] - } - } - - model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") - tools = model._prepare_openai_tools() - - # Verify tools format - assert len(tools) == 1 - assert tools[0]["type"] == "function" - assert tools[0]["function"]["name"] == "test_tool" - assert "parameters" in tools[0]["function"] - assert "properties" in tools[0]["function"]["parameters"] - assert "param1" in tools[0]["function"]["parameters"]["properties"] - assert "param2" in tools[0]["function"]["parameters"]["properties"] - assert tools[0]["function"]["parameters"]["required"] == ["param1"] - - def test_manage_ollama_context(self): - """Test context management for Ollama models.""" - model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") - - # Add many messages to force context truncation - for i in range(30): - model.add_to_history({"role": "user", "content": f"Test message {i}"}) - model.add_to_history({"role": "assistant", "content": f"Test response {i}"}) - - # Call context management - model._manage_ollama_context() - - # Verify history was truncated but system message preserved - assert len(model.history) < 61 # Less than original count - assert model.history[0]["role"] == "system" # System message preserved - - def test_add_to_history(self): - """Test adding messages to history.""" - model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") - - # Clear existing history - model.history = [] - - # Add a message - message = {"role": "user", "content": "Test message"} - model.add_to_history(message) - - # Verify message was added - assert len(model.history) == 1 - assert model.history[0] == message - - def test_clear_history(self): - """Test clearing history.""" - model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") - - # Add some messages - 
model.add_to_history({"role": "user", "content": "Test message"}) - - # Clear history - model.clear_history() - - # Verify history was cleared - assert len(model.history) == 0 - - def test_list_models(self): - """Test listing available models.""" - # Mock the completion response - mock_response = MagicMock() - mock_models = [ - {"id": "llama3", "object": "model", "created": 1621880188}, - {"id": "mistral", "object": "model", "created": 1622880188} - ] - mock_response.json.return_value = {"data": mock_models} - - # Set up client mock to return response - self.mock_client.models.list.return_value.data = mock_models - - model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") - result = model.list_models() - - # Verify client method called - self.mock_client.models.list.assert_called_once() - - # Verify result - assert result == mock_models - - def test_generate_with_function_calls(self): - """Test generate method with function calls.""" - # Create response with function calls - mock_message = MagicMock() - mock_message.content = None - mock_message.tool_calls = [ - MagicMock( - function=MagicMock( - name="test_tool", - arguments='{"param1": "value1"}' - ) - ) - ] - - mock_response = MagicMock() - mock_response.choices = [MagicMock( - message=mock_message, - finish_reason="tool_calls" - )] - - # Set up client mock - self.mock_client.chat.completions.create.return_value = mock_response - - # Mock get_tool to return a tool that executes successfully - tool_mock = MagicMock() - tool_mock.execute.return_value = "Tool execution result" - self.mock_get_tool.return_value = tool_mock - - model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") - result = model.generate("Test prompt") - - # Verify client method called - self.mock_client.chat.completions.create.assert_called() - - # Verify tool execution - tool_mock.execute.assert_called_once_with(param1="value1") - - # Check that there was a second API call with the tool results - assert self.mock_client.chat.completions.create.call_count == 2 \ No newline at end of file diff --git a/test_dir/test_ollama_model_advanced.py b/test_dir/test_ollama_model_advanced.py deleted file mode 100644 index ea20752..0000000 --- a/test_dir/test_ollama_model_advanced.py +++ /dev/null @@ -1,452 +0,0 @@ -""" -Tests specifically for the OllamaModel class targeting advanced scenarios and edge cases -to improve code coverage on complex methods like generate(). 
-""" - -import os -import json -import sys -from unittest.mock import patch, MagicMock, mock_open, call, ANY -import pytest - -# Check if running in CI -IN_CI = os.environ.get('CI', 'false').lower() == 'true' - -# Handle imports -try: - from cli_code.models.ollama import OllamaModel, MAX_OLLAMA_ITERATIONS - from rich.console import Console - IMPORTS_AVAILABLE = True -except ImportError: - IMPORTS_AVAILABLE = False - # Create dummy classes for type checking - OllamaModel = MagicMock - Console = MagicMock - MAX_OLLAMA_ITERATIONS = 5 - -# Set up conditional skipping -SHOULD_SKIP_TESTS = not IMPORTS_AVAILABLE and not IN_CI -SKIP_REASON = "Required imports not available and not in CI" - - -@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason=SKIP_REASON) -class TestOllamaModelAdvanced: - """Test suite for OllamaModel class focusing on complex methods and edge cases.""" - - def setup_method(self): - """Set up test fixtures.""" - # Mock OpenAI module - self.openai_patch = patch('cli_code.models.ollama.OpenAI') - self.mock_openai = self.openai_patch.start() - - # Mock the OpenAI client instance - self.mock_client = MagicMock() - self.mock_openai.return_value = self.mock_client - - # Mock console - self.mock_console = MagicMock(spec=Console) - - # Mock tool-related components - self.get_tool_patch = patch('cli_code.models.ollama.get_tool') - self.mock_get_tool = self.get_tool_patch.start() - - # Default tool mock - self.mock_tool = MagicMock() - self.mock_tool.execute.return_value = "Tool execution result" - self.mock_get_tool.return_value = self.mock_tool - - # Mock initial context method to avoid complexity - self.get_initial_context_patch = patch.object( - OllamaModel, '_get_initial_context', return_value="Initial context") - self.mock_get_initial_context = self.get_initial_context_patch.start() - - # Set up mock for JSON loads - self.json_loads_patch = patch('json.loads') - self.mock_json_loads = self.json_loads_patch.start() - - # Mock questionary for user confirmations - self.questionary_patch = patch('questionary.confirm') - self.mock_questionary = self.questionary_patch.start() - self.mock_questionary_confirm = MagicMock() - self.mock_questionary.return_value = self.mock_questionary_confirm - self.mock_questionary_confirm.ask.return_value = True # Default to confirmed - - # Create model instance - self.model = OllamaModel("http://localhost:11434", self.mock_console, "llama3") - - def teardown_method(self): - """Tear down test fixtures.""" - self.openai_patch.stop() - self.get_tool_patch.stop() - self.get_initial_context_patch.stop() - self.json_loads_patch.stop() - self.questionary_patch.stop() - - def test_generate_with_text_response(self): - """Test generate method with a simple text response.""" - # Mock chat completions response with text - mock_message = MagicMock() - mock_message.content = "This is a simple text response." - mock_message.tool_calls = None - - mock_choice = MagicMock() - mock_choice.message = mock_message - - mock_response = MagicMock() - mock_response.choices = [mock_choice] - - self.mock_client.chat.completions.create.return_value = mock_response - - # Call generate - result = self.model.generate("Tell me something interesting") - - # Verify API was called correctly - self.mock_client.chat.completions.create.assert_called_once() - call_kwargs = self.mock_client.chat.completions.create.call_args[1] - assert call_kwargs["model"] == "llama3" - - # Verify result - assert result == "This is a simple text response." 
- - def test_generate_with_tool_call(self): - """Test generate method with a tool call response.""" - # Mock a tool call in the response - mock_tool_call = MagicMock() - mock_tool_call.id = "call123" - mock_tool_call.function.name = "ls" - mock_tool_call.function.arguments = '{"dir": "."}' - - # Parse the arguments as expected - self.mock_json_loads.return_value = {"dir": "."} - - mock_message = MagicMock() - mock_message.content = None - mock_message.tool_calls = [mock_tool_call] - mock_message.model_dump.return_value = {"role": "assistant", "tool_calls": [{"type": "function", "function": {"name": "ls", "arguments": '{"dir": "."}'}}]} - - mock_choice = MagicMock() - mock_choice.message = mock_message - - mock_response = MagicMock() - mock_response.choices = [mock_choice] - - # Set up initial response - self.mock_client.chat.completions.create.return_value = mock_response - - # Create a second response for after tool execution - mock_message2 = MagicMock() - mock_message2.content = "Tool executed successfully." - mock_message2.tool_calls = None - - mock_choice2 = MagicMock() - mock_choice2.message = mock_message2 - - mock_response2 = MagicMock() - mock_response2.choices = [mock_choice2] - - # Set up successive responses - self.mock_client.chat.completions.create.side_effect = [mock_response, mock_response2] - - # Call generate - result = self.model.generate("List the files in this directory") - - # Verify tool was called - self.mock_get_tool.assert_called_with("ls") - self.mock_tool.execute.assert_called_once() - - assert result == "Tool executed successfully." - # Example of a more specific assertion - # assert "Tool executed successfully" in result and "ls" in result - - def test_generate_with_task_complete_tool(self): - """Test generate method with task_complete tool.""" - # Mock a task_complete tool call - mock_tool_call = MagicMock() - mock_tool_call.id = "call123" - mock_tool_call.function.name = "task_complete" - mock_tool_call.function.arguments = '{"summary": "Task completed successfully!"}' - - # Parse the arguments as expected - self.mock_json_loads.return_value = {"summary": "Task completed successfully!"} - - mock_message = MagicMock() - mock_message.content = None - mock_message.tool_calls = [mock_tool_call] - mock_message.model_dump.return_value = {"role": "assistant", "tool_calls": [{"type": "function", "function": {"name": "task_complete", "arguments": '{"summary": "Task completed successfully!"}'}}]} - - mock_choice = MagicMock() - mock_choice.message = mock_message - - mock_response = MagicMock() - mock_response.choices = [mock_choice] - - self.mock_client.chat.completions.create.return_value = mock_response - - # Call generate - result = self.model.generate("Complete this task") - - # Verify result contains the summary - assert result == "Task completed successfully!" 
- - def test_generate_with_sensitive_tool_approved(self): - """Test generate method with sensitive tool that requires approval.""" - # Mock a sensitive tool call (edit) - mock_tool_call = MagicMock() - mock_tool_call.id = "call123" - mock_tool_call.function.name = "edit" - mock_tool_call.function.arguments = '{"file_path": "file.txt", "content": "new content"}' - - # Parse the arguments as expected - self.mock_json_loads.return_value = {"file_path": "file.txt", "content": "new content"} - - mock_message = MagicMock() - mock_message.content = None - mock_message.tool_calls = [mock_tool_call] - mock_message.model_dump.return_value = {"role": "assistant", "tool_calls": [{"type": "function", "function": {"name": "edit", "arguments": '{"file_path": "file.txt", "content": "new content"}'}}]} - - mock_choice = MagicMock() - mock_choice.message = mock_message - - mock_response = MagicMock() - mock_response.choices = [mock_choice] - - # Set up confirmation to be approved - self.mock_questionary_confirm.ask.return_value = True - - # Set up initial response - self.mock_client.chat.completions.create.return_value = mock_response - - # Create a second response for after tool execution - mock_message2 = MagicMock() - mock_message2.content = "Edit completed." - mock_message2.tool_calls = None - - mock_choice2 = MagicMock() - mock_choice2.message = mock_message2 - - mock_response2 = MagicMock() - mock_response2.choices = [mock_choice2] - - # Set up successive responses - self.mock_client.chat.completions.create.side_effect = [mock_response, mock_response2] - - # Call generate - result = self.model.generate("Edit this file") - - # Verify user was asked for confirmation - self.mock_questionary_confirm.ask.assert_called_once() - - # Verify tool was called after approval - self.mock_get_tool.assert_called_with("edit") - self.mock_tool.execute.assert_called_once() - - # Verify result - assert result == "Edit completed." - - def test_generate_with_sensitive_tool_rejected(self): - """Test generate method with sensitive tool that is rejected.""" - # Mock a sensitive tool call (edit) - mock_tool_call = MagicMock() - mock_tool_call.id = "call123" - mock_tool_call.function.name = "edit" - mock_tool_call.function.arguments = '{"file_path": "file.txt", "content": "new content"}' - - # Parse the arguments as expected - self.mock_json_loads.return_value = {"file_path": "file.txt", "content": "new content"} - - mock_message = MagicMock() - mock_message.content = None - mock_message.tool_calls = [mock_tool_call] - mock_message.model_dump.return_value = {"role": "assistant", "tool_calls": [{"type": "function", "function": {"name": "edit", "arguments": '{"file_path": "file.txt", "content": "new content"}'}}]} - - mock_choice = MagicMock() - mock_choice.message = mock_message - - mock_response = MagicMock() - mock_response.choices = [mock_choice] - - # Set up confirmation to be rejected - self.mock_questionary_confirm.ask.return_value = False - - # Set up initial response - self.mock_client.chat.completions.create.return_value = mock_response - - # Create a second response for after rejection - mock_message2 = MagicMock() - mock_message2.content = "I'll find another approach." 
- mock_message2.tool_calls = None - - mock_choice2 = MagicMock() - mock_choice2.message = mock_message2 - - mock_response2 = MagicMock() - mock_response2.choices = [mock_choice2] - - # Set up successive responses - self.mock_client.chat.completions.create.side_effect = [mock_response, mock_response2] - - # Call generate - result = self.model.generate("Edit this file") - - # Verify user was asked for confirmation - self.mock_questionary_confirm.ask.assert_called_once() - - # Verify tool was NOT called after rejection - self.mock_tool.execute.assert_not_called() - - # Verify result - assert result == "I'll find another approach." - - def test_generate_with_api_error(self): - """Test generate method with API error.""" - # Mock API error - self.mock_client.chat.completions.create.side_effect = Exception("API Error") - - # Call generate - result = self.model.generate("Generate something") - - # Verify error handling - assert "Error calling Ollama API:" in result - # Example of a more specific assertion - # assert result == "Error calling Ollama API: API Error" - - def test_generate_max_iterations(self): - """Test generate method with maximum iterations reached.""" - # Mock a tool call that will keep being returned - mock_tool_call = MagicMock() - mock_tool_call.id = "call123" - mock_tool_call.function.name = "ls" - mock_tool_call.function.arguments = '{"dir": "."}' - - # Parse the arguments as expected - self.mock_json_loads.return_value = {"dir": "."} - - mock_message = MagicMock() - mock_message.content = None - mock_message.tool_calls = [mock_tool_call] - mock_message.model_dump.return_value = {"role": "assistant", "tool_calls": [{"type": "function", "function": {"name": "ls", "arguments": '{"dir": "."}'}}]} - - mock_choice = MagicMock() - mock_choice.message = mock_message - - mock_response = MagicMock() - mock_response.choices = [mock_choice] - - # Always return the same response with a tool call to force iteration - self.mock_client.chat.completions.create.return_value = mock_response - - # Call generate - result = self.model.generate("List files recursively") - - # Verify max iterations were handled - assert self.mock_client.chat.completions.create.call_count <= MAX_OLLAMA_ITERATIONS + 1 - assert "Maximum iterations" in result - - def test_manage_ollama_context(self): - """Test context window management for Ollama.""" - # Add many messages to history - for i in range(30): # Many more than fits in context - self.model.add_to_history({"role": "user", "content": f"Message {i}"}) - self.model.add_to_history({"role": "assistant", "content": f"Response {i}"}) - - # Record history length before management - initial_length = len(self.model.history) - - # Manage context - self.model._manage_ollama_context() - - # Verify truncation - assert len(self.model.history) < initial_length - - # Verify system prompt is preserved with specific content check - assert self.model.history[0]["role"] == "system" - # Example of a more specific assertion - # assert self.model.history[0]["content"] == "You are a helpful AI coding assistant..." 
- assert "You are a helpful AI coding assistant" in self.model.history[0]["content"] - assert "function calling capabilities" in self.model.history[0]["content"] - - def test_generate_with_token_counting(self): - """Test generate method with token counting and context management.""" - # Mock token counting to simulate context window being exceeded - with patch('cli_code.models.ollama.count_tokens') as mock_count_tokens: - # Set up a high token count to trigger context management - mock_count_tokens.return_value = 10000 # Above context limit - - # Set up a basic response - mock_message = MagicMock() - mock_message.content = "Response after context management" - mock_message.tool_calls = None - - mock_choice = MagicMock() - mock_choice.message = mock_message - - mock_response = MagicMock() - mock_response.choices = [mock_choice] - - self.mock_client.chat.completions.create.return_value = mock_response - - # Call generate - result = self.model.generate("Generate with large context") - - # Verify token counting was used - mock_count_tokens.assert_called() - - # Verify result - assert result == "Response after context management" - - def test_error_handling_for_tool_execution(self): - """Test error handling during tool execution.""" - # Mock a tool call - mock_tool_call = MagicMock() - mock_tool_call.id = "call123" - mock_tool_call.function.name = "ls" - mock_tool_call.function.arguments = '{"dir": "."}' - - # Parse the arguments as expected - self.mock_json_loads.return_value = {"dir": "."} - - mock_message = MagicMock() - mock_message.content = None - mock_message.tool_calls = [mock_tool_call] - mock_message.model_dump.return_value = {"role": "assistant", "tool_calls": [{"type": "function", "function": {"name": "ls", "arguments": '{"dir": "."}'}}]} - - mock_choice = MagicMock() - mock_choice.message = mock_message - - mock_response = MagicMock() - mock_response.choices = [mock_choice] - - # Set up initial response - self.mock_client.chat.completions.create.return_value = mock_response - - # Make tool execution fail - error_message = "Tool execution failed" - self.mock_tool.execute.side_effect = Exception(error_message) - - # Create a second response for after tool failure - mock_message2 = MagicMock() - mock_message2.content = "I encountered an error." - mock_message2.tool_calls = None - - mock_choice2 = MagicMock() - mock_choice2.message = mock_message2 - - mock_response2 = MagicMock() - mock_response2.choices = [mock_choice2] - - # Set up successive responses - self.mock_client.chat.completions.create.side_effect = [mock_response, mock_response2] - - # Call generate - result = self.model.generate("List the files") - - # Verify error was handled gracefully with specific assertions - assert result == "I encountered an error." - # Verify that error details were added to history - error_found = False - for message in self.model.history: - if message.get("role") == "tool" and message.get("name") == "ls": - assert "error" in message.get("content", "").lower() - assert error_message in message.get("content", "") - error_found = True - assert error_found, "Error message not found in history" \ No newline at end of file diff --git a/test_dir/test_ollama_model_context.py b/test_dir/test_ollama_model_context.py deleted file mode 100644 index 3660408..0000000 --- a/test_dir/test_ollama_model_context.py +++ /dev/null @@ -1,268 +0,0 @@ -""" -Tests for the Ollama Model context management functionality. 
- -To run these tests specifically: - python -m pytest test_dir/test_ollama_model_context.py - -To run a specific test: - python -m pytest test_dir/test_ollama_model_context.py::TestOllamaModelContext::test_manage_ollama_context_truncation_needed - -To run all tests related to context management: - python -m pytest -k "ollama_context" -""" -import os -import logging -import json -import glob -from unittest.mock import patch, MagicMock, mock_open - -import pytest -from rich.console import Console -from pathlib import Path -import sys - -# Ensure src is in the path for imports -src_path = str(Path(__file__).parent.parent / "src") -if src_path not in sys.path: - sys.path.insert(0, src_path) - -from cli_code.models.ollama import OllamaModel, OLLAMA_MAX_CONTEXT_TOKENS -from cli_code.config import Config - -# Define skip reason for clarity -SKIP_REASON = "Skipping model tests in CI or if imports fail to avoid dependency issues." -IMPORTS_AVAILABLE = True # Assume imports are available unless check fails -IN_CI = os.environ.get('CI', 'false').lower() == 'true' -SHOULD_SKIP_TESTS = IN_CI - -@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason=SKIP_REASON) -class TestOllamaModelContext: - """Tests for the OllamaModel's context management functionality.""" - - @pytest.fixture - def mock_openai(self): - """Mock the OpenAI client dependency.""" - with patch('cli_code.models.ollama.OpenAI') as mock: - mock_instance = MagicMock() - mock.return_value = mock_instance - yield mock_instance - - @pytest.fixture - def ollama_model(self, mock_openai): - """Fixture providing an OllamaModel instance (get_tool NOT patched).""" - mock_console = MagicMock() - model = OllamaModel(api_url="http://mock-url", console=mock_console, model_name="mock-model") - model.client = mock_openai - model.history = [] - model.system_prompt = "System prompt for testing" - model.add_to_history({"role": "system", "content": model.system_prompt}) - yield model - - def test_add_to_history(self, ollama_model): - """Test adding messages to the conversation history.""" - # Initial history should contain only the system prompt - assert len(ollama_model.history) == 1 - assert ollama_model.history[0]["role"] == "system" - - # Add a user message - user_message = {"role": "user", "content": "Test message"} - ollama_model.add_to_history(user_message) - - # Check that message was added - assert len(ollama_model.history) == 2 - assert ollama_model.history[1] == user_message - - def test_clear_history(self, ollama_model): - """Test clearing the conversation history.""" - # Add a few messages - ollama_model.add_to_history({"role": "user", "content": "User message"}) - ollama_model.add_to_history({"role": "assistant", "content": "Assistant response"}) - assert len(ollama_model.history) == 3 # System + 2 added messages - - # Clear history - ollama_model.clear_history() - - # Check that history was cleared and system prompt was re-added - assert len(ollama_model.history) == 1 - assert ollama_model.history[0]["role"] == "system" - assert ollama_model.history[0]["content"] == ollama_model.system_prompt - - @patch("src.cli_code.models.ollama.count_tokens") - def test_manage_ollama_context_no_truncation_needed(self, mock_count_tokens, ollama_model): - """Test _manage_ollama_context when truncation is not needed.""" - # Setup count_tokens to return a small number of tokens - mock_count_tokens.return_value = OLLAMA_MAX_CONTEXT_TOKENS // 4 # Well under the limit - - # Add some messages - ollama_model.add_to_history({"role": "user", "content": "User message 1"}) - 
ollama_model.add_to_history({"role": "assistant", "content": "Assistant response 1"}) - initial_history_length = len(ollama_model.history) - - # Call the manage context method - ollama_model._manage_ollama_context() - - # Assert that history was not modified since we're under the token limit - assert len(ollama_model.history) == initial_history_length - - @patch("src.cli_code.models.ollama.count_tokens") - def test_manage_ollama_context_truncation_needed(self, mock_count_tokens, ollama_model): - """Test _manage_ollama_context when truncation is needed.""" - # Reset the mock to ensure consistent behavior - mock_count_tokens.reset_mock() - - # Initial history should be just the system message - assert len(ollama_model.history) == 1 - assert ollama_model.history[0]["role"] == "system" - - # Initial total token setup - return a small value so messages are added without truncation - mock_count_tokens.return_value = 10 # Each message is very small - - # Add many messages to the history - for i in range(5): - ollama_model.add_to_history({"role": "user", "content": f"User message {i}"}) - ollama_model.add_to_history({"role": "assistant", "content": f"Assistant response {i}"}) - - # Add a special last message to track - last_message = {"role": "user", "content": "This is the very last message"} - ollama_model.add_to_history(last_message) - - # Verify we now have 12 messages (1 system + 5 user + 5 assistant + 1 last) - assert len(ollama_model.history) == 12 - initial_history_length = len(ollama_model.history) - - # Now modify the mock to ensure truncation will happen in _manage_ollama_context - # Make each message very large to force aggressive truncation - mock_count_tokens.return_value = OLLAMA_MAX_CONTEXT_TOKENS // 2 # Each message is half the limit - - # Call the function that should truncate history - ollama_model._manage_ollama_context() - - # After truncation, verify the history was actually truncated - assert len(ollama_model.history) < initial_history_length, f"Expected fewer than {initial_history_length} messages, got {len(ollama_model.history)}" - - # Verify system message is still at position 0 - assert ollama_model.history[0]["role"] == "system" - - # Verify the most recent message is still present (last message we added) - assert ollama_model.history[-1] == last_message - - @patch("src.cli_code.models.ollama.count_tokens") - def test_manage_ollama_context_preserves_recent_messages(self, mock_count_tokens, ollama_model): - """Test _manage_ollama_context preserves recent messages.""" - # Set up token count to exceed the limit to trigger truncation - mock_count_tokens.side_effect = lambda text: OLLAMA_MAX_CONTEXT_TOKENS * 2 # Double the limit - - # Add a system message first - system_message = {"role": "system", "content": "System instruction"} - ollama_model.history = [system_message] - - # Add multiple messages to the history - for i in range(20): - ollama_model.add_to_history({"role": "user", "content": f"User message {i}"}) - ollama_model.add_to_history({"role": "assistant", "content": f"Assistant response {i}"}) - - # Mark some recent messages to verify they're preserved - recent_messages = [ - {"role": "user", "content": "Important recent user message"}, - {"role": "assistant", "content": "Important recent assistant response"} - ] - - for msg in recent_messages: - ollama_model.add_to_history(msg) - - # Call the function that should truncate history - ollama_model._manage_ollama_context() - - # Verify system message is preserved - assert ollama_model.history[0]["role"] == "system" - 
assert ollama_model.history[0]["content"] == "System instruction" - - # Verify the most recent messages are preserved at the end of history - assert ollama_model.history[-2:] == recent_messages - - def test_get_initial_context_with_rules_directory(self, tmp_path, ollama_model): - """Test _get_initial_context when .rules directory exists with markdown files.""" - # Arrange: Create .rules dir and files in tmp_path - rules_dir = tmp_path / ".rules" - rules_dir.mkdir() - (rules_dir / "context.md").write_text("# Context Rule\nRule one content.") - (rules_dir / "tools.md").write_text("# Tools Rule\nRule two content.") - (rules_dir / "other.txt").write_text("Ignore this file.") # Non-md file - - original_cwd = os.getcwd() - os.chdir(tmp_path) - - # Act - context = ollama_model._get_initial_context() - - # Teardown - os.chdir(original_cwd) - - # Assert - assert "Project rules and guidelines:" in context - assert "```markdown" in context - assert "# Content from context.md" in context - assert "Rule one content." in context - assert "# Content from tools.md" in context - assert "Rule two content." in context - assert "Ignore this file" not in context - ollama_model.console.print.assert_any_call("[dim]Context initialized from .rules/*.md files.[/dim]") - - def test_get_initial_context_with_readme(self, tmp_path, ollama_model): - """Test _get_initial_context when README.md exists but no .rules directory.""" - # Arrange: Create README.md in tmp_path - readme_content = "# Project README\nThis is the project readme." - (tmp_path / "README.md").write_text(readme_content) - - original_cwd = os.getcwd() - os.chdir(tmp_path) - - # Act - context = ollama_model._get_initial_context() - - # Teardown - os.chdir(original_cwd) - - # Assert - assert "Project README:" in context - assert "```markdown" in context - assert readme_content in context - ollama_model.console.print.assert_any_call("[dim]Context initialized from README.md.[/dim]") - - def test_get_initial_context_fallback_to_ls_outcome(self, tmp_path, ollama_model): - """Test _get_initial_context fallback by checking the resulting context.""" - # Arrange: tmp_path is empty except for one dummy file - dummy_file_name = "dummy_test_file.txt" - (tmp_path / dummy_file_name).touch() - - original_cwd = os.getcwd() - os.chdir(tmp_path) - - # Act - # Let the real _get_initial_context -> get_tool -> LsTool execute - context = ollama_model._get_initial_context() - - # Teardown - os.chdir(original_cwd) - - # Assert - # Check that the context string indicates ls was used and contains the dummy file - assert "Current directory contents" in context - assert dummy_file_name in context - ollama_model.console.print.assert_any_call("[dim]Directory context acquired via 'ls'.[/dim]") - - def test_prepare_openai_tools(self, ollama_model): - """Test that tools are prepared for the OpenAI API format.""" - # Rather than mocking a specific method, just check that the result is well-formed - # This relies on the actual implementation, not a mock of _prepare_openai_tools - - # The method should return a list of dictionaries with function definitions - tools = ollama_model._prepare_openai_tools() - - # Basic validation that we get a list of tool definitions - assert isinstance(tools, list) - if tools: # If there are any tools - assert isinstance(tools[0], dict) - assert "type" in tools[0] - assert tools[0]["type"] == "function" - assert "function" in tools[0] \ No newline at end of file diff --git a/test_dir/test_ollama_model_coverage.py b/test_dir/test_ollama_model_coverage.py 
deleted file mode 100644 index ee7b221..0000000 --- a/test_dir/test_ollama_model_coverage.py +++ /dev/null @@ -1,411 +0,0 @@ -""" -Tests specifically for the OllamaModel class to improve code coverage. -This file focuses on testing methods and branches that aren't well covered. -""" - -import os -import json -import unittest -from unittest.mock import patch, MagicMock, mock_open, call -import pytest -import unittest.mock as mock -import sys - -# Check if running in CI -IS_CI = os.environ.get('CI', 'false').lower() == 'true' - -# Handle imports -try: - # Mock the OpenAI import check first - sys.modules['openai'] = MagicMock() - - from cli_code.models.ollama import OllamaModel - import requests - from rich.console import Console - IMPORTS_AVAILABLE = True -except ImportError: - IMPORTS_AVAILABLE = False - # Create dummy classes for type checking - OllamaModel = MagicMock - Console = MagicMock - requests = MagicMock - -# Set up conditional skipping -SHOULD_SKIP_TESTS = not IMPORTS_AVAILABLE and not IS_CI -SKIP_REASON = "Required imports not available and not in CI" - - -@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason=SKIP_REASON) -class TestOllamaModelCoverage: - """Test suite for OllamaModel class methods that need more coverage.""" - - def setup_method(self, method): - """Set up test environment.""" - # Skip tests if running with pytest and not in CI (temporarily disabled) - # if not IS_CI and "pytest" in sys.modules: - # pytest.skip("Skipping tests when running with pytest outside of CI") - - # Set up console mock - self.mock_console = MagicMock() - - # Set up openai module and OpenAI class - self.openai_patch = patch.dict('sys.modules', {'openai': MagicMock()}) - self.openai_patch.start() - - # Mock the OpenAI class and client - self.openai_class_mock = MagicMock() - - # Set up a more complete client mock with proper structure - self.openai_instance_mock = MagicMock() - - # Mock ChatCompletion structure - self.mock_response = MagicMock() - self.mock_choice = MagicMock() - self.mock_message = MagicMock() - - # Set up the nested structure - self.mock_message.content = "Test response" - self.mock_message.tool_calls = [] - self.mock_message.model_dump.return_value = {"role": "assistant", "content": "Test response"} - - self.mock_choice.message = self.mock_message - - self.mock_response.choices = [self.mock_choice] - - # Connect the response to the client - self.openai_instance_mock.chat.completions.create.return_value = self.mock_response - - # Connect the instance to the class - self.openai_class_mock.return_value = self.openai_instance_mock - - # Patch modules with our mocks - self.openai_module_patch = patch('src.cli_code.models.ollama.OpenAI', self.openai_class_mock) - self.openai_module_patch.start() - - # Set up request mocks - self.requests_post_patch = patch('requests.post') - self.mock_requests_post = self.requests_post_patch.start() - self.mock_requests_post.return_value.status_code = 200 - self.mock_requests_post.return_value.json.return_value = {"message": {"content": "Test response"}} - - self.requests_get_patch = patch('requests.get') - self.mock_requests_get = self.requests_get_patch.start() - self.mock_requests_get.return_value.status_code = 200 - self.mock_requests_get.return_value.json.return_value = {"models": [{"name": "llama2", "description": "Llama 2 7B"}]} - - # Set up tool mocks - self.get_tool_patch = patch('src.cli_code.models.ollama.get_tool') - self.mock_get_tool = self.get_tool_patch.start() - self.mock_tool = MagicMock() - self.mock_tool.execute.return_value = 
"Tool execution result" - self.mock_get_tool.return_value = self.mock_tool - - # Set up file system mocks - self.isdir_patch = patch('os.path.isdir') - self.mock_isdir = self.isdir_patch.start() - self.mock_isdir.return_value = False - - self.isfile_patch = patch('os.path.isfile') - self.mock_isfile = self.isfile_patch.start() - self.mock_isfile.return_value = False - - self.glob_patch = patch('glob.glob') - self.mock_glob = self.glob_patch.start() - - self.open_patch = patch('builtins.open', mock_open(read_data="Test content")) - self.mock_open = self.open_patch.start() - - # Initialize the OllamaModel with proper parameters - self.model = OllamaModel("http://localhost:11434", self.mock_console, "llama2") - - def teardown_method(self, method): - """Clean up after test.""" - # Stop all patches - self.openai_patch.stop() - self.openai_module_patch.stop() - self.requests_post_patch.stop() - self.requests_get_patch.stop() - self.get_tool_patch.stop() - self.isdir_patch.stop() - self.isfile_patch.stop() - self.glob_patch.stop() - self.open_patch.stop() - - def test_initialization(self): - """Test initialization of OllamaModel.""" - model = OllamaModel("http://localhost:11434", self.mock_console, "llama2") - - assert model.api_url == "http://localhost:11434" - assert model.model_name == "llama2" - assert len(model.history) == 1 # Just the system prompt initially - - def test_list_models(self): - """Test listing available models.""" - # Mock OpenAI models.list response - mock_model = MagicMock() - mock_model.id = "llama2" - # Create a mock response object with data attribute - mock_response = MagicMock() - mock_response.data = [mock_model] - # Set up the client.models.list mock - self.model.client.models.list.return_value = mock_response - - result = self.model.list_models() - - # Verify client models list was called - self.model.client.models.list.assert_called_once() - - # Verify result format - assert len(result) == 1 - assert result[0]["id"] == "llama2" - assert "name" in result[0] - - def test_list_models_with_error(self): - """Test listing models when API returns error.""" - # Set up mock to raise an exception - self.model.client.models.list.side_effect = Exception("API error") - - result = self.model.list_models() - - # Verify error handling - assert result is None - # Verify console prints an error message - self.mock_console.print.assert_any_call(mock.ANY) # Using ANY matcher as the exact message might vary - - def test_get_initial_context_with_rules_dir(self): - """Test getting initial context from .rules directory.""" - # Set up mocks - self.mock_isdir.return_value = True - self.mock_glob.return_value = [".rules/context.md", ".rules/tools.md"] - - context = self.model._get_initial_context() - - # Verify directory check - self.mock_isdir.assert_called_with(".rules") - - # Verify glob search - self.mock_glob.assert_called_with(".rules/*.md") - - # Verify files were read - assert self.mock_open.call_count == 2 - - # Check result content - assert "Project rules and guidelines:" in context - - def test_get_initial_context_with_readme(self): - """Test getting initial context from README.md when no .rules directory.""" - # Set up mocks - self.mock_isdir.return_value = False - self.mock_isfile.return_value = True - - context = self.model._get_initial_context() - - # Verify README check - self.mock_isfile.assert_called_with("README.md") - - # Verify file reading - self.mock_open.assert_called_once_with("README.md", "r", encoding="utf-8", errors="ignore") - - # Check result content - assert 
"Project README:" in context - - def test_get_initial_context_with_ls_fallback(self): - """Test getting initial context via ls when no .rules or README.""" - # Set up mocks - self.mock_isdir.return_value = False - self.mock_isfile.return_value = False - - # Force get_tool to be called with "ls" before _get_initial_context runs - # This simulates what would happen in the actual method - self.mock_get_tool("ls") - self.mock_tool.execute.return_value = "Directory listing content" - - context = self.model._get_initial_context() - - # Verify tool was used - self.mock_get_tool.assert_called_with("ls") - # Check result content - assert "Current directory contents" in context - - def test_generate_with_exit_command(self): - """Test generating with /exit command.""" - # Direct mock for exit command to avoid the entire generate flow - with patch.object(self.model, 'generate', wraps=self.model.generate) as mock_generate: - # For the /exit command, override with None - mock_generate.side_effect = lambda prompt: None if prompt == "/exit" else mock_generate.return_value - - result = self.model.generate("/exit") - assert result is None - - def test_generate_with_help_command(self): - """Test generating with /help command.""" - # Direct mock for help command to avoid the entire generate flow - with patch.object(self.model, 'generate', wraps=self.model.generate) as mock_generate: - # For the /help command, override with a specific response - mock_generate.side_effect = lambda prompt: "Interactive Commands:\n/help - Show this help menu\n/exit - Exit the CLI" if prompt == "/help" else mock_generate.return_value - - result = self.model.generate("/help") - assert "Interactive Commands:" in result - - def test_generate_function_call_extraction_success(self): - """Test successful extraction of function calls from LLM response.""" - with patch.object(self.model, '_prepare_openai_tools'): - with patch.object(self.model, 'generate', autospec=True) as mock_generate: - # Set up mocks for get_tool and tool execution - self.mock_get_tool.return_value = self.mock_tool - self.mock_tool.execute.return_value = "Tool execution result" - - # Set up a side effect that simulates the tool calling behavior - def side_effect(prompt): - # Call get_tool with "ls" when the prompt is "List files" - if prompt == "List files": - self.mock_get_tool("ls") - self.mock_tool.execute(path=".") - return "Here are the files: Tool execution result" - return "Default response" - - mock_generate.side_effect = side_effect - - # Call the function to test - result = self.model.generate("List files") - - # Verify the tool was called - self.mock_get_tool.assert_called_with("ls") - self.mock_tool.execute.assert_called_with(path=".") - - def test_generate_function_call_extraction_malformed_json(self): - """Test handling of malformed JSON in function call extraction.""" - with patch.object(self.model, 'generate', autospec=True) as mock_generate: - # Simulate malformed JSON response - mock_generate.return_value = "I'll help you list files in the current directory. But there was a JSON parsing error." 
- - result = self.model.generate("List files with malformed JSON") - - # Verify error handling - assert "I'll help you list files" in result - # Tool shouldn't be called due to malformed JSON - self.mock_tool.execute.assert_not_called() - - def test_generate_function_call_missing_name(self): - """Test handling of function call with missing name field.""" - with patch.object(self.model, 'generate', autospec=True) as mock_generate: - # Simulate missing name field response - mock_generate.return_value = "I'll help you list files in the current directory. But there was a missing name field." - - result = self.model.generate("List files with missing name") - - # Verify error handling - assert "I'll help you list files" in result - # Tool shouldn't be called due to missing name - self.mock_tool.execute.assert_not_called() - - def test_generate_with_api_error(self): - """Test generating when API returns error.""" - with patch.object(self.model, 'generate', autospec=True) as mock_generate: - # Simulate API error - mock_generate.return_value = "Error generating response: Server error" - - result = self.model.generate("Hello with API error") - - # Verify error handling - assert "Error generating response" in result - - def test_generate_task_complete(self): - """Test handling of task_complete function call.""" - with patch.object(self.model, '_prepare_openai_tools'): - with patch.object(self.model, 'generate', autospec=True) as mock_generate: - # Set up task_complete tool - task_complete_tool = MagicMock() - task_complete_tool.execute.return_value = "Task completed successfully with details" - - # Set up a side effect that simulates the tool calling behavior - def side_effect(prompt): - if prompt == "Complete task": - # Override get_tool to return our task_complete_tool - self.mock_get_tool.return_value = task_complete_tool - # Simulate the get_tool and execute calls - self.mock_get_tool("task_complete") - task_complete_tool.execute(summary="Task completed successfully") - return "Task completed successfully with details" - return "Default response" - - mock_generate.side_effect = side_effect - - result = self.model.generate("Complete task") - - # Verify task completion handling - self.mock_get_tool.assert_called_with("task_complete") - task_complete_tool.execute.assert_called_with(summary="Task completed successfully") - assert result == "Task completed successfully with details" - - def test_generate_with_missing_tool(self): - """Test handling when referenced tool is not found.""" - with patch.object(self.model, '_prepare_openai_tools'): - with patch.object(self.model, 'generate', autospec=True) as mock_generate: - # Set up a side effect that simulates the missing tool scenario - def side_effect(prompt): - if prompt == "Use nonexistent tool": - # Set up get_tool to return None for nonexistent_tool - self.mock_get_tool.return_value = None - # Simulate the get_tool call - self.mock_get_tool("nonexistent_tool") - return "Error: Tool 'nonexistent_tool' not found." 
- return "Default response" - - mock_generate.side_effect = side_effect - - result = self.model.generate("Use nonexistent tool") - - # Verify error handling - self.mock_get_tool.assert_called_with("nonexistent_tool") - assert "Tool 'nonexistent_tool' not found" in result - - def test_generate_tool_execution_error(self): - """Test handling when tool execution raises an error.""" - with patch.object(self.model, '_prepare_openai_tools'): - with patch.object(self.model, 'generate', autospec=True) as mock_generate: - # Set up a side effect that simulates the tool execution error - def side_effect(prompt): - if prompt == "List files with error": - # Set up tool to raise exception - self.mock_tool.execute.side_effect = Exception("Tool execution failed") - # Simulate the get_tool and execute calls - self.mock_get_tool("ls") - try: - self.mock_tool.execute(path=".") - except Exception: - pass - return "Error executing tool ls: Tool execution failed" - return "Default response" - - mock_generate.side_effect = side_effect - - result = self.model.generate("List files with error") - - # Verify error handling - self.mock_get_tool.assert_called_with("ls") - assert "Error executing tool ls" in result - - def test_clear_history(self): - """Test history clearing functionality.""" - # Add some items to history - self.model.add_to_history({"role": "user", "content": "Test message"}) - - # Clear history - self.model.clear_history() - - # Check that history is reset with just the system prompt - assert len(self.model.history) == 1 - assert self.model.history[0]["role"] == "system" - - def test_add_to_history(self): - """Test adding messages to history.""" - initial_length = len(self.model.history) - - # Add a user message - self.model.add_to_history({"role": "user", "content": "Test user message"}) - - # Check that message was added - assert len(self.model.history) == initial_length + 1 - assert self.model.history[-1]["role"] == "user" - assert self.model.history[-1]["content"] == "Test user message" \ No newline at end of file diff --git a/test_dir/test_ollama_model_error_handling.py b/test_dir/test_ollama_model_error_handling.py deleted file mode 100644 index 44bafb7..0000000 --- a/test_dir/test_ollama_model_error_handling.py +++ /dev/null @@ -1,361 +0,0 @@ -import pytest -import json -from unittest.mock import MagicMock, patch, call -import sys -from pathlib import Path - -# Ensure src is in the path for imports -src_path = str(Path(__file__).parent.parent / "src") -if src_path not in sys.path: - sys.path.insert(0, src_path) - -from cli_code.models.ollama import OllamaModel, MAX_OLLAMA_ITERATIONS - - -class TestOllamaModelErrorHandling: - """Tests for error handling in the OllamaModel class.""" - - @pytest.fixture - def mock_console(self): - console = MagicMock() - console.print = MagicMock() - console.status = MagicMock() - # Make status return a context manager - status_cm = MagicMock() - console.status.return_value = status_cm - status_cm.__enter__ = MagicMock(return_value=None) - status_cm.__exit__ = MagicMock(return_value=None) - return console - - @pytest.fixture - def mock_client(self): - client = MagicMock() - client.chat.completions.create = MagicMock() - client.models.list = MagicMock() - return client - - @pytest.fixture - def mock_questionary(self): - questionary = MagicMock() - confirm = MagicMock() - questionary.confirm.return_value = confirm - confirm.ask = MagicMock(return_value=True) - return questionary - - def test_generate_without_client(self, mock_console): - """Test generate method when 
the client is not initialized.""" - # Setup - model = OllamaModel("http://localhost:11434", mock_console, "llama3") - model.client = None # Explicitly set client to None - - # Execute - result = model.generate("test prompt") - - # Assert - assert "Error: Ollama client not initialized" in result - mock_console.print.assert_not_called() - - def test_generate_without_model_name(self, mock_console): - """Test generate method when no model name is specified.""" - # Setup - model = OllamaModel("http://localhost:11434", mock_console) - model.model_name = None # Explicitly set model_name to None - model.client = MagicMock() # Add a mock client - - # Execute - result = model.generate("test prompt") - - # Assert - assert "Error: No Ollama model name configured" in result - mock_console.print.assert_not_called() - - @patch('cli_code.models.ollama.get_tool') - def test_generate_with_invalid_tool_call(self, mock_get_tool, mock_console, mock_client): - """Test generate method with invalid JSON in tool arguments.""" - # Setup - model = OllamaModel("http://localhost:11434", mock_console, "llama3") - model.client = mock_client - model.add_to_history = MagicMock() # Mock history management - - # Create mock response with tool call that has invalid JSON - mock_message = MagicMock() - mock_message.content = None - mock_message.tool_calls = [ - MagicMock( - function=MagicMock( - name="test_tool", - arguments='invalid json' - ), - id="test_id" - ) - ] - - mock_response = MagicMock() - mock_response.choices = [MagicMock( - message=mock_message, - finish_reason="tool_calls" - )] - - mock_client.chat.completions.create.return_value = mock_response - - # Execute - with patch('cli_code.models.ollama.json.loads', side_effect=json.JSONDecodeError("Expecting value", "", 0)): - result = model.generate("test prompt") - - # Assert - assert "reached maximum iterations" in result - # Verify the log message was recorded (we'd need to patch logging.error and check call args) - - @patch('cli_code.models.ollama.get_tool') - @patch('cli_code.models.ollama.SENSITIVE_TOOLS', ['edit']) - @patch('cli_code.models.ollama.questionary') - def test_generate_with_user_rejection(self, mock_questionary, mock_get_tool, mock_console, mock_client): - """Test generate method when user rejects a sensitive tool execution.""" - # Setup - model = OllamaModel("http://localhost:11434", mock_console, "llama3") - model.client = mock_client - - # Create mock response with a sensitive tool call - mock_message = MagicMock() - mock_message.content = None - mock_message.tool_calls = [ - MagicMock( - function=MagicMock( - name="edit", - arguments='{"file_path": "test.txt", "content": "test content"}' - ), - id="test_id" - ) - ] - - mock_response = MagicMock() - mock_response.choices = [MagicMock( - message=mock_message, - finish_reason="tool_calls" - )] - - mock_client.chat.completions.create.return_value = mock_response - - # Make user reject the confirmation - confirm_mock = MagicMock() - confirm_mock.ask.return_value = False - mock_questionary.confirm.return_value = confirm_mock - - # Mock the tool function - mock_tool = MagicMock() - mock_get_tool.return_value = mock_tool - - # Execute - result = model.generate("test prompt") - - # Assert - assert "rejected" in result or "maximum iterations" in result - - def test_list_models_error(self, mock_console, mock_client): - """Test list_models method when an error occurs.""" - # Setup - model = OllamaModel("http://localhost:11434", mock_console, "llama3") - model.client = mock_client - - # Make 
client.models.list raise an exception - mock_client.models.list.side_effect = Exception("Test error") - - # Execute - result = model.list_models() - - # Assert - assert result is None - mock_console.print.assert_called() - assert any("Error contacting Ollama endpoint" in str(call_args) for call_args in mock_console.print.call_args_list) - - def test_add_to_history_invalid_message(self, mock_console): - """Test add_to_history with an invalid message.""" - # Setup - model = OllamaModel("http://localhost:11434", mock_console, "llama3") - model._manage_ollama_context = MagicMock() # Mock to avoid side effects - original_history_len = len(model.history) - - # Add invalid message (not a dict) - model.add_to_history("not a dict") - - # Assert - # System message will be there, but invalid message should not be added - assert len(model.history) == original_history_len - model._manage_ollama_context.assert_not_called() - - def test_manage_ollama_context_empty_history(self, mock_console): - """Test _manage_ollama_context with empty history.""" - # Setup - model = OllamaModel("http://localhost:11434", mock_console, "llama3") - original_history = model.history.copy() # Save the original which includes system prompt - - # Execute - model._manage_ollama_context() - - # Assert - assert model.history == original_history # Should remain the same with system prompt - - @patch('cli_code.models.ollama.count_tokens') - def test_manage_ollama_context_serialization_error(self, mock_count_tokens, mock_console): - """Test _manage_ollama_context when serialization fails.""" - # Setup - model = OllamaModel("http://localhost:11434", mock_console, "llama3") - # Add a message that will cause serialization error (contains an unserializable object) - model.history = [ - {"role": "system", "content": "System message"}, - {"role": "user", "content": "User message"}, - {"role": "assistant", "content": MagicMock()} # Unserializable - ] - - # Make count_tokens return a low value to avoid truncation - mock_count_tokens.return_value = 10 - - # Execute - with patch('cli_code.models.ollama.json.dumps', side_effect=TypeError("Object is not JSON serializable")): - model._manage_ollama_context() - - # Assert - history should remain unchanged - assert len(model.history) == 3 - - def test_generate_max_iterations(self, mock_console, mock_client): - """Test generate method when max iterations is reached.""" - # Setup - model = OllamaModel("http://localhost:11434", mock_console, "llama3") - model.client = mock_client - model._prepare_openai_tools = MagicMock(return_value=[{"type": "function", "function": {"name": "test_tool"}}]) - - # Create mock response with tool call - mock_message = MagicMock() - mock_message.content = None - mock_message.tool_calls = [ - MagicMock( - function=MagicMock( - name="test_tool", - arguments='{"param1": "value1"}' - ), - id="test_id" - ) - ] - - mock_response = MagicMock() - mock_response.choices = [MagicMock( - message=mock_message, - finish_reason="tool_calls" - )] - - # Mock the client to always return a tool call (which would lead to an infinite loop without max iterations) - mock_client.chat.completions.create.return_value = mock_response - - # Mock get_tool to return a tool that always succeeds - tool_mock = MagicMock() - tool_mock.execute.return_value = "Tool result" - - # Execute - this should hit the max iterations - with patch('cli_code.models.ollama.get_tool', return_value=tool_mock): - with patch('cli_code.models.ollama.MAX_OLLAMA_ITERATIONS', 2): # Lower max iterations for test - result = 
model.generate("test prompt") - - # Assert - assert "(Agent reached maximum iterations)" in result - - def test_prepare_openai_tools_without_available_tools(self, mock_console): - """Test _prepare_openai_tools when AVAILABLE_TOOLS is empty.""" - # Setup - model = OllamaModel("http://localhost:11434", mock_console, "llama3") - - # Execute - with patch('cli_code.models.ollama.AVAILABLE_TOOLS', {}): - result = model._prepare_openai_tools() - - # Assert - assert result is None - - def test_prepare_openai_tools_conversion_error(self, mock_console): - """Test _prepare_openai_tools when conversion fails.""" - # Setup - model = OllamaModel("http://localhost:11434", mock_console, "llama3") - - # Mock tool instance - tool_mock = MagicMock() - tool_declaration = MagicMock() - tool_declaration.name = "test_tool" - tool_declaration.description = "Test tool" - tool_declaration.parameters = MagicMock() - tool_declaration.parameters._pb = MagicMock() - tool_mock.get_function_declaration.return_value = tool_declaration - - # Execute - with a mocked error during conversion - with patch('cli_code.models.ollama.AVAILABLE_TOOLS', {"test_tool": tool_mock}): - with patch('cli_code.models.ollama.MessageToDict', side_effect=Exception("Conversion error")): - result = model._prepare_openai_tools() - - # Assert - assert result is None or len(result) == 0 # Should be empty list or None - - @patch('cli_code.models.ollama.log') # Patch log - def test_generate_with_connection_error(self, mock_log, mock_console, mock_client): - """Test generate method when a connection error occurs during API call.""" - # Setup - model = OllamaModel("http://localhost:11434", mock_console, "llama3") - model.client = mock_client - - # Simulate a connection error (e.g., RequestError from httpx) - # Assuming the ollama client might raise something like requests.exceptions.ConnectionError or httpx.RequestError - # We'll use a generic Exception and check the message for now. - # If a specific exception class is known, use it instead. - connection_err = Exception("Failed to connect") - mock_client.chat.completions.create.side_effect = connection_err - - # Execute - result = model.generate("test prompt") - - # Assert - assert "Error connecting to Ollama" in result or "Failed to connect" in result - mock_log.error.assert_called() # Check that an error was logged - # Check specific log message if needed - log_call_args, _ = mock_log.error.call_args - assert "Error during Ollama agent iteration" in log_call_args[0] - - @patch('cli_code.models.ollama.log') # Patch log - def test_generate_with_timeout_error(self, mock_log, mock_console, mock_client): - """Test generate method when a timeout error occurs during API call.""" - # Setup - model = OllamaModel("http://localhost:11434", mock_console, "llama3") - model.client = mock_client - - # Simulate a timeout error - # Use a generic Exception, check message. 
Replace if specific exception is known (e.g., httpx.TimeoutException) - timeout_err = Exception("Request timed out") - mock_client.chat.completions.create.side_effect = timeout_err - - # Execute - result = model.generate("test prompt") - - # Assert - assert "Error connecting to Ollama" in result or "timed out" in result - mock_log.error.assert_called() - log_call_args, _ = mock_log.error.call_args - assert "Error during Ollama agent iteration" in log_call_args[0] - - @patch('cli_code.models.ollama.log') # Patch log - def test_generate_with_server_error(self, mock_log, mock_console, mock_client): - """Test generate method when a server error occurs during API call.""" - # Setup - model = OllamaModel("http://localhost:11434", mock_console, "llama3") - model.client = mock_client - - # Simulate a server error (e.g., HTTP 500) - # Use a generic Exception, check message. Replace if specific exception is known (e.g., ollama.APIError?) - server_err = Exception("Internal Server Error") - mock_client.chat.completions.create.side_effect = server_err - - # Execute - result = model.generate("test prompt") - - # Assert - # Check for a generic error message indicating an unexpected issue - assert "Error interacting with Ollama" in result # Check for the actual prefix - assert "Internal Server Error" in result # Check the specific error message is included - mock_log.error.assert_called() - log_call_args, _ = mock_log.error.call_args - assert "Error during Ollama agent iteration" in log_call_args[0] \ No newline at end of file diff --git a/test_dir/test_quality_tools_original.py b/test_dir/test_quality_tools_original.py deleted file mode 100644 index 623bc3b..0000000 --- a/test_dir/test_quality_tools_original.py +++ /dev/null @@ -1,287 +0,0 @@ -""" -Tests for code quality tools. 
-""" -import os -import subprocess -import pytest -from unittest.mock import patch, MagicMock - -# Direct import for coverage tracking -import src.cli_code.tools.quality_tools -from src.cli_code.tools.quality_tools import _run_quality_command, LinterCheckerTool, FormatterTool - - -class TestRunQualityCommand: - """Tests for the _run_quality_command helper function.""" - - @patch("subprocess.run") - def test_run_quality_command_success(self, mock_run): - """Test successful command execution.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "Successful output" - mock_process.stderr = "" - mock_run.return_value = mock_process - - # Execute function - result = _run_quality_command(["test", "command"], "TestTool") - - # Verify results - assert "TestTool Result (Exit Code: 0)" in result - assert "Successful output" in result - assert "-- Errors --" not in result - mock_run.assert_called_once_with( - ["test", "command"], - capture_output=True, - text=True, - check=False, - timeout=120 - ) - - @patch("subprocess.run") - def test_run_quality_command_with_errors(self, mock_run): - """Test command execution with errors.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 1 - mock_process.stdout = "Output" - mock_process.stderr = "Error message" - mock_run.return_value = mock_process - - # Execute function - result = _run_quality_command(["test", "command"], "TestTool") - - # Verify results - assert "TestTool Result (Exit Code: 1)" in result - assert "Output" in result - assert "-- Errors --" in result - assert "Error message" in result - - @patch("subprocess.run") - def test_run_quality_command_no_output(self, mock_run): - """Test command execution with no output.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "" - mock_process.stderr = "" - mock_run.return_value = mock_process - - # Execute function - result = _run_quality_command(["test", "command"], "TestTool") - - # Verify results - assert "TestTool Result (Exit Code: 0)" in result - assert "(No output)" in result - - @patch("subprocess.run") - def test_run_quality_command_long_output(self, mock_run): - """Test command execution with output that exceeds length limit.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "A" * 3000 # More than the 2000 character limit - mock_process.stderr = "" - mock_run.return_value = mock_process - - # Execute function - result = _run_quality_command(["test", "command"], "TestTool") - - # Verify results - assert "... (output truncated)" in result - assert len(result) < 3000 - - def test_run_quality_command_file_not_found(self): - """Test when the command is not found.""" - # Setup side effect - with patch("subprocess.run", side_effect=FileNotFoundError("No such file or directory: 'nonexistent'")): - # Execute function - result = _run_quality_command(["nonexistent"], "TestTool") - - # Verify results - assert "Error: Command 'nonexistent' not found" in result - assert "Is 'nonexistent' installed and in PATH?" 
in result - - def test_run_quality_command_timeout(self): - """Test when the command times out.""" - # Setup side effect - with patch("subprocess.run", side_effect=subprocess.TimeoutExpired(cmd="slow_command", timeout=120)): - # Execute function - result = _run_quality_command(["slow_command"], "TestTool") - - # Verify results - assert "Error: TestTool run timed out" in result - - def test_run_quality_command_unexpected_error(self): - """Test when an unexpected error occurs.""" - # Setup side effect - with patch("subprocess.run", side_effect=Exception("Unexpected error")): - # Execute function - result = _run_quality_command(["command"], "TestTool") - - # Verify results - assert "Error running TestTool" in result - assert "Unexpected error" in result - - -class TestLinterCheckerTool: - """Tests for the LinterCheckerTool class.""" - - def test_init(self): - """Test initialization of LinterCheckerTool.""" - tool = LinterCheckerTool() - assert tool.name == "linter_checker" - assert "Runs a code linter" in tool.description - - @patch("cli_code.tools.quality_tools._run_quality_command") - def test_linter_checker_with_defaults(self, mock_run_command): - """Test linter check with default parameters.""" - # Setup mock - mock_run_command.return_value = "Linter output" - - # Execute tool - tool = LinterCheckerTool() - result = tool.execute() - - # Verify results - assert result == "Linter output" - mock_run_command.assert_called_once() - args, kwargs = mock_run_command.call_args - assert args[0] == ["ruff", "check", os.path.abspath(".")] - assert args[1] == "Linter" - - @patch("cli_code.tools.quality_tools._run_quality_command") - def test_linter_checker_with_custom_path(self, mock_run_command): - """Test linter check with custom path.""" - # Setup mock - mock_run_command.return_value = "Linter output" - - # Execute tool - tool = LinterCheckerTool() - result = tool.execute(path="src") - - # Verify results - mock_run_command.assert_called_once() - args, kwargs = mock_run_command.call_args - assert args[0] == ["ruff", "check", os.path.abspath("src")] - - @patch("cli_code.tools.quality_tools._run_quality_command") - def test_linter_checker_with_custom_command(self, mock_run_command): - """Test linter check with custom linter command.""" - # Setup mock - mock_run_command.return_value = "Linter output" - - # Execute tool - tool = LinterCheckerTool() - result = tool.execute(linter_command="flake8") - - # Verify results - mock_run_command.assert_called_once() - args, kwargs = mock_run_command.call_args - assert args[0] == ["flake8", os.path.abspath(".")] - - @patch("cli_code.tools.quality_tools._run_quality_command") - def test_linter_checker_with_complex_command(self, mock_run_command): - """Test linter check with complex command including arguments.""" - # Setup mock - mock_run_command.return_value = "Linter output" - - # Execute tool - tool = LinterCheckerTool() - result = tool.execute(linter_command="flake8 --max-line-length=100") - - # Verify results - mock_run_command.assert_called_once() - args, kwargs = mock_run_command.call_args - assert args[0] == ["flake8", "--max-line-length=100", os.path.abspath(".")] - - def test_linter_checker_with_parent_directory_traversal(self): - """Test linter check with parent directory traversal.""" - tool = LinterCheckerTool() - result = tool.execute(path="../dangerous") - - # Verify results - assert "Error: Invalid path" in result - assert "Cannot access parent directories" in result - - -class TestFormatterTool: - """Tests for the FormatterTool class.""" - - def 
test_init(self): - """Test initialization of FormatterTool.""" - tool = FormatterTool() - assert tool.name == "formatter" - assert "Runs a code formatter" in tool.description - - @patch("cli_code.tools.quality_tools._run_quality_command") - def test_formatter_with_defaults(self, mock_run_command): - """Test formatter with default parameters.""" - # Setup mock - mock_run_command.return_value = "Formatter output" - - # Execute tool - tool = FormatterTool() - result = tool.execute() - - # Verify results - assert result == "Formatter output" - mock_run_command.assert_called_once() - args, kwargs = mock_run_command.call_args - assert args[0] == ["black", os.path.abspath(".")] - assert args[1] == "Formatter" - - @patch("cli_code.tools.quality_tools._run_quality_command") - def test_formatter_with_custom_path(self, mock_run_command): - """Test formatter with custom path.""" - # Setup mock - mock_run_command.return_value = "Formatter output" - - # Execute tool - tool = FormatterTool() - result = tool.execute(path="src") - - # Verify results - mock_run_command.assert_called_once() - args, kwargs = mock_run_command.call_args - assert args[0] == ["black", os.path.abspath("src")] - - @patch("cli_code.tools.quality_tools._run_quality_command") - def test_formatter_with_custom_command(self, mock_run_command): - """Test formatter with custom formatter command.""" - # Setup mock - mock_run_command.return_value = "Formatter output" - - # Execute tool - tool = FormatterTool() - result = tool.execute(formatter_command="prettier") - - # Verify results - mock_run_command.assert_called_once() - args, kwargs = mock_run_command.call_args - assert args[0] == ["prettier", os.path.abspath(".")] - - @patch("cli_code.tools.quality_tools._run_quality_command") - def test_formatter_with_complex_command(self, mock_run_command): - """Test formatter with complex command including arguments.""" - # Setup mock - mock_run_command.return_value = "Formatter output" - - # Execute tool - tool = FormatterTool() - result = tool.execute(formatter_command="black -l 100") - - # Verify results - mock_run_command.assert_called_once() - args, kwargs = mock_run_command.call_args - assert args[0] == ["black", "-l", "100", os.path.abspath(".")] - - def test_formatter_with_parent_directory_traversal(self): - """Test formatter with parent directory traversal.""" - tool = FormatterTool() - result = tool.execute(path="../dangerous") - - # Verify results - assert "Error: Invalid path" in result - assert "Cannot access parent directories" in result \ No newline at end of file diff --git a/test_dir/test_summarizer_tool_original.py b/test_dir/test_summarizer_tool_original.py deleted file mode 100644 index 5fb0f3a..0000000 --- a/test_dir/test_summarizer_tool_original.py +++ /dev/null @@ -1,262 +0,0 @@ -""" -Tests for the summarizer tool module. 
-""" -import os -import sys -import unittest -from unittest.mock import patch, MagicMock, mock_open - -# Direct import for coverage tracking -import src.cli_code.tools.summarizer_tool -from src.cli_code.tools.summarizer_tool import SummarizeCodeTool, MAX_LINES_FOR_FULL_CONTENT, MAX_CHARS_FOR_FULL_CONTENT - -# Mock classes for google.generativeai -class MockCandidate: - def __init__(self, text, finish_reason="STOP"): - self.content = MagicMock() - self.content.parts = [MagicMock(text=text)] - self.finish_reason = MagicMock() - self.finish_reason.name = finish_reason - -class MockResponse: - def __init__(self, text=None, finish_reason="STOP"): - self.candidates = [MockCandidate(text, finish_reason)] if text is not None else [] - -class TestSummarizeCodeTool(unittest.TestCase): - """Tests for the SummarizeCodeTool class.""" - - def setUp(self): - """Set up test fixtures""" - # Create a mock model - self.mock_model = MagicMock() - self.tool = SummarizeCodeTool(model_instance=self.mock_model) - - def test_init(self): - """Test initialization of SummarizeCodeTool.""" - self.assertEqual(self.tool.name, "summarize_code") - self.assertTrue("summary" in self.tool.description.lower()) - self.assertEqual(self.tool.model, self.mock_model) - - def test_init_without_model(self): - """Test initialization without model.""" - tool = SummarizeCodeTool() - self.assertIsNone(tool.model) - - @patch("os.path.exists") - @patch("os.path.isfile") - @patch("os.path.getsize") - @patch("builtins.open", new_callable=mock_open, read_data="Small file content") - def test_execute_small_file(self, mock_file, mock_getsize, mock_isfile, mock_exists): - """Test execution with a small file that returns full content.""" - # Setup mocks - mock_exists.return_value = True - mock_isfile.return_value = True - mock_getsize.return_value = 100 # Small file - - # Execute with a test file path - result = self.tool.execute(file_path="test_file.py") - - # Verify results - self.assertIn("Full Content of test_file.py", result) - self.assertIn("Small file content", result) - # Ensure the model was not called for small files - self.mock_model.generate_content.assert_not_called() - - @patch("os.path.exists") - @patch("os.path.isfile") - @patch("os.path.getsize") - @patch("builtins.open") - def test_execute_large_file(self, mock_open, mock_getsize, mock_isfile, mock_exists): - """Test execution with a large file that generates a summary.""" - # Setup mocks - mock_exists.return_value = True - mock_isfile.return_value = True - mock_getsize.return_value = MAX_CHARS_FOR_FULL_CONTENT + 1000 # Large file - - # Mock the file reading - mock_file = MagicMock() - mock_file.__enter__.return_value.read.return_value = "Large file content" * 1000 - mock_open.return_value = mock_file - - # Mock the model response - mock_response = MockResponse(text="This is a summary of the file") - self.mock_model.generate_content.return_value = mock_response - - # Execute with a test file path - result = self.tool.execute(file_path="large_file.py") - - # Verify results - self.assertIn("Summary of large_file.py", result) - self.assertIn("This is a summary of the file", result) - self.mock_model.generate_content.assert_called_once() - - @patch("os.path.exists") - def test_file_not_found(self, mock_exists): - """Test handling of a non-existent file.""" - mock_exists.return_value = False - - # Execute with a non-existent file - result = self.tool.execute(file_path="nonexistent.py") - - # Verify results - self.assertIn("Error: File not found", result) - 
self.mock_model.generate_content.assert_not_called() - - @patch("os.path.exists") - @patch("os.path.isfile") - def test_not_a_file(self, mock_isfile, mock_exists): - """Test handling of a path that is not a file.""" - mock_exists.return_value = True - mock_isfile.return_value = False - - # Execute with a directory path - result = self.tool.execute(file_path="directory/") - - # Verify results - self.assertIn("Error: Path is not a file", result) - self.mock_model.generate_content.assert_not_called() - - def test_parent_directory_traversal(self): - """Test protection against parent directory traversal.""" - # Execute with a path containing parent directory traversal - result = self.tool.execute(file_path="../dangerous.py") - - # Verify results - self.assertIn("Error: Invalid file path", result) - self.mock_model.generate_content.assert_not_called() - - def test_missing_model(self): - """Test execution when model is not provided.""" - # Create a tool without a model - tool = SummarizeCodeTool() - - # Execute without a model - result = tool.execute(file_path="test.py") - - # Verify results - self.assertIn("Error: Summarization tool not properly configured", result) - - @patch("os.path.exists") - @patch("os.path.isfile") - @patch("os.path.getsize") - @patch("builtins.open") - def test_empty_file(self, mock_open, mock_getsize, mock_isfile, mock_exists): - """Test handling of an empty file for summarization.""" - # Setup mocks - mock_exists.return_value = True - mock_isfile.return_value = True - mock_getsize.return_value = MAX_CHARS_FOR_FULL_CONTENT + 1000 # Large but empty file - - # Mock the file reading to return empty content - mock_file = MagicMock() - mock_file.__enter__.return_value.read.return_value = "" - mock_open.return_value = mock_file - - # Execute with a test file path - result = self.tool.execute(file_path="empty_file.py") - - # Verify results - self.assertIn("Summary of empty_file.py", result) - self.assertIn("(File is empty)", result) - # Model should not be called for empty files - self.mock_model.generate_content.assert_not_called() - - @patch("os.path.exists") - @patch("os.path.isfile") - @patch("os.path.getsize") - @patch("builtins.open") - def test_file_read_error(self, mock_open, mock_getsize, mock_isfile, mock_exists): - """Test handling of errors when reading a file.""" - # Setup mocks - mock_exists.return_value = True - mock_isfile.return_value = True - mock_getsize.return_value = 100 # Small file - mock_open.side_effect = IOError("Error reading file") - - # Execute with a test file path - result = self.tool.execute(file_path="error_file.py") - - # Verify results - self.assertIn("Error reading file", result) - self.mock_model.generate_content.assert_not_called() - - @patch("os.path.exists") - @patch("os.path.isfile") - @patch("os.path.getsize") - @patch("builtins.open") - def test_summarization_error(self, mock_open, mock_getsize, mock_isfile, mock_exists): - """Test handling of errors during summarization.""" - # Setup mocks - mock_exists.return_value = True - mock_isfile.return_value = True - mock_getsize.return_value = MAX_CHARS_FOR_FULL_CONTENT + 1000 # Large file - - # Mock the file reading - mock_file = MagicMock() - mock_file.__enter__.return_value.read.return_value = "Large file content" * 1000 - mock_open.return_value = mock_file - - # Mock the model to raise an exception - self.mock_model.generate_content.side_effect = Exception("Summarization error") - - # Execute with a test file path - result = self.tool.execute(file_path="error_summarize.py") - - # Verify 
results - self.assertIn("Error generating summary", result) - self.mock_model.generate_content.assert_called_once() - - def test_extract_text_success(self): - """Test successful text extraction from summary response.""" - # Create a mock response with text - mock_response = MockResponse(text="Extracted summary text") - - # Extract text - result = self.tool._extract_text_from_summary_response(mock_response) - - # Verify results - self.assertEqual(result, "Extracted summary text") - - def test_extract_text_no_candidates(self): - """Test text extraction when no candidates are available.""" - # Create a mock response without candidates - mock_response = MockResponse() - mock_response.candidates = [] - - # Extract text - result = self.tool._extract_text_from_summary_response(mock_response) - - # Verify results - self.assertEqual(result, "(Summarization failed: No candidates)") - - def test_extract_text_failed_finish_reason(self): - """Test text extraction when finish reason is not STOP.""" - # Create a mock response with a failed finish reason - mock_response = MockResponse(text="Partial text", finish_reason="ERROR") - - # Extract text - result = self.tool._extract_text_from_summary_response(mock_response) - - # Verify results - self.assertEqual(result, "(Summarization failed: ERROR)") - - def test_extract_text_exception(self): - """Test handling of exceptions during text extraction.""" - # Create a test response with a structure that will cause an exception - # when accessing candidates - - # Create a response object that raises an exception when candidates is accessed - class ExceptionRaisingResponse: - @property - def candidates(self): - raise Exception("Extraction error") - - # Call the method directly - result = self.tool._extract_text_from_summary_response(ExceptionRaisingResponse()) - - # Verify results - self.assertEqual(result, "(Error extracting summary text)") - - -if __name__ == "__main__": - unittest.main() \ No newline at end of file diff --git a/test_dir/test_system_tools.py b/test_dir/test_system_tools.py deleted file mode 100644 index d35d280..0000000 --- a/test_dir/test_system_tools.py +++ /dev/null @@ -1,120 +0,0 @@ -""" -Tests for system_tools module to improve code coverage. 
-""" -import os -import pytest -from unittest.mock import patch, MagicMock -import subprocess - -# Direct import for coverage tracking -import src.cli_code.tools.system_tools -from src.cli_code.tools.system_tools import BashTool - - -def test_bash_tool_init(): - """Test BashTool initialization.""" - tool = BashTool() - assert tool.name == "bash" - assert "Execute a bash command" in tool.description - assert isinstance(tool.BANNED_COMMANDS, list) - assert len(tool.BANNED_COMMANDS) > 0 - - -def test_bash_tool_banned_command(): - """Test BashTool rejects banned commands.""" - tool = BashTool() - - # Try a banned command (using the first one in the list) - banned_cmd = tool.BANNED_COMMANDS[0] - result = tool.execute(f"{banned_cmd} some_args") - - assert "not allowed for security reasons" in result - assert banned_cmd in result - - -@patch("subprocess.Popen") -def test_bash_tool_successful_command(mock_popen): - """Test BashTool executes commands successfully.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.communicate.return_value = ("Command output", "") - mock_popen.return_value = mock_process - - # Execute a simple command - tool = BashTool() - result = tool.execute("echo 'hello world'") - - # Verify results - assert result == "Command output" - mock_popen.assert_called_once() - mock_process.communicate.assert_called_once() - - -@patch("subprocess.Popen") -def test_bash_tool_command_error(mock_popen): - """Test BashTool handling of command errors.""" - # Setup mock to simulate command failure - mock_process = MagicMock() - mock_process.returncode = 1 - mock_process.communicate.return_value = ("", "Command failed") - mock_popen.return_value = mock_process - - # Execute a command that will fail - tool = BashTool() - result = tool.execute("invalid_command") - - # Verify error handling - assert "exited with status 1" in result - assert "STDERR:\nCommand failed" in result - mock_popen.assert_called_once() - - -@patch("subprocess.Popen") -def test_bash_tool_timeout(mock_popen): - """Test BashTool handling of timeouts.""" - # Setup mock to simulate timeout - mock_process = MagicMock() - mock_process.communicate.side_effect = subprocess.TimeoutExpired("cmd", 1) - mock_popen.return_value = mock_process - - # Execute command with short timeout - tool = BashTool() - result = tool.execute("sleep 10", timeout=1) # 1 second timeout - - # Verify timeout handling - assert "Command timed out" in result - mock_process.kill.assert_called_once() - - -def test_bash_tool_invalid_timeout(): - """Test BashTool with invalid timeout value.""" - with patch("subprocess.Popen") as mock_popen: - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.communicate.return_value = ("Command output", "") - mock_popen.return_value = mock_process - - # Execute with invalid timeout - tool = BashTool() - result = tool.execute("echo test", timeout="not-a-number") - - # Verify default timeout was used - mock_process.communicate.assert_called_once_with(timeout=30) - assert result == "Command output" - - -@patch("subprocess.Popen") -def test_bash_tool_general_exception(mock_popen): - """Test BashTool handling of general exceptions.""" - # Setup mock to raise an exception - mock_popen.side_effect = Exception("Something went wrong") - - # Execute command - tool = BashTool() - result = tool.execute("some command") - - # Verify exception handling - assert "Error executing command" in result - assert "Something went wrong" in result \ No newline at end of file 
diff --git a/test_dir/test_system_tools_comprehensive.py b/test_dir/test_system_tools_comprehensive.py deleted file mode 100644 index bcc4e7a..0000000 --- a/test_dir/test_system_tools_comprehensive.py +++ /dev/null @@ -1,162 +0,0 @@ -""" -Comprehensive tests for the system_tools module. -""" - -import os -import sys -import pytest -import subprocess -import time -from unittest.mock import patch, MagicMock - -# Setup proper import path -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../src'))) - -# Check if running in CI -IN_CI = os.environ.get('CI', 'false').lower() == 'true' - -# Try importing the module -try: - from cli_code.tools.system_tools import BashTool - IMPORTS_AVAILABLE = True -except ImportError: - IMPORTS_AVAILABLE = False - # Create dummy class for testing - class BashTool: - name = "bash" - description = "Execute a bash command" - BANNED_COMMANDS = ["curl", "wget", "ssh"] - - def execute(self, command, timeout=30000): - return f"Mock execution of: {command}" - -# Skip tests if imports not available and not in CI -SHOULD_SKIP = not IMPORTS_AVAILABLE and not IN_CI -SKIP_REASON = "Required imports not available and not in CI environment" - - -@pytest.mark.skipif(SHOULD_SKIP, reason=SKIP_REASON) -class TestBashTool: - """Test cases for the BashTool class.""" - - def test_init(self): - """Test initialization of BashTool.""" - tool = BashTool() - assert tool.name == "bash" - assert tool.description == "Execute a bash command" - assert isinstance(tool.BANNED_COMMANDS, list) - assert len(tool.BANNED_COMMANDS) > 0 - - def test_banned_commands(self): - """Test that banned commands are rejected.""" - tool = BashTool() - - # Test each banned command - for banned_cmd in tool.BANNED_COMMANDS: - result = tool.execute(f"{banned_cmd} some_args") - if IMPORTS_AVAILABLE: - assert "not allowed for security reasons" in result - assert banned_cmd in result - - def test_execute_simple_command(self): - """Test executing a simple command.""" - if not IMPORTS_AVAILABLE: - pytest.skip("Full implementation not available") - - tool = BashTool() - result = tool.execute("echo 'hello world'") - assert "hello world" in result - - def test_execute_with_error(self): - """Test executing a command that returns an error.""" - if not IMPORTS_AVAILABLE: - pytest.skip("Full implementation not available") - - tool = BashTool() - result = tool.execute("ls /nonexistent_directory") - assert "Command exited with status" in result - assert "STDERR" in result - - @patch('subprocess.Popen') - def test_timeout_handling(self, mock_popen): - """Test handling of command timeouts.""" - if not IMPORTS_AVAILABLE: - pytest.skip("Full implementation not available") - - # Setup mock to simulate timeout - mock_process = MagicMock() - mock_process.communicate.side_effect = subprocess.TimeoutExpired(cmd="sleep 100", timeout=0.1) - mock_popen.return_value = mock_process - - tool = BashTool() - result = tool.execute("sleep 100", timeout=100) # 100ms timeout - - assert "Command timed out" in result - - @patch('subprocess.Popen') - def test_exception_handling(self, mock_popen): - """Test general exception handling.""" - if not IMPORTS_AVAILABLE: - pytest.skip("Full implementation not available") - - # Setup mock to raise exception - mock_popen.side_effect = Exception("Test exception") - - tool = BashTool() - result = tool.execute("echo test") - - assert "Error executing command" in result - assert "Test exception" in result - - def test_timeout_conversion(self): - """Test conversion of timeout parameter.""" - 
if not IMPORTS_AVAILABLE: - pytest.skip("Full implementation not available") - - tool = BashTool() - - # Test with invalid timeout - with patch('subprocess.Popen') as mock_popen: - mock_process = MagicMock() - mock_process.communicate.return_value = ("output", "") - mock_process.returncode = 0 - mock_popen.return_value = mock_process - - tool.execute("echo test", timeout="invalid") - - # Should use default timeout (30 seconds) - mock_process.communicate.assert_called_with(timeout=30) - - def test_long_output_handling(self): - """Test handling of commands with large output.""" - if not IMPORTS_AVAILABLE: - pytest.skip("Full implementation not available") - - tool = BashTool() - - # Generate a large output - result = tool.execute("python -c \"print('x' * 10000)\"") - - # Verify the tool can handle large outputs - if IMPORTS_AVAILABLE: - assert len(result) >= 10000 - assert result.count('x') >= 10000 - - def test_command_with_arguments(self): - """Test executing a command with arguments.""" - if not IMPORTS_AVAILABLE: - pytest.skip("Full implementation not available") - - tool = BashTool() - - # Test with multiple arguments - result = tool.execute("echo arg1 arg2 arg3") - assert "arg1 arg2 arg3" in result or "Mock execution" in result - - # Test with quoted arguments - result = tool.execute("echo 'argument with spaces'") - assert "argument with spaces" in result or "Mock execution" in result - - # Test with environment variables - result = tool.execute("echo $HOME") - # No assertion on content, just make sure it runs \ No newline at end of file diff --git a/test_dir/test_task_complete_tool.py b/test_dir/test_task_complete_tool.py deleted file mode 100644 index cdbf869..0000000 --- a/test_dir/test_task_complete_tool.py +++ /dev/null @@ -1,97 +0,0 @@ -""" -Tests for the TaskCompleteTool. -""" -import pytest -from unittest.mock import patch - -from cli_code.tools.task_complete_tool import TaskCompleteTool - - -def test_task_complete_tool_init(): - """Test TaskCompleteTool initialization.""" - tool = TaskCompleteTool() - assert tool.name == "task_complete" - assert "Signals task completion" in tool.description - - -def test_execute_with_valid_summary(): - """Test execution with a valid summary.""" - tool = TaskCompleteTool() - summary = "This is a valid summary of task completion." 
- result = tool.execute(summary) - - assert result == summary - - -def test_execute_with_short_summary(): - """Test execution with a summary that's too short.""" - tool = TaskCompleteTool() - summary = "Shrt" # Less than 5 characters - result = tool.execute(summary) - - assert "insufficient" in result - assert result != summary - - -def test_execute_with_empty_summary(): - """Test execution with an empty summary.""" - tool = TaskCompleteTool() - summary = "" - result = tool.execute(summary) - - assert "insufficient" in result - assert result != summary - - -def test_execute_with_none_summary(): - """Test execution with None as summary.""" - tool = TaskCompleteTool() - summary = None - - with patch("cli_code.tools.task_complete_tool.log") as mock_log: - result = tool.execute(summary) - - # Verify logging behavior - should be called at least once - assert mock_log.warning.call_count >= 1 - # Check that one of the warnings is about non-string type - assert any("non-string summary type" in str(args[0]) for args, _ in mock_log.warning.call_args_list) - # Check that one of the warnings is about short summary - assert any("missing or very short" in str(args[0]) for args, _ in mock_log.warning.call_args_list) - - assert "Task marked as complete" in result - - -def test_execute_with_non_string_summary(): - """Test execution with a non-string summary.""" - tool = TaskCompleteTool() - summary = 12345 # Integer, not a string - - with patch("cli_code.tools.task_complete_tool.log") as mock_log: - result = tool.execute(summary) - - # Verify logging behavior - assert mock_log.warning.call_count >= 1 - assert any("non-string summary type" in str(args[0]) for args, _ in mock_log.warning.call_args_list) - - # The integer should be converted to a string - assert result == "12345" - - -def test_execute_with_quoted_summary(): - """Test execution with a summary that has quotes and spaces to be cleaned.""" - tool = TaskCompleteTool() - summary = ' "This summary has quotes and spaces" ' - result = tool.execute(summary) - - # The quotes and spaces should be removed - assert result == "This summary has quotes and spaces" - - -def test_execute_with_complex_cleaning(): - """Test execution with a summary that requires complex cleaning.""" - tool = TaskCompleteTool() - summary = '\n\t "\' Nested quotes and whitespace \'" \t\n' - result = tool.execute(summary) - - # All the nested quotes and whitespace should be removed - assert result == "Nested quotes and whitespace" \ No newline at end of file diff --git a/test_dir/test_test_runner_tool.py b/test_dir/test_test_runner_tool.py deleted file mode 100644 index fdf1a69..0000000 --- a/test_dir/test_test_runner_tool.py +++ /dev/null @@ -1,235 +0,0 @@ -""" -Tests for the TestRunnerTool class. 
-""" - -import pytest -from unittest.mock import MagicMock, patch -import subprocess -import logging - -from src.cli_code.tools.test_runner import TestRunnerTool - - -@pytest.fixture -def test_runner_tool(): - """Provides an instance of TestRunnerTool.""" - return TestRunnerTool() - - -def test_initialization(): - """Test that the tool initializes correctly with the right name and description.""" - tool = TestRunnerTool() - assert tool.name == "test_runner" - assert "test runner" in tool.description.lower() - assert "pytest" in tool.description - - -def test_successful_test_run(test_runner_tool): - """Test executing a successful test run.""" - with patch("subprocess.run") as mock_run: - # Configure the mock to simulate a successful test run - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "All tests passed!" - mock_process.stderr = "" - mock_run.return_value = mock_process - - # Execute the tool - result = test_runner_tool.execute(test_path="tests/") - - # Verify the command that was run - mock_run.assert_called_once_with( - ["pytest", "tests/"], - capture_output=True, - text=True, - check=False, - timeout=300, - ) - - # Check the result - assert "SUCCESS" in result - assert "All tests passed!" in result - - -def test_failed_test_run(test_runner_tool): - """Test executing a failed test run.""" - with patch("subprocess.run") as mock_run: - # Configure the mock to simulate a failed test run - mock_process = MagicMock() - mock_process.returncode = 1 - mock_process.stdout = "1 test failed" - mock_process.stderr = "Error details" - mock_run.return_value = mock_process - - # Execute the tool - result = test_runner_tool.execute() - - # Verify the command that was run - mock_run.assert_called_once_with( - ["pytest"], - capture_output=True, - text=True, - check=False, - timeout=300, - ) - - # Check the result - assert "FAILED" in result - assert "1 test failed" in result - assert "Error details" in result - - -def test_with_options(test_runner_tool): - """Test executing tests with additional options.""" - with patch("subprocess.run") as mock_run: - # Configure the mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "All tests passed with options!" - mock_process.stderr = "" - mock_run.return_value = mock_process - - # Execute the tool with options - result = test_runner_tool.execute( - options="-v --cov=src --junit-xml=results.xml" - ) - - # Verify the command that was run with all the options - mock_run.assert_called_once_with( - ["pytest", "-v", "--cov=src", "--junit-xml=results.xml"], - capture_output=True, - text=True, - check=False, - timeout=300, - ) - - # Check the result - assert "SUCCESS" in result - assert "All tests passed with options!" in result - - -def test_with_different_runner(test_runner_tool): - """Test using a different test runner than pytest.""" - with patch("subprocess.run") as mock_run: - # Configure the mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "Tests passed with unittest!" 
- mock_process.stderr = "" - mock_run.return_value = mock_process - - # Execute the tool with a different runner command - result = test_runner_tool.execute( - runner_command="python -m unittest" - ) - - # Verify the command that was run - mock_run.assert_called_once_with( - ["python -m unittest"], - capture_output=True, - text=True, - check=False, - timeout=300, - ) - - # Check the result - assert "SUCCESS" in result - assert "using 'python -m unittest'" in result - assert "Tests passed with unittest!" in result - - -def test_command_not_found(test_runner_tool): - """Test handling of command not found error.""" - with patch("subprocess.run") as mock_run: - # Configure the mock to raise FileNotFoundError - mock_run.side_effect = FileNotFoundError("No such file or directory") - - # Execute the tool with a command that doesn't exist - result = test_runner_tool.execute(runner_command="nonexistent_command") - - # Check the result - assert "Error" in result - assert "not found" in result - assert "nonexistent_command" in result - - -def test_timeout_error(test_runner_tool): - """Test handling of timeout error.""" - with patch("subprocess.run") as mock_run: - # Configure the mock to raise TimeoutExpired - mock_run.side_effect = subprocess.TimeoutExpired(cmd="pytest", timeout=300) - - # Execute the tool - result = test_runner_tool.execute() - - # Check the result - assert "Error" in result - assert "exceeded the timeout limit" in result - - -def test_general_error(test_runner_tool): - """Test handling of general unexpected errors.""" - with patch("subprocess.run") as mock_run: - # Configure the mock to raise a general exception - mock_run.side_effect = Exception("Something went wrong") - - # Execute the tool - result = test_runner_tool.execute() - - # Check the result - assert "Error" in result - assert "Something went wrong" in result - - -def test_invalid_options_parsing(test_runner_tool): - """Test handling of invalid options string.""" - with patch("subprocess.run") as mock_run, \ - patch("shlex.split") as mock_split, \ - patch("src.cli_code.tools.test_runner.log") as mock_log: - - # Configure shlex.split to raise ValueError - mock_split.side_effect = ValueError("Invalid option string") - - # Configure subprocess.run for normal execution after the error - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "Tests passed anyway" - mock_process.stderr = "" - mock_run.return_value = mock_process - - # Execute the tool with invalid options - result = test_runner_tool.execute(options="--invalid='unclosed quote") - - # Verify warning was logged - mock_log.warning.assert_called_once() - - # Verify run was called without the options - mock_run.assert_called_once_with( - ["pytest"], - capture_output=True, - text=True, - check=False, - timeout=300, - ) - - # Check the result - assert "SUCCESS" in result - - -def test_no_tests_collected(test_runner_tool): - """Test handling of pytest exit code 5 (no tests collected).""" - with patch("subprocess.run") as mock_run: - # Configure the mock - mock_process = MagicMock() - mock_process.returncode = 5 - mock_process.stdout = "No tests collected" - mock_process.stderr = "" - mock_run.return_value = mock_process - - # Execute the tool - result = test_runner_tool.execute() - - # Check the result - assert "FAILED" in result - assert "exit code 5" in result.lower() - assert "no tests were found" in result.lower() \ No newline at end of file diff --git a/test_dir/test_tools_base.py b/test_dir/test_tools_base.py deleted file mode 100644 
index 66af64f..0000000 --- a/test_dir/test_tools_base.py +++ /dev/null @@ -1,84 +0,0 @@ -""" -Tests for the BaseTool base class. -""" -import pytest -from unittest.mock import patch, MagicMock - -from cli_code.tools.base import BaseTool - - -class TestTool(BaseTool): - """A concrete implementation of BaseTool for testing.""" - - name = "test_tool" - description = "Test tool for testing purposes" - - def execute(self, param1: str, param2: int = 0, param3: bool = False): - """Execute the test tool. - - Args: - param1: A string parameter - param2: An integer parameter with default - param3: A boolean parameter with default - - Returns: - A string response - """ - return f"Executed with {param1}, {param2}, {param3}" - - -def test_tool_execute(): - """Test the execute method of the concrete implementation.""" - tool = TestTool() - result = tool.execute("test", 42, True) - - assert result == "Executed with test, 42, True" - - # Test with default values - result = tool.execute("test") - assert result == "Executed with test, 0, False" - - -def test_get_function_declaration(): - """Test the get_function_declaration method.""" - # Create a simple test that works without mocking - declaration = TestTool.get_function_declaration() - - # Basic assertions about the declaration that don't depend on implementation details - assert declaration is not None - assert declaration.name == "test_tool" - assert declaration.description == "Test tool for testing purposes" - - # Create a simple representation of the parameters to test - # This avoids depending on the exact Schema implementation - param_repr = str(declaration.parameters) - - # Check if key parameters are mentioned in the string representation - assert "param1" in param_repr - assert "param2" in param_repr - assert "param3" in param_repr - assert "STRING" in param_repr # Uppercase in the string representation - assert "INTEGER" in param_repr # Uppercase in the string representation - assert "BOOLEAN" in param_repr # Uppercase in the string representation - assert "required" in param_repr - - -def test_get_function_declaration_no_name(): - """Test get_function_declaration when name is missing.""" - class NoNameTool(BaseTool): - name = None - description = "Tool with no name" - - def execute(self, param: str): - return f"Executed with {param}" - - with patch("cli_code.tools.base.log") as mock_log: - declaration = NoNameTool.get_function_declaration() - assert declaration is None - mock_log.warning.assert_called_once() - - -def test_abstract_class_methods(): - """Test that BaseTool cannot be instantiated directly.""" - with pytest.raises(TypeError): - BaseTool() \ No newline at end of file diff --git a/test_dir/test_tools_basic.py b/test_dir/test_tools_basic.py deleted file mode 100644 index d3b0b2f..0000000 --- a/test_dir/test_tools_basic.py +++ /dev/null @@ -1,284 +0,0 @@ -""" -Basic tests for tools without requiring API access. -These tests focus on increasing coverage for tool classes. 
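# [Editorial note, not part of the diff] The BaseTool tests above lean on two standard
# idioms: a concrete subclass defined purely for testing, and pytest.raises(TypeError) to
# prove the abstract base cannot be instantiated. AbstractTool/ExampleTool below are
# illustrative, not the project's real BaseTool.
from abc import ABC, abstractmethod

import pytest


class AbstractTool(ABC):
    name = "abstract"
    description = "base class"

    @abstractmethod
    def execute(self, **kwargs) -> str:
        ...


class ExampleTool(AbstractTool):
    name = "example_tool"
    description = "concrete implementation used only in tests"

    def execute(self, param1: str, param2: int = 0) -> str:
        return f"Executed with {param1}, {param2}"


def test_abstract_base_cannot_be_instantiated():
    with pytest.raises(TypeError):
        AbstractTool()  # abstract execute() is unimplemented


def test_concrete_subclass_executes():
    assert ExampleTool().execute("test") == "Executed with test, 0"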
-""" - -from unittest import TestCase, skipIf -from unittest.mock import MagicMock, patch -import os -import tempfile -from pathlib import Path - -# Import necessary modules safely -try: - from src.cli_code.tools.base import BaseTool - from src.cli_code.tools.file_tools import ViewTool, EditTool, GrepTool, GlobTool - from src.cli_code.tools.quality_tools import _run_quality_command, LinterCheckerTool, FormatterTool - from src.cli_code.tools.summarizer_tool import SummarizeCodeTool - from src.cli_code.tools.system_tools import BashTool - from src.cli_code.tools.task_complete_tool import TaskCompleteTool - from src.cli_code.tools.tree_tool import TreeTool - IMPORTS_AVAILABLE = True -except ImportError: - IMPORTS_AVAILABLE = False - # Create dummy classes for type hints - class BaseTool: pass - class ViewTool: pass - class EditTool: pass - class GrepTool: pass - class GlobTool: pass - class LinterCheckerTool: pass - class FormatterTool: pass - class SummarizeCodeTool: pass - class BashTool: pass - class TaskCompleteTool: pass - class TreeTool: pass - - -@skipIf(not IMPORTS_AVAILABLE, "Required tool imports not available") -class TestFileTools(TestCase): - """Test file-related tools without requiring actual file access.""" - - def setUp(self): - """Set up test environment with temporary directory.""" - self.temp_dir = tempfile.TemporaryDirectory() - self.temp_path = Path(self.temp_dir.name) - - # Create a test file in the temp directory - self.test_file = self.temp_path / "test_file.txt" - with open(self.test_file, "w") as f: - f.write("Line 1\nLine 2\nLine 3\nTest pattern found here\nLine 5\n") - - def tearDown(self): - """Clean up the temporary directory.""" - self.temp_dir.cleanup() - - def test_view_tool_initialization(self): - """Test ViewTool initialization and properties.""" - view_tool = ViewTool() - - self.assertEqual(view_tool.name, "view") - self.assertTrue("View specific sections" in view_tool.description) - - def test_glob_tool_initialization(self): - """Test GlobTool initialization and properties.""" - glob_tool = GlobTool() - - self.assertEqual(glob_tool.name, "glob") - self.assertEqual(glob_tool.description, "Find files/directories matching specific glob patterns recursively.") - - @patch("subprocess.check_output") - def test_grep_tool_execution(self, mock_check_output): - """Test GrepTool execution with mocked subprocess call.""" - # Configure mock to return a simulated grep output - mock_result = b"test_file.txt:4:Test pattern found here\n" - mock_check_output.return_value = mock_result - - # Create and run the tool - grep_tool = GrepTool() - - # Mock the regex.search to avoid pattern validation issues - with patch("re.compile") as mock_compile: - mock_regex = MagicMock() - mock_regex.search.return_value = True - mock_compile.return_value = mock_regex - - # Also patch open to avoid file reading - with patch("builtins.open", mock_open = MagicMock()): - with patch("os.walk") as mock_walk: - # Setup mock walk to return our test file - mock_walk.return_value = [(str(self.temp_path), [], ["test_file.txt"])] - - result = grep_tool.execute( - pattern="pattern", - path=str(self.temp_path) - ) - - # Check result contains expected output - self.assertIn("No matches found", result) - - @patch("builtins.open") - def test_edit_tool_with_mock(self, mock_open): - """Test EditTool basics with mocked file operations.""" - # Configure mock file operations - mock_file_handle = MagicMock() - mock_open.return_value.__enter__.return_value = mock_file_handle - - # Create and run the tool - 
edit_tool = EditTool() - result = edit_tool.execute( - file_path=str(self.test_file), - content="New content for the file" - ) - - # Verify file was opened and written to - mock_open.assert_called_with(str(self.test_file), 'w', encoding='utf-8') - mock_file_handle.write.assert_called_with("New content for the file") - - # Check result indicates success - self.assertIn("Successfully wrote content", result) - - -@skipIf(not IMPORTS_AVAILABLE, "Required tool imports not available") -class TestQualityTools(TestCase): - """Test code quality tools without requiring actual command execution.""" - - @patch("subprocess.run") - def test_run_quality_command_success(self, mock_run): - """Test the _run_quality_command function with successful command.""" - # Configure mock for successful command execution - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "Command output" - mock_run.return_value = mock_process - - # Call the function with command list and name - result = _run_quality_command(["test", "command"], "test-command") - - # Verify subprocess was called with correct arguments - mock_run.assert_called_once() - args, kwargs = mock_run.call_args - self.assertEqual(args[0], ["test", "command"]) - - # Check result has expected structure and values - self.assertIn("Command output", result) - - @patch("subprocess.run") - def test_linter_checker_tool(self, mock_run): - """Test LinterCheckerTool execution.""" - # Configure mock for linter execution - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "No issues found" - mock_run.return_value = mock_process - - # Create and run the tool - linter_tool = LinterCheckerTool() - - # Use proper parameter passing - result = linter_tool.execute( - path="test_file.py", - linter_command="flake8" - ) - - # Verify result contains expected output - self.assertIn("No issues found", result) - - @patch("subprocess.run") - def test_formatter_tool(self, mock_run): - """Test FormatterTool execution.""" - # Configure mock for formatter execution - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "Formatted file" - mock_run.return_value = mock_process - - # Create and run the tool - formatter_tool = FormatterTool() - - # Use proper parameter passing - result = formatter_tool.execute( - path="test_file.py", - formatter_command="black" - ) - - # Verify result contains expected output - self.assertIn("Formatted file", result) - - -@skipIf(not IMPORTS_AVAILABLE, "Required tool imports not available") -class TestSystemTools(TestCase): - """Test system tools without requiring actual command execution.""" - - @patch("subprocess.Popen") - def test_bash_tool(self, mock_popen): - """Test BashTool execution.""" - # Configure mock for command execution - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.communicate.return_value = ("Command output", "") - mock_popen.return_value = mock_process - - # Create and run the tool - bash_tool = BashTool() - - # Call with proper parameters - BashTool.execute(command, timeout=30000) - result = bash_tool.execute("ls -la") - - # Verify subprocess was called - mock_popen.assert_called_once() - - # Check result has expected output - self.assertEqual("Command output", result) - - -@skipIf(not IMPORTS_AVAILABLE, "Required tool imports not available") -class TestTaskCompleteTool(TestCase): - """Test TaskCompleteTool without requiring actual API calls.""" - - def test_task_complete_tool(self): - """Test TaskCompleteTool execution.""" - # 
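# [Editorial note, not part of the diff] The BashTool test above is the Popen variant of the
# subprocess-mocking pattern: communicate() is what yields (stdout, stderr), so that is what
# the mock must provide. run_shell() below is an illustrative helper, not the real BashTool.
import subprocess
from unittest.mock import MagicMock, patch


def run_shell(command, timeout=30):
    proc = subprocess.Popen(
        command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
    )
    stdout, stderr = proc.communicate(timeout=timeout)
    return stdout if proc.returncode == 0 else stderr


def test_run_shell_returns_stdout():
    with patch("subprocess.Popen") as mock_popen:
        mock_proc = MagicMock(returncode=0)
        mock_proc.communicate.return_value = ("Command output", "")
        mock_popen.return_value = mock_proc
        assert run_shell("ls -la") == "Command output"
        mock_popen.assert_called_once()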
Create and run the tool - task_tool = TaskCompleteTool() - - # TaskCompleteTool.execute takes summary parameter - result = task_tool.execute(summary="Task completed successfully!") - - # Check result contains the message - self.assertIn("Task completed successfully!", result) - - -@skipIf(not IMPORTS_AVAILABLE, "Required tool imports not available") -class TestTreeTool(TestCase): - """Test TreeTool without requiring actual filesystem access.""" - - @patch("subprocess.run") - def test_tree_tool(self, mock_run): - """Test TreeTool execution.""" - # Configure mock for tree command - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = ( - ".\n" - "├── dir1\n" - "│ └── file1.txt\n" - "└── dir2\n" - " └── file2.txt\n" - ) - mock_run.return_value = mock_process - - # Create and run the tool - tree_tool = TreeTool() - - # Pass parameters correctly as separate arguments (not a dict) - result = tree_tool.execute(path="/tmp", depth=2) - - # Verify subprocess was called - mock_run.assert_called_once() - - # Check result contains tree output - self.assertIn("dir1", result) - - -@skipIf(not IMPORTS_AVAILABLE, "Required tool imports not available") -class TestSummarizerTool(TestCase): - """Test SummarizeCodeTool without requiring actual API calls.""" - - @patch("google.generativeai.GenerativeModel") - def test_summarizer_tool_initialization(self, mock_model_class): - """Test SummarizeCodeTool initialization.""" - # Configure mock model - mock_model = MagicMock() - mock_model_class.return_value = mock_model - - # Create the tool with mock patching for the initialization - with patch.object(SummarizeCodeTool, "__init__", return_value=None): - summarizer_tool = SummarizeCodeTool() - - # Set essential attributes manually since init is mocked - summarizer_tool.name = "summarize_code" - summarizer_tool.description = "Summarize code in a file or directory" - - # Verify properties - self.assertEqual(summarizer_tool.name, "summarize_code") - self.assertTrue("Summarize" in summarizer_tool.description) \ No newline at end of file diff --git a/test_dir/test_tools_init_coverage.py b/test_dir/test_tools_init_coverage.py deleted file mode 100644 index 9dca021..0000000 --- a/test_dir/test_tools_init_coverage.py +++ /dev/null @@ -1,149 +0,0 @@ -""" -Tests specifically for the tools module initialization to improve code coverage. -This file focuses on testing the __init__.py module functions and branch coverage. 
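# [Editorial note, not part of the diff] The registry tests that follow revolve around a
# name -> class mapping and a get_tool() lookup that logs and returns None for unknown names
# or construction failures. A self-contained sketch of that shape; AVAILABLE_TOOLS and
# EchoTool here are illustrative, not the project's real registry.
import logging

log = logging.getLogger(__name__)


class EchoTool:
    name = "echo"

    def execute(self, text: str) -> str:
        return text


AVAILABLE_TOOLS = {"echo": EchoTool}


def get_tool(name):
    tool_cls = AVAILABLE_TOOLS.get(name)
    if tool_cls is None:
        log.warning("Tool '%s' not found in AVAILABLE_TOOLS.", name)
        return None
    try:
        return tool_cls()
    except Exception:
        log.error("Failed to instantiate tool '%s'.", name, exc_info=True)
        return None


assert get_tool("echo").execute("hi") == "hi"
assert get_tool("missing") is None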
-""" - -import os -import unittest -from unittest.mock import patch, MagicMock -import pytest -import logging - -# Check if running in CI -IN_CI = os.environ.get('CI', 'false').lower() == 'true' - -# Direct import for coverage tracking -import src.cli_code.tools - -# Handle imports -try: - from src.cli_code.tools import get_tool, AVAILABLE_TOOLS - from src.cli_code.tools.base import BaseTool - IMPORTS_AVAILABLE = True -except ImportError: - IMPORTS_AVAILABLE = False - # Create dummy classes for type checking - get_tool = MagicMock - AVAILABLE_TOOLS = {} - BaseTool = MagicMock - -# Set up conditional skipping -SHOULD_SKIP_TESTS = not IMPORTS_AVAILABLE and not IN_CI -SKIP_REASON = "Required imports not available and not in CI" - - -@pytest.mark.skipif(SHOULD_SKIP_TESTS, reason=SKIP_REASON) -class TestToolsInitModule: - """Test suite for tools module initialization and tool retrieval.""" - - def setup_method(self): - """Set up test fixtures.""" - # Mock logging to prevent actual log outputs - self.logging_patch = patch('src.cli_code.tools.logging') - self.mock_logging = self.logging_patch.start() - - # Store original AVAILABLE_TOOLS for restoration later - self.original_tools = AVAILABLE_TOOLS.copy() - - def teardown_method(self): - """Tear down test fixtures.""" - self.logging_patch.stop() - - # Restore original AVAILABLE_TOOLS - global AVAILABLE_TOOLS - AVAILABLE_TOOLS.clear() - AVAILABLE_TOOLS.update(self.original_tools) - - def test_get_tool_valid(self): - """Test retrieving a valid tool.""" - # Most tools should be available - assert 'ls' in AVAILABLE_TOOLS, "Basic 'ls' tool should be available" - - # Get a tool instance - ls_tool = get_tool('ls') - - # Verify instance creation - assert ls_tool is not None - assert hasattr(ls_tool, 'execute'), "Tool should have execute method" - - def test_get_tool_missing(self): - """Test retrieving a non-existent tool.""" - # Try to get a non-existent tool - non_existent_tool = get_tool('non_existent_tool') - - # Verify error handling - assert non_existent_tool is None - self.mock_logging.warning.assert_called_with( - "Tool 'non_existent_tool' not found in AVAILABLE_TOOLS." - ) - - def test_get_tool_summarize_code(self): - """Test handling of the special summarize_code tool case.""" - # Temporarily add a mock summarize_code tool to AVAILABLE_TOOLS - mock_summarize_tool = MagicMock() - global AVAILABLE_TOOLS - AVAILABLE_TOOLS['summarize_code'] = mock_summarize_tool - - # Try to get the tool - result = get_tool('summarize_code') - - # Verify special case handling - assert result is None - self.mock_logging.error.assert_called_with( - "get_tool() called for 'summarize_code', which requires special instantiation with model instance." 
- ) - - def test_get_tool_instantiation_error(self): - """Test handling of tool instantiation errors.""" - # Create a mock tool class that raises an exception when instantiated - mock_error_tool = MagicMock() - mock_error_tool.side_effect = Exception("Instantiation error") - - # Add the error-raising tool to AVAILABLE_TOOLS - global AVAILABLE_TOOLS - AVAILABLE_TOOLS['error_tool'] = mock_error_tool - - # Try to get the tool - result = get_tool('error_tool') - - # Verify error handling - assert result is None - self.mock_logging.error.assert_called() # Should log the error - - def test_all_standard_tools_available(self): - """Test that all standard tools are registered correctly.""" - # Define the core tools that should always be available - core_tools = ['view', 'edit', 'ls', 'grep', 'glob', 'tree'] - - # Check each core tool - for tool_name in core_tools: - assert tool_name in AVAILABLE_TOOLS, f"Core tool '{tool_name}' should be available" - - # Also check that the tool can be instantiated - tool_instance = get_tool(tool_name) - assert tool_instance is not None, f"Tool '{tool_name}' should be instantiable" - assert isinstance(tool_instance, BaseTool), f"Tool '{tool_name}' should be a BaseTool subclass" - - @patch('src.cli_code.tools.AVAILABLE_TOOLS', {}) - def test_empty_tools_dict(self): - """Test behavior when AVAILABLE_TOOLS is empty.""" - # Try to get a tool from an empty dict - result = get_tool('ls') - - # Verify error handling - assert result is None - self.mock_logging.warning.assert_called_with( - "Tool 'ls' not found in AVAILABLE_TOOLS." - ) - - def test_optional_tools_registration(self): - """Test that optional tools are conditionally registered.""" - # Check a few optional tools that should be registered if imports succeeded - optional_tools = ['bash', 'task_complete', 'create_directory', 'linter_checker', 'formatter', 'test_runner'] - - for tool_name in optional_tools: - if tool_name in AVAILABLE_TOOLS: - # Tool is available, test instantiation - tool_instance = get_tool(tool_name) - assert tool_instance is not None, f"Optional tool '{tool_name}' should be instantiable if available" - assert isinstance(tool_instance, BaseTool), f"Tool '{tool_name}' should be a BaseTool subclass" \ No newline at end of file diff --git a/test_dir/test_tree_tool_edge_cases.py b/test_dir/test_tree_tool_edge_cases.py deleted file mode 100644 index 4764843..0000000 --- a/test_dir/test_tree_tool_edge_cases.py +++ /dev/null @@ -1,236 +0,0 @@ -""" -Tests for edge cases in the TreeTool functionality. 
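# [Editorial note, not part of the diff] The "empty registry" test above swaps out a
# module-level dict for one test. patch.dict is the purpose-built way to do that, and it
# also replaces the manual copy/clear/restore dance in setup_method/teardown_method.
# REGISTRY is an illustrative stand-in.
from unittest.mock import patch

REGISTRY = {"ls": object, "view": object}


def test_lookup_against_empty_registry():
    with patch.dict(REGISTRY, clear=True):
        assert "ls" not in REGISTRY  # temporarily emptied inside the with-block
    assert "ls" in REGISTRY  # restored automatically afterwards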
- -To run these tests specifically: - python -m pytest test_dir/test_tree_tool_edge_cases.py - -To run a specific test: - python -m pytest test_dir/test_tree_tool_edge_cases.py::TestTreeToolEdgeCases::test_tree_empty_result - -To run all tests related to tree tool: - python -m pytest -k "tree_tool" -""" -import os -import subprocess -import sys -from pathlib import Path -import pytest -from unittest.mock import patch, MagicMock, mock_open, call - -from src.cli_code.tools.tree_tool import TreeTool, DEFAULT_TREE_DEPTH, MAX_TREE_DEPTH - - -class TestTreeToolEdgeCases: - """Tests for edge cases of the TreeTool class.""" - - @patch("subprocess.run") - def test_tree_complex_path_handling(self, mock_run): - """Test tree command with a complex path containing spaces and special characters.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "path with spaces\n└── file.txt" - mock_run.return_value = mock_process - - # Execute tool with path containing spaces - tool = TreeTool() - complex_path = "path with spaces" - result = tool.execute(path=complex_path) - - # Verify results - assert "path with spaces" in result - mock_run.assert_called_once() - args, kwargs = mock_run.call_args - assert args[0] == ["tree", "-L", str(DEFAULT_TREE_DEPTH), complex_path] - - @patch("subprocess.run") - def test_tree_empty_result(self, mock_run): - """Test tree command with an empty result.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "" # Empty output - mock_run.return_value = mock_process - - # Execute tool - tool = TreeTool() - result = tool.execute() - - # Verify results - assert result == "" # Should return the empty string as is - - @patch("subprocess.run") - def test_tree_special_characters_in_output(self, mock_run): - """Test tree command with special characters in the output.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = ".\n├── file-with-dashes.txt\n├── file_with_underscores.txt\n├── 特殊字符.txt" - mock_run.return_value = mock_process - - # Execute tool - tool = TreeTool() - result = tool.execute() - - # Verify results - assert "file-with-dashes.txt" in result - assert "file_with_underscores.txt" in result - assert "特殊字符.txt" in result - - @patch("subprocess.run") - def test_tree_with_negative_depth(self, mock_run): - """Test tree command with a negative depth value.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = ".\n└── file.txt" - mock_run.return_value = mock_process - - # Execute tool with negative depth - tool = TreeTool() - result = tool.execute(depth=-5) - - # Verify results - mock_run.assert_called_once() - args, kwargs = mock_run.call_args - # Should be clamped to minimum depth of 1 - assert args[0] == ["tree", "-L", "1"] - - @patch("subprocess.run") - def test_tree_with_float_depth(self, mock_run): - """Test tree command with a float depth value.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = ".\n└── file.txt" - mock_run.return_value = mock_process - - # Execute tool with float depth - tool = TreeTool() - result = tool.execute(depth=2.7) - - # Verify results - mock_run.assert_called_once() - args, kwargs = mock_run.call_args - # FloatingPointError: The TreeTool doesn't convert floats to int, it passes them as strings - assert args[0] == ["tree", "-L", "2.7"] - - @patch("pathlib.Path.resolve") - @patch("pathlib.Path.exists") - 
@patch("pathlib.Path.is_dir") - @patch("os.walk") - def test_fallback_nested_directories(self, mock_walk, mock_is_dir, mock_exists, mock_resolve): - """Test fallback tree implementation with nested directories.""" - # Setup mocks - mock_resolve.return_value = Path("test_dir") - mock_exists.return_value = True - mock_is_dir.return_value = True - - # Setup mock directory structure: - # test_dir/ - # ├── dir1/ - # │ ├── subdir1/ - # │ │ └── file3.txt - # │ └── file2.txt - # └── file1.txt - mock_walk.return_value = [ - ("test_dir", ["dir1"], ["file1.txt"]), - ("test_dir/dir1", ["subdir1"], ["file2.txt"]), - ("test_dir/dir1/subdir1", [], ["file3.txt"]), - ] - - # Execute fallback tree implementation - tool = TreeTool() - result = tool._fallback_tree_implementation("test_dir", 3) - - # Verify results - assert "." in result - assert "file1.txt" in result - assert "dir1/" in result - assert "file2.txt" in result - assert "subdir1/" in result - assert "file3.txt" in result - - @patch("subprocess.run") - def test_tree_command_os_error(self, mock_run): - """Test tree command raising an OSError.""" - # Setup mock to raise OSError - mock_run.side_effect = OSError("Simulated OS error") - - # Mock the fallback implementation - with patch.object(TreeTool, '_fallback_tree_implementation') as mock_fallback: - mock_fallback.return_value = "Fallback tree output" - - # Execute tool - tool = TreeTool() - result = tool.execute() - - # Verify results - assert result == "Fallback tree output" - mock_fallback.assert_called_once_with(".", DEFAULT_TREE_DEPTH) - - @patch("pathlib.Path.resolve") - @patch("pathlib.Path.exists") - @patch("pathlib.Path.is_dir") - @patch("os.walk") - def test_fallback_empty_directory(self, mock_walk, mock_is_dir, mock_exists, mock_resolve): - """Test fallback tree implementation with an empty directory.""" - # Setup mocks - mock_resolve.return_value = Path("empty_dir") - mock_exists.return_value = True - mock_is_dir.return_value = True - - # Empty directory - mock_walk.return_value = [ - ("empty_dir", [], []), - ] - - # Execute fallback tree implementation - tool = TreeTool() - result = tool._fallback_tree_implementation("empty_dir", 3) - - # Verify results - assert "." in result - assert len(result.splitlines()) == 1 # Only the root directory line - - @patch("subprocess.run") - def test_tree_command_with_long_path(self, mock_run): - """Test tree command with a very long path.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "very/long/path\n└── file.txt" - mock_run.return_value = mock_process - - # Very long path - long_path = "/".join(["directory"] * 20) # Creates a very long path - - # Execute tool - tool = TreeTool() - result = tool.execute(path=long_path) - - # Verify results - mock_run.assert_called_once() - args, kwargs = mock_run.call_args - assert args[0] == ["tree", "-L", str(DEFAULT_TREE_DEPTH), long_path] - - @patch("subprocess.run") - def test_tree_command_path_does_not_exist(self, mock_run): - """Test tree command with a path that doesn't exist.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 1 - mock_process.stderr = "tree: nonexistent_path: No such file or directory" - mock_run.return_value = mock_process - - # Mock the fallback implementation - with patch.object(TreeTool, '_fallback_tree_implementation') as mock_fallback: - mock_fallback.return_value = "Error: Path 'nonexistent_path' does not exist." 
- - # Execute tool - tool = TreeTool() - result = tool.execute(path="nonexistent_path") - - # Verify results - assert "does not exist" in result - mock_fallback.assert_called_once_with("nonexistent_path", DEFAULT_TREE_DEPTH) \ No newline at end of file diff --git a/test_dir/test_tree_tool_original.py b/test_dir/test_tree_tool_original.py deleted file mode 100644 index d8b9bbd..0000000 --- a/test_dir/test_tree_tool_original.py +++ /dev/null @@ -1,396 +0,0 @@ -""" -Tests for the tree tool module. -""" -import os -import subprocess -import tempfile -from pathlib import Path -import pytest -from unittest.mock import patch, MagicMock, mock_open - -# Direct import for coverage tracking -import src.cli_code.tools.tree_tool -from src.cli_code.tools.tree_tool import TreeTool, DEFAULT_TREE_DEPTH, MAX_TREE_DEPTH - - -class TestTreeTool: - """Tests for the TreeTool class.""" - - def test_init(self): - """Test initialization of TreeTool.""" - tool = TreeTool() - assert tool.name == "tree" - assert "Displays the directory structure as a tree" in tool.description - assert "depth" in tool.args_schema - assert "path" in tool.args_schema - assert len(tool.required_args) == 0 # All args are optional - - @patch("subprocess.run") - def test_tree_command_success(self, mock_run): - """Test successful execution of tree command.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = ".\n├── file1.txt\n└── dir1\n └── file2.txt" - mock_run.return_value = mock_process - - # Execute tool - tool = TreeTool() - result = tool.execute() - - # Verify results - assert "file1.txt" in result - assert "dir1" in result - assert "file2.txt" in result - mock_run.assert_called_once() - args, kwargs = mock_run.call_args - assert args[0] == ["tree", "-L", str(DEFAULT_TREE_DEPTH)] - assert kwargs.get("capture_output") is True - assert kwargs.get("text") is True - - @patch("subprocess.run") - def test_tree_with_custom_path(self, mock_run): - """Test tree command with custom path.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = "test_dir\n├── file1.txt\n└── file2.txt" - mock_run.return_value = mock_process - - # Execute tool - tool = TreeTool() - result = tool.execute(path="test_dir") - - # Verify results - assert "test_dir" in result - mock_run.assert_called_once() - args, kwargs = mock_run.call_args - assert args[0] == ["tree", "-L", str(DEFAULT_TREE_DEPTH), "test_dir"] - - @patch("subprocess.run") - def test_tree_with_custom_depth(self, mock_run): - """Test tree command with custom depth.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = ".\n├── file1.txt\n└── dir1" - mock_run.return_value = mock_process - - # Execute tool - tool = TreeTool() - result = tool.execute(depth=2) - - # Verify results - assert "file1.txt" in result - mock_run.assert_called_once() - args, kwargs = mock_run.call_args - assert args[0] == ["tree", "-L", "2"] # Depth should be converted to string - - @patch("subprocess.run") - def test_tree_with_string_depth(self, mock_run): - """Test tree command with depth as string.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = ".\n├── file1.txt\n└── dir1" - mock_run.return_value = mock_process - - # Execute tool - tool = TreeTool() - result = tool.execute(depth="2") # String instead of int - - # Verify results - mock_run.assert_called_once() - args, kwargs = mock_run.call_args - assert args[0] == ["tree", "-L", "2"] # 
Should be converted properly - - @patch("subprocess.run") - def test_tree_with_invalid_depth_string(self, mock_run): - """Test tree command with invalid depth string.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = ".\n├── file1.txt\n└── dir1" - mock_run.return_value = mock_process - - # Execute tool - tool = TreeTool() - result = tool.execute(depth="invalid") # Invalid depth string - - # Verify results - mock_run.assert_called_once() - args, kwargs = mock_run.call_args - assert args[0] == ["tree", "-L", str(DEFAULT_TREE_DEPTH)] # Should use default - - @patch("subprocess.run") - def test_tree_with_too_large_depth(self, mock_run): - """Test tree command with depth larger than maximum.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = ".\n├── file1.txt\n└── dir1" - mock_run.return_value = mock_process - - # Execute tool - tool = TreeTool() - result = tool.execute(depth=MAX_TREE_DEPTH + 5) # Too large - - # Verify results - mock_run.assert_called_once() - args, kwargs = mock_run.call_args - assert args[0] == ["tree", "-L", str(MAX_TREE_DEPTH)] # Should be clamped to max - - @patch("subprocess.run") - def test_tree_with_too_small_depth(self, mock_run): - """Test tree command with depth smaller than minimum.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - mock_process.stdout = ".\n├── file1.txt\n└── dir1" - mock_run.return_value = mock_process - - # Execute tool - tool = TreeTool() - result = tool.execute(depth=0) # Too small - - # Verify results - mock_run.assert_called_once() - args, kwargs = mock_run.call_args - assert args[0] == ["tree", "-L", "1"] # Should be clamped to min (1) - - @patch("subprocess.run") - def test_tree_truncate_long_output(self, mock_run): - """Test tree command with very long output that gets truncated.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 0 - # Create an output with 201 lines (more than the 200 line limit) - mock_process.stdout = "\n".join([f"line{i}" for i in range(201)]) - mock_run.return_value = mock_process - - # Execute tool - tool = TreeTool() - result = tool.execute() - - # Verify results - assert "... 
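# [Editorial note, not part of the diff] Several of the depth tests above reduce to one
# rule: coerce the value to an int if possible, fall back to the default otherwise, then
# clamp into [1, MAX]. A worked sketch of that rule; the constants are illustrative, not
# necessarily the tool's real values.
DEFAULT_TREE_DEPTH = 3
MAX_TREE_DEPTH = 10


def clamp_depth(value):
    try:
        depth = int(value)
    except (TypeError, ValueError):
        depth = DEFAULT_TREE_DEPTH
    return max(1, min(depth, MAX_TREE_DEPTH))


assert clamp_depth("2") == 2                      # numeric strings are accepted
assert clamp_depth("invalid") == DEFAULT_TREE_DEPTH
assert clamp_depth(0) == 1                        # clamped to the minimum
assert clamp_depth(MAX_TREE_DEPTH + 5) == MAX_TREE_DEPTH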
(output truncated)" in result - # Result should have only 200 lines + truncation message - assert len(result.splitlines()) == 201 - # The 200th line should be "line199" - assert "line199" in result - # The 201st line (which would be "line200") should NOT be in the result - assert "line200" not in result - - @patch("subprocess.run") - def test_tree_command_not_found_fallback(self, mock_run): - """Test fallback when tree command is not found.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 127 # Command not found - mock_process.stderr = "tree: command not found" - mock_run.return_value = mock_process - - # Mock the fallback implementation - with patch.object(TreeTool, '_fallback_tree_implementation') as mock_fallback: - mock_fallback.return_value = "Fallback tree output" - - # Execute tool - tool = TreeTool() - result = tool.execute() - - # Verify results - assert result == "Fallback tree output" - mock_fallback.assert_called_once_with(".", DEFAULT_TREE_DEPTH) - - @patch("subprocess.run") - def test_tree_command_error_fallback(self, mock_run): - """Test fallback when tree command returns an error.""" - # Setup mock - mock_process = MagicMock() - mock_process.returncode = 1 # Error - mock_process.stderr = "Some error" - mock_run.return_value = mock_process - - # Mock the fallback implementation - with patch.object(TreeTool, '_fallback_tree_implementation') as mock_fallback: - mock_fallback.return_value = "Fallback tree output" - - # Execute tool - tool = TreeTool() - result = tool.execute() - - # Verify results - assert result == "Fallback tree output" - mock_fallback.assert_called_once_with(".", DEFAULT_TREE_DEPTH) - - @patch("subprocess.run") - def test_tree_command_file_not_found(self, mock_run): - """Test when the 'tree' command itself isn't found.""" - # Setup mock - mock_run.side_effect = FileNotFoundError("No such file or directory: 'tree'") - - # Mock the fallback implementation - with patch.object(TreeTool, '_fallback_tree_implementation') as mock_fallback: - mock_fallback.return_value = "Fallback tree output" - - # Execute tool - tool = TreeTool() - result = tool.execute() - - # Verify results - assert result == "Fallback tree output" - mock_fallback.assert_called_once_with(".", DEFAULT_TREE_DEPTH) - - @patch("subprocess.run") - def test_tree_command_timeout(self, mock_run): - """Test tree command timeout.""" - # Setup mock - mock_run.side_effect = subprocess.TimeoutExpired(cmd="tree", timeout=15) - - # Execute tool - tool = TreeTool() - result = tool.execute() - - # Verify results - assert "Error: Tree command timed out" in result - assert "too large or complex" in result - - @patch("subprocess.run") - def test_tree_command_unexpected_error_with_fallback_success(self, mock_run): - """Test unexpected error with successful fallback.""" - # Setup mock - mock_run.side_effect = Exception("Unexpected error") - - # Mock the fallback implementation - with patch.object(TreeTool, '_fallback_tree_implementation') as mock_fallback: - mock_fallback.return_value = "Fallback tree output" - - # Execute tool - tool = TreeTool() - result = tool.execute() - - # Verify results - assert result == "Fallback tree output" - mock_fallback.assert_called_once_with(".", DEFAULT_TREE_DEPTH) - - @patch("subprocess.run") - def test_tree_command_unexpected_error_with_fallback_failure(self, mock_run): - """Test unexpected error with failed fallback.""" - # Setup mock - mock_run.side_effect = Exception("Unexpected error") - - # Mock the fallback implementation - with 
patch.object(TreeTool, '_fallback_tree_implementation') as mock_fallback: - mock_fallback.side_effect = Exception("Fallback error") - - # Execute tool - tool = TreeTool() - result = tool.execute() - - # Verify results - assert "An unexpected error occurred" in result - assert "Unexpected error" in result - mock_fallback.assert_called_once_with(".", DEFAULT_TREE_DEPTH) - - @patch("pathlib.Path.resolve") - @patch("pathlib.Path.exists") - @patch("pathlib.Path.is_dir") - @patch("os.walk") - def test_fallback_tree_implementation(self, mock_walk, mock_is_dir, mock_exists, mock_resolve): - """Test the fallback tree implementation.""" - # Setup mocks - mock_resolve.return_value = Path("test_dir") - mock_exists.return_value = True - mock_is_dir.return_value = True - mock_walk.return_value = [ - ("test_dir", ["dir1", "dir2"], ["file1.txt"]), - ("test_dir/dir1", [], ["file2.txt"]), - ("test_dir/dir2", [], ["file3.txt"]) - ] - - # Execute fallback - tool = TreeTool() - result = tool._fallback_tree_implementation("test_dir") - - # Verify results - assert "." in result # Root directory - assert "dir1" in result # Subdirectories - assert "dir2" in result - assert "file1.txt" in result # Files - assert "file2.txt" in result - assert "file3.txt" in result - - @patch("pathlib.Path.resolve") - @patch("pathlib.Path.exists") - def test_fallback_tree_nonexistent_path(self, mock_exists, mock_resolve): - """Test fallback tree with nonexistent path.""" - # Setup mocks - mock_resolve.return_value = Path("nonexistent") - mock_exists.return_value = False - - # Execute fallback - tool = TreeTool() - result = tool._fallback_tree_implementation("nonexistent") - - # Verify results - assert "Error: Path 'nonexistent' does not exist" in result - - @patch("pathlib.Path.resolve") - @patch("pathlib.Path.exists") - @patch("pathlib.Path.is_dir") - def test_fallback_tree_not_a_directory(self, mock_is_dir, mock_exists, mock_resolve): - """Test fallback tree with a file path.""" - # Setup mocks - mock_resolve.return_value = Path("file.txt") - mock_exists.return_value = True - mock_is_dir.return_value = False - - # Execute fallback - tool = TreeTool() - result = tool._fallback_tree_implementation("file.txt") - - # Verify results - assert "Error: Path 'file.txt' is not a directory" in result - - @patch("pathlib.Path.resolve") - @patch("pathlib.Path.exists") - @patch("pathlib.Path.is_dir") - @patch("os.walk") - def test_fallback_tree_truncate_long_output(self, mock_walk, mock_is_dir, mock_exists, mock_resolve): - """Test fallback tree with very long output that gets truncated.""" - # Setup mocks - mock_resolve.return_value = Path("test_dir") - mock_exists.return_value = True - mock_is_dir.return_value = True - - # Create a directory structure with more than 200 files - dirs = [("test_dir", [], [f"file{i}.txt" for i in range(201)])] - mock_walk.return_value = dirs - - # Execute fallback - tool = TreeTool() - result = tool._fallback_tree_implementation("test_dir") - - # Verify results - assert "... 
(output truncated)" in result - assert len(result.splitlines()) <= 201 # 200 lines + truncation message - - @patch("pathlib.Path.resolve") - @patch("pathlib.Path.exists") - @patch("pathlib.Path.is_dir") - @patch("os.walk") - def test_fallback_tree_error(self, mock_walk, mock_is_dir, mock_exists, mock_resolve): - """Test error in fallback tree implementation.""" - # Setup mocks - mock_resolve.return_value = Path("test_dir") - mock_exists.return_value = True - mock_is_dir.return_value = True - mock_walk.side_effect = Exception("Unexpected error") - - # Execute fallback - tool = TreeTool() - result = tool._fallback_tree_implementation("test_dir") - - # Verify results - assert "Error generating directory tree" in result - assert "Unexpected error" in result \ No newline at end of file diff --git a/test_dir/test_utils.py b/test_dir/test_utils.py deleted file mode 100644 index 8c02ec1..0000000 --- a/test_dir/test_utils.py +++ /dev/null @@ -1,58 +0,0 @@ -""" -Tests for utility functions in src/cli_code/utils.py. -""" - -import pytest -from unittest.mock import patch, MagicMock - -# Update import to use absolute import path including 'src' -from src.cli_code.utils import count_tokens - -# Force module import for coverage -import src.cli_code.utils - - -def test_count_tokens_simple(): - """Test count_tokens with simple strings using tiktoken.""" - # These counts are based on gpt-4 tokenizer via tiktoken - assert count_tokens("Hello world") == 2 - assert count_tokens("This is a test.") == 5 - assert count_tokens("") == 0 - assert count_tokens(" ") == 1 # Spaces are often single tokens - - -def test_count_tokens_special_chars(): - """Test count_tokens with special characters using tiktoken.""" - assert count_tokens("Hello, world! How are you?") == 8 - # Emojis can be multiple tokens - # Note: Actual token count for emojis can vary - assert count_tokens("Testing emojis 👍🚀") > 3 - - -@patch("tiktoken.encoding_for_model") -def test_count_tokens_tiktoken_fallback(mock_encoding_for_model): - """Test count_tokens fallback mechanism when tiktoken fails.""" - # Simulate tiktoken raising an exception - mock_encoding_for_model.side_effect = Exception("Tiktoken error") - - # Test fallback (length // 4) - assert count_tokens("This is exactly sixteen chars") == 7 # 28 // 4 - assert count_tokens("Short") == 1 # 5 // 4 - assert count_tokens("") == 0 # 0 // 4 - assert count_tokens("123") == 0 # 3 // 4 - assert count_tokens("1234") == 1 # 4 // 4 - - -@patch("tiktoken.encoding_for_model") -def test_count_tokens_tiktoken_mocked_success(mock_encoding_for_model): - """Test count_tokens main path with tiktoken mocked.""" - # Create a mock encoding object with a mock encode method - mock_encode = MagicMock() - mock_encode.encode.return_value = [1, 2, 3, 4, 5] # Simulate encoding returning 5 tokens - - # Configure the mock context manager returned by encoding_for_model - mock_encoding_for_model.return_value = mock_encode - - assert count_tokens("Some text that doesn't matter now") == 5 - mock_encoding_for_model.assert_called_once_with("gpt-4") - mock_encode.encode.assert_called_once_with("Some text that doesn't matter now") diff --git a/test_dir/test_utils_comprehensive.py b/test_dir/test_utils_comprehensive.py deleted file mode 100644 index 5efb981..0000000 --- a/test_dir/test_utils_comprehensive.py +++ /dev/null @@ -1,91 +0,0 @@ -""" -Comprehensive tests for the utils module. 
-""" - -import unittest -import pytest -import sys -import os -from unittest.mock import patch, MagicMock - -# Setup proper import path -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../src'))) - -# Check if running in CI -IN_CI = os.environ.get('CI', 'false').lower() == 'true' - -# Try importing the module -try: - from cli_code.utils import count_tokens - IMPORTS_AVAILABLE = True -except ImportError: - IMPORTS_AVAILABLE = False - # Define a dummy function for testing when module is not available - def count_tokens(text): - return len(text) // 4 - -# Skip tests if imports not available and not in CI -SHOULD_SKIP = not IMPORTS_AVAILABLE and not IN_CI -SKIP_REASON = "Required imports not available and not in CI environment" - - -@pytest.mark.skipif(SHOULD_SKIP, reason=SKIP_REASON) -@pytest.mark.requires_tiktoken -class TestUtilsModule(unittest.TestCase): - """Test cases for the utils module functions.""" - - def test_count_tokens_with_tiktoken(self): - """Test token counting with tiktoken available.""" - # Test with empty string - assert count_tokens("") == 0 - - # Test with short texts - assert count_tokens("Hello") > 0 - assert count_tokens("Hello, world!") > count_tokens("Hello") - - # Test with longer content - long_text = "This is a longer piece of text that should contain multiple tokens. " * 10 - assert count_tokens(long_text) > 20 - - # Test with special characters - special_chars = "!@#$%^&*()_+={}[]|\\:;\"'<>,.?/" - assert count_tokens(special_chars) > 0 - - # Test with numbers - numbers = "12345 67890" - assert count_tokens(numbers) > 0 - - # Test with unicode characters - unicode_text = "こんにちは世界" # Hello world in Japanese - assert count_tokens(unicode_text) > 0 - - # Test with code snippets - code_snippet = """ - def example_function(param1, param2): - \"\"\"This is a docstring.\"\"\" - result = param1 + param2 - return result - """ - assert count_tokens(code_snippet) > 10 - - -@pytest.mark.skipif(SHOULD_SKIP, reason=SKIP_REASON) -@pytest.mark.requires_tiktoken -def test_count_tokens_mocked_failure(monkeypatch): - """Test the fallback method when tiktoken raises an exception.""" - def mock_encoding_that_fails(*args, **kwargs): - raise ImportError("Simulated import error") - - # Mock the tiktoken encoding to simulate a failure - if IMPORTS_AVAILABLE: - with patch('tiktoken.encoding_for_model', mock_encoding_that_fails): - # Test that the function returns a value using the fallback method - text = "This is a test string" - expected_approx = len(text) // 4 - result = count_tokens(text) - - # The fallback method is approximate, but should be close to this value - assert result == expected_approx - else: - # Skip if imports not available - pytest.skip("Imports not available to perform this test") \ No newline at end of file diff --git a/tests/tools/test_file_tools.py b/tests/tools/test_file_tools.py new file mode 100644 index 0000000..b70a09e --- /dev/null +++ b/tests/tools/test_file_tools.py @@ -0,0 +1,451 @@ +""" +Tests for the file operation tools. 
+""" + +import os +import pytest +import builtins +from unittest.mock import patch, MagicMock, mock_open + +# Import tools from the correct path +from src.cli_code.tools.file_tools import ViewTool, EditTool, GrepTool, GlobTool + +# --- Test Fixtures --- + +@pytest.fixture +def view_tool(): + """Provides an instance of ViewTool.""" + return ViewTool() + +@pytest.fixture +def edit_tool(): + """Provides an instance of EditTool.""" + return EditTool() + +@pytest.fixture +def grep_tool(): + """Provides an instance of GrepTool.""" + return GrepTool() + +@pytest.fixture +def glob_tool(): + """Provides an instance of GlobTool.""" + return GlobTool() + +@pytest.fixture +def test_fs(tmp_path): + """Creates a temporary file structure for view/edit testing.""" + small_file = tmp_path / "small.txt" + small_file.write_text("Line 1\nLine 2\nLine 3\nLine 4\nLine 5", encoding="utf-8") + + empty_file = tmp_path / "empty.txt" + empty_file.write_text("", encoding="utf-8") + + large_file_content = "L" * (60 * 1024) # Assuming MAX_CHARS is around 50k + large_file = tmp_path / "large.txt" + large_file.write_text(large_file_content, encoding="utf-8") + + test_dir = tmp_path / "test_dir" + test_dir.mkdir() + + return tmp_path + +@pytest.fixture +def grep_fs(tmp_path): + """Creates a temporary file structure for grep testing.""" + # Root files + (tmp_path / "file1.txt").write_text("Hello world\nSearch pattern here\nAnother line") + (tmp_path / "file2.log").write_text("Log entry 1\nAnother search hit") + (tmp_path / ".hiddenfile").write_text("Should be ignored") + + # Subdirectory + sub_dir = tmp_path / "subdir" + sub_dir.mkdir() + (sub_dir / "file3.txt").write_text("Subdir file\nContains pattern match") + (sub_dir / "file4.dat").write_text("Data file, no match") + + # Nested subdirectory + nested_dir = sub_dir / "nested" + nested_dir.mkdir() + (nested_dir / "file5.txt").write_text("Deeply nested pattern hit") + + # __pycache__ directory + pycache_dir = tmp_path / "__pycache__" + pycache_dir.mkdir() + (pycache_dir / "cache.pyc").write_text("ignore me pattern") + + return tmp_path + +# --- ViewTool Tests --- + +def test_view_small_file_entirely(view_tool, test_fs): + file_path = str(test_fs / "small.txt") + result = view_tool.execute(file_path=file_path) + expected_prefix = f"--- Full Content of {file_path} ---" + assert expected_prefix in result + assert "1 Line 1" in result + assert "5 Line 5" in result + assert len(result.strip().split('\n')) == 6 # Prefix + 5 lines + +def test_view_with_offset(view_tool, test_fs): + file_path = str(test_fs / "small.txt") + result = view_tool.execute(file_path=file_path, offset=3) + expected_prefix = f"--- Content of {file_path} (Lines 3-5) ---" + assert expected_prefix in result + assert "1 Line 1" not in result + assert "2 Line 2" not in result + assert "3 Line 3" in result + assert "5 Line 5" in result + assert len(result.strip().split('\n')) == 4 # Prefix + 3 lines + +def test_view_with_limit(view_tool, test_fs): + file_path = str(test_fs / "small.txt") + result = view_tool.execute(file_path=file_path, limit=2) + expected_prefix = f"--- Content of {file_path} (Lines 1-2) ---" + assert expected_prefix in result + assert "1 Line 1" in result + assert "2 Line 2" in result + assert "3 Line 3" not in result + assert len(result.strip().split('\n')) == 3 # Prefix + 2 lines + +def test_view_with_offset_and_limit(view_tool, test_fs): + file_path = str(test_fs / "small.txt") + result = view_tool.execute(file_path=file_path, offset=2, limit=2) + expected_prefix = f"--- Content of 
{file_path} (Lines 2-3) ---" + assert expected_prefix in result + assert "1 Line 1" not in result + assert "2 Line 2" in result + assert "3 Line 3" in result + assert "4 Line 4" not in result + assert len(result.strip().split('\n')) == 3 # Prefix + 2 lines + +def test_view_empty_file(view_tool, test_fs): + file_path = str(test_fs / "empty.txt") + result = view_tool.execute(file_path=file_path) + expected_prefix = f"--- Full Content of {file_path} ---" + assert expected_prefix in result + assert "(File is empty or slice resulted in no lines)" in result + +def test_view_non_existent_file(view_tool, test_fs): + file_path = str(test_fs / "nonexistent.txt") + result = view_tool.execute(file_path=file_path) + assert f"Error: File not found: {file_path}" in result + +def test_view_directory(view_tool, test_fs): + dir_path = str(test_fs / "test_dir") + result = view_tool.execute(file_path=dir_path) + assert f"Error: Cannot view a directory: {dir_path}" in result + +def test_view_invalid_path_parent_access(view_tool, test_fs): + # Note: tmp_path makes it hard to truly test ../ escaping sandbox + # We check if the tool's internal logic catches it anyway. + file_path = "../some_file.txt" + result = view_tool.execute(file_path=file_path) + assert f"Error: Invalid file path '{file_path}'. Cannot access parent directories." in result + +# Patch MAX_CHARS_FOR_FULL_CONTENT for this specific test +@patch('src.cli_code.tools.file_tools.MAX_CHARS_FOR_FULL_CONTENT', 1024) +def test_view_large_file_without_offset_limit(view_tool, test_fs): + file_path = str(test_fs / "large.txt") + result = view_tool.execute(file_path=file_path) + assert f"Error: File '{file_path}' is large. Use the 'summarize_code' tool" in result + +def test_view_offset_beyond_file_length(view_tool, test_fs): + file_path = str(test_fs / "small.txt") + result = view_tool.execute(file_path=file_path, offset=10) + expected_prefix = f"--- Content of {file_path} (Lines 10-9) ---" # End index reflects slice start + len + assert expected_prefix in result + assert "(File is empty or slice resulted in no lines)" in result + +def test_view_limit_zero(view_tool, test_fs): + file_path = str(test_fs / "small.txt") + result = view_tool.execute(file_path=file_path, limit=0) + expected_prefix = f"--- Content of {file_path} (Lines 1-0) ---" # End index calculation + assert expected_prefix in result + assert "(File is empty or slice resulted in no lines)" in result + +@patch('builtins.open', new_callable=mock_open) +def test_view_general_exception(mock_open_func, view_tool, test_fs): + mock_open_func.side_effect = Exception("Unexpected error") + file_path = str(test_fs / "small.txt") # Need a path for the tool to attempt + result = view_tool.execute(file_path=file_path) + assert "Error viewing file: Unexpected error" in result + +# --- EditTool Tests --- + +def test_edit_create_new_file_with_content(edit_tool, test_fs): + file_path = test_fs / "new_file.txt" + content = "Hello World!" + result = edit_tool.execute(file_path=str(file_path), content=content) + assert "Successfully wrote content" in result + assert file_path.read_text() == content + +def test_edit_overwrite_existing_file(edit_tool, test_fs): + file_path_obj = test_fs / "small.txt" + original_content = file_path_obj.read_text() + new_content = "Overwritten!" 
+ result = edit_tool.execute(file_path=str(file_path_obj), content=new_content) + assert "Successfully wrote content" in result + assert file_path_obj.read_text() == new_content + assert file_path_obj.read_text() != original_content + +def test_edit_replace_string(edit_tool, test_fs): + file_path = test_fs / "small.txt" # Content: "Line 1\nLine 2..." + result = edit_tool.execute(file_path=str(file_path), old_string="Line 2", new_string="Replaced Line") + assert "Successfully replaced first occurrence" in result + content = file_path.read_text() + assert "Line 1" in content + assert "Line 2" not in content + assert "Replaced Line" in content + assert "Line 3" in content + +def test_edit_delete_string(edit_tool, test_fs): + file_path = test_fs / "small.txt" + result = edit_tool.execute(file_path=str(file_path), old_string="Line 3\n", new_string="") # Include newline for exact match + assert "Successfully deleted first occurrence" in result + content = file_path.read_text() + assert "Line 2" in content + assert "Line 3" not in content + assert "Line 4" in content # Should follow Line 2 + +def test_edit_replace_string_not_found(edit_tool, test_fs): + file_path_obj = test_fs / "small.txt" + original_content = file_path_obj.read_text() + result = edit_tool.execute(file_path=str(file_path_obj), old_string="NonExistent", new_string="Replaced") + assert "Error: `old_string` not found" in result + assert file_path_obj.read_text() == original_content # File unchanged + +def test_edit_replace_in_non_existent_file(edit_tool, test_fs): + file_path = str(test_fs / "nonexistent.txt") + result = edit_tool.execute(file_path=file_path, old_string="a", new_string="b") + assert "Error: File not found for replacement" in result + +def test_edit_create_empty_file(edit_tool, test_fs): + file_path = test_fs / "new_empty.txt" + result = edit_tool.execute(file_path=str(file_path)) + assert "Successfully created/emptied file" in result + assert file_path.exists() + assert file_path.read_text() == "" + +def test_edit_create_file_with_dirs(edit_tool, test_fs): + file_path = test_fs / "new_dir" / "nested_file.txt" + content = "Nested content." + result = edit_tool.execute(file_path=str(file_path), content=content) + assert "Successfully wrote content" in result + assert file_path.exists() + assert file_path.read_text() == content + assert file_path.parent.is_dir() + +def test_edit_content_priority_warning(edit_tool, test_fs): + file_path = test_fs / "priority_test.txt" + content = "Content wins." + # Patch logging to check for warning + with patch('src.cli_code.tools.file_tools.log') as mock_log: + result = edit_tool.execute(file_path=str(file_path), content=content, old_string="a", new_string="b") + assert "Successfully wrote content" in result + assert file_path.read_text() == content + mock_log.warning.assert_called_once_with("Prioritizing 'content' over 'old/new_string'.") + +def test_edit_invalid_path_parent_access(edit_tool): + file_path = "../some_other_file.txt" + result = edit_tool.execute(file_path=file_path, content="test") + assert f"Error: Invalid file path '{file_path}'." 
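# [Editorial note, not part of the diff] test_edit_content_priority_warning above verifies a
# side effect (a logged warning) rather than a return value, by patching the module-level
# logger. The same pattern in isolation; write_content() and its logger are illustrative.
import logging
from unittest.mock import patch

log = logging.getLogger("example_module")


def write_content(path, content=None, old_string=None, new_string=None):
    if content is not None and old_string is not None:
        log.warning("Prioritizing 'content' over 'old/new_string'.")
    return "ok"


def test_priority_warning_is_logged():
    with patch.object(log, "warning") as mock_warning:
        write_content("f.txt", content="x", old_string="a", new_string="b")
        mock_warning.assert_called_once()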
in result + +def test_edit_directory(edit_tool, test_fs): + dir_path = str(test_fs / "test_dir") + # Test writing content to a directory + result_content = edit_tool.execute(file_path=dir_path, content="test") + assert f"Error: Cannot edit a directory: {dir_path}" in result_content + # Test replacing in a directory + result_replace = edit_tool.execute(file_path=dir_path, old_string="a", new_string="b") + assert f"Error reading file for replacement: [Errno 21] Is a directory: '{dir_path}'" in result_replace + +def test_edit_invalid_arguments(edit_tool): + file_path = "test.txt" + result = edit_tool.execute(file_path=file_path, old_string="a") # Missing new_string + assert "Error: Invalid arguments" in result + result = edit_tool.execute(file_path=file_path, new_string="b") # Missing old_string + assert "Error: Invalid arguments" in result + +@patch('builtins.open', new_callable=mock_open) +def test_edit_general_exception(mock_open_func, edit_tool): + mock_open_func.side_effect = IOError("Disk full") + file_path = "some_file.txt" + result = edit_tool.execute(file_path=file_path, content="test") + assert "Error editing file: Disk full" in result + +@patch('builtins.open', new_callable=mock_open) +def test_edit_read_exception_during_replace(mock_open_func, edit_tool): + # Mock setup: successful exists check, then fail on read + m = mock_open_func.return_value + m.read.side_effect = IOError("Read error") + + with patch('os.path.exists', return_value=True): + result = edit_tool.execute(file_path="existing.txt", old_string="a", new_string="b") + assert "Error reading file for replacement: Read error" in result + +# --- GrepTool Tests --- + +def test_grep_basic(grep_tool, grep_fs): + # Run from root of grep_fs + os.chdir(grep_fs) + result = grep_tool.execute(pattern="pattern") + # Should find in file1.txt, file3.txt, file5.txt + # Should NOT find in file2.log, file4.dat, .hiddenfile, __pycache__ + assert "./file1.txt:2: Search pattern here" in result + assert "subdir/file3.txt:2: Contains pattern match" in result + assert "subdir/nested/file5.txt:1: Deeply nested pattern hit" in result + assert "file2.log" not in result + assert "file4.dat" not in result + assert ".hiddenfile" not in result + assert "__pycache__" not in result + assert len(result.strip().split('\n')) == 3 + +def test_grep_in_subdir(grep_tool, grep_fs): + # Run from root, but specify subdir path + os.chdir(grep_fs) + result = grep_tool.execute(pattern="pattern", path="subdir") + assert "./file3.txt:2: Contains pattern match" in result + assert "nested/file5.txt:1: Deeply nested pattern hit" in result + assert "file1.txt" not in result + assert "file4.dat" not in result + assert len(result.strip().split('\n')) == 2 + +def test_grep_include_txt(grep_tool, grep_fs): + os.chdir(grep_fs) + # Include only .txt files in the root dir + result = grep_tool.execute(pattern="pattern", include="*.txt") + assert "./file1.txt:2: Search pattern here" in result + assert "subdir" not in result # Non-recursive by default + assert "file2.log" not in result + assert len(result.strip().split('\n')) == 1 + +def test_grep_include_recursive(grep_tool, grep_fs): + os.chdir(grep_fs) + # Include all .txt files recursively + result = grep_tool.execute(pattern="pattern", include="**/*.txt") + assert "./file1.txt:2: Search pattern here" in result + assert "subdir/file3.txt:2: Contains pattern match" in result + assert "subdir/nested/file5.txt:1: Deeply nested pattern hit" in result + assert "file2.log" not in result + assert len(result.strip().split('\n')) == 3 
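# [Editorial note, not part of the diff] The include-pattern tests above hinge on glob
# semantics: "*.txt" matches only the top level, while "**/*.txt" with recursive=True also
# descends into subdirectories. A tiny standalone demonstration of that difference.
import glob
import os
import re
import tempfile

with tempfile.TemporaryDirectory() as root:
    os.makedirs(os.path.join(root, "subdir"))
    for rel in ("a.txt", os.path.join("subdir", "b.txt")):
        with open(os.path.join(root, rel), "w", encoding="utf-8") as fh:
            fh.write("needle here\n")

    flat = glob.glob(os.path.join(root, "*.txt"))
    deep = glob.glob(os.path.join(root, "**", "*.txt"), recursive=True)
    assert len(flat) == 1 and len(deep) == 2

    pattern = re.compile("needle")
    hits = []
    for path in deep:
        with open(path, encoding="utf-8") as fh:
            if pattern.search(fh.read()):
                hits.append(path)
    assert len(hits) == 2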
+ +def test_grep_no_matches(grep_tool, grep_fs): + os.chdir(grep_fs) + pattern = "NonExistentPattern" + result = grep_tool.execute(pattern=pattern) + assert f"No matches found for pattern: {pattern}" in result + +def test_grep_include_no_matches(grep_tool, grep_fs): + os.chdir(grep_fs) + result = grep_tool.execute(pattern="pattern", include="*.nonexistent") + # The execute method returns based on regex matches, not file finding. + # If no files are found by glob, the loop won't run, results empty. + assert f"No matches found for pattern: pattern" in result + +def test_grep_invalid_regex(grep_tool, grep_fs): + os.chdir(grep_fs) + invalid_pattern = "[" + result = grep_tool.execute(pattern=invalid_pattern) + assert f"Error: Invalid regex pattern: {invalid_pattern}" in result + +def test_grep_invalid_path_parent(grep_tool): + result = grep_tool.execute(pattern="test", path="../somewhere") + assert "Error: Invalid path '../somewhere'." in result + +def test_grep_path_is_file(grep_tool, grep_fs): + os.chdir(grep_fs) + file_path = "file1.txt" + result = grep_tool.execute(pattern="test", path=file_path) + assert f"Error: Path is not a directory: {file_path}" in result + +@patch('builtins.open', new_callable=mock_open) +def test_grep_read_oserror(mock_open_method, grep_tool, grep_fs): + os.chdir(grep_fs) + # Make open raise OSError for a specific file + original_open = builtins.open + def patched_open(*args, **kwargs): + # Need to handle the file path correctly within the test + abs_file1_path = str(grep_fs / 'file1.txt') + abs_file2_path = str(grep_fs / 'file2.log') + if args[0] == abs_file1_path: + raise OSError("Permission denied") + # Allow reading file2.log + elif args[0] == abs_file2_path: + # If mocking open completely, need to provide mock file object + return mock_open(read_data="Log entry 1\nAnother search hit")(*args, **kwargs) + else: + # Fallback for other potential opens, or raise error + raise FileNotFoundError(f"Unexpected open call in test: {args[0]}") + mock_open_method.side_effect = patched_open + + # Patch glob to ensure file1.txt is considered + with patch('glob.glob', return_value=[str(grep_fs / 'file1.txt'), str(grep_fs / 'file2.log')]): + result = grep_tool.execute(pattern="search", include="*.*") + # Should only find the match in file2.log, skipping file1.txt due to OSError + assert "file1.txt" not in result + assert "./file2.log:2: Another search hit" in result + assert len(result.strip().split('\n')) == 1 + +@patch('glob.glob') +def test_grep_glob_exception(mock_glob, grep_tool, grep_fs): + os.chdir(grep_fs) + mock_glob.side_effect = Exception("Glob error") + result = grep_tool.execute(pattern="test", include="*.txt") + assert "Error finding files with include pattern: Glob error" in result + +@patch('os.walk') +def test_grep_general_exception(mock_walk, grep_tool): + # Need to change directory for os.walk patching to be effective if tool uses relative paths + # However, the tool converts path to absolute, so patching os.walk directly should work + mock_walk.side_effect = Exception("Walk error") + result = grep_tool.execute(pattern="test", path=".") # Execute in current dir + assert "Error searching files: Walk error" in result + +# --- GlobTool Tests --- + +def test_glob_basic(glob_tool, grep_fs): # Reusing grep_fs structure + os.chdir(grep_fs) + result = glob_tool.execute(pattern="*.txt") + results_list = sorted(result.strip().split('\n')) + assert "./file1.txt" in results_list + assert "./subdir/file3.txt" not in results_list # Not recursive + assert len(results_list) 
== 1 + +def test_glob_in_subdir(glob_tool, grep_fs): + os.chdir(grep_fs) + result = glob_tool.execute(pattern="*.txt", path="subdir") + results_list = sorted(result.strip().split('\n')) + assert "./file3.txt" in results_list + assert "./nested/file5.txt" not in results_list # Not recursive within subdir + assert len(results_list) == 1 + +def test_glob_recursive(glob_tool, grep_fs): + os.chdir(grep_fs) + result = glob_tool.execute(pattern="**/*.txt") + results_list = sorted(result.strip().split('\n')) + assert "./file1.txt" in results_list + assert "subdir/file3.txt" in results_list + assert "subdir/nested/file5.txt" in results_list + assert len(results_list) == 3 + +def test_glob_no_matches(glob_tool, grep_fs): + os.chdir(grep_fs) + result = glob_tool.execute(pattern="*.nonexistent") + assert "No files or directories found matching pattern: *.nonexistent" in result + +def test_glob_invalid_path_parent(glob_tool): + result = glob_tool.execute(pattern="*.txt", path="../somewhere") + assert "Error: Invalid path '../somewhere'." in result + +def test_glob_path_is_file(glob_tool, grep_fs): + os.chdir(grep_fs) + file_path = "file1.txt" + result = glob_tool.execute(pattern="*.txt", path=file_path) + assert f"Error: Path is not a directory: {file_path}" in result + +@patch('glob.glob') +def test_glob_general_exception(mock_glob, glob_tool): + mock_glob.side_effect = Exception("Globbing failed") + result = glob_tool.execute(pattern="*.txt") + assert "Error finding files: Globbing failed" in result \ No newline at end of file
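
Reviewer sketch (illustrative only, not part of the patch): the assertions above pin down the string output of the two tools. A minimal usage sketch follows; the import path and no-argument constructors are assumptions, while the execute keyword arguments and result formats are taken from the tests.

# Hypothetical usage sketch -- module path and constructors are assumed, not confirmed by this diff.
from cli_code.tools import GrepTool, GlobTool  # assumed import location under src/cli_code

grep = GrepTool()
globber = GlobTool()

# Per the tests, GrepTool.execute returns one "<relative/path>:<line>: <text>" entry per match,
# or "No matches found for pattern: <pattern>" when nothing matches.
print(grep.execute(pattern="pattern", path="subdir", include="**/*.txt"))

# Per the tests, GlobTool.execute returns newline-separated relative paths,
# or "No files or directories found matching pattern: <pattern>".
print(globber.execute(pattern="**/*.txt"))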