3 changes: 2 additions & 1 deletion .github/workflows/python-ci.yml
@@ -51,6 +51,7 @@ jobs:
- name: Test with pytest and Generate Coverage
env:
GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
PYTHONPATH: ${{ github.workspace }}/src
# Ensure API key exists for tests that might need it, but allow skipping
run: |
# List available test files to debug
@@ -59,7 +60,7 @@

# Run pytest with more comprehensive coverage
# Generate both XML and HTML reports
python -m pytest --cov=src/cli_code --cov-report=term --cov-report=xml --cov-report=html --verbose test_dir/
python -m pytest --cov=cli_code --cov-report=term --cov-report=xml --cov-report=html --verbose test_dir/

# Display the overall coverage percentage
echo "Overall coverage percentage:"
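The workflow change above pairs the new `PYTHONPATH` entry with a coverage target of `cli_code` (the importable package name) rather than the `src/cli_code` path. A rough local mirror of that CI step, assuming the package lives under `src/` and that `pytest`/`pytest-cov` are installed, could look like the sketch below; it is essentially what the new `run_tests_with_coverage.py` script later in this diff automates.

```python
# Minimal local mirror of the CI test step; paths and env handling are assumptions.
import os
import subprocess
import sys

env = dict(os.environ, PYTHONPATH="src")  # same effect as the workflow's PYTHONPATH entry
subprocess.run(
    [
        sys.executable, "-m", "pytest",
        "--cov=cli_code",        # measure the package by import name, not by src/ path
        "--cov-report=term",
        "--cov-report=xml",
        "--cov-report=html",
        "test_dir/",
    ],
    env=env,
    check=False,
)
```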
5 changes: 5 additions & 0 deletions .pytest.ini
@@ -0,0 +1,5 @@
[pytest]
markers =
integration: marks tests as integration tests (requires API keys)
slow: marks tests as slow running
python_paths = src
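The two markers registered here are applied with `pytest.mark` decorators; `pytest -m "not integration and not slow"` then deselects them, and `pytest -m integration` runs only the API-backed tests. A small illustrative sketch (the test names and bodies below are hypothetical):

```python
# Hypothetical examples of the markers declared in .pytest.ini above.
import pytest


@pytest.mark.integration
def test_gemini_round_trip():
    """Would exercise the real Gemini API, so it needs GEMINI_API_KEY."""


@pytest.mark.slow
def test_full_repository_scan():
    """Long-running scenario; skip locally with `pytest -m "not slow"`."""
```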
23 changes: 19 additions & 4 deletions README.md
@@ -235,15 +235,30 @@ pip install -e ".[dev]"
# Run tests
python -m pytest

# Run coverage analysis
./run_coverage.sh
# Run coverage analysis with the new convenience script
python run_tests_with_coverage.py --html

# Run SonarCloud analysis locally (requires sonar-scanner CLI)
./run_sonar_scan.sh
# For more options:
python run_tests_with_coverage.py --help
```

The project uses [pytest](https://docs.pytest.org/) for testing and [SonarCloud](https://sonarcloud.io/) for code quality and coverage analysis.

### Code Coverage

We've implemented comprehensive code coverage tracking to ensure the quality and reliability of the codebase. Coverage reports are generated in HTML and XML formats for:

- Local development with the `run_tests_with_coverage.py` script
- CI/CD pipeline with GitHub Actions
- SonarCloud analysis for visualizing coverage over time

To improve code coverage, focus on:
1. Adding tests for any new code
2. Identifying and filling gaps in existing test coverage
3. Testing edge cases and error handling paths

Our coverage goal is to maintain at least 70% overall code coverage.
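One way to act on item 2 in the list above is to inspect the coverage data programmatically after a test run; a hedged sketch using coverage.py's API (the module path shown is illustrative):

```python
# Assumes a prior pytest-cov run has written a .coverage data file in the project root.
import coverage

cov = coverage.Coverage()
cov.load()
# analysis2 returns (filename, statements, excluded, missing, missing_formatted)
_, statements, _, missing, _ = cov.analysis2("src/cli_code/config.py")
print(f"{len(missing)} of {len(statements)} statements lack coverage: {missing}")
```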

## Contributing

Contributions are welcome! Please feel free to submit a Pull Request.
3 changes: 3 additions & 0 deletions pyproject.toml
@@ -102,6 +102,9 @@ omit = [
"*/docs/*",
"*/test_*",
"*/__pycache__/*",
"*/venv/*",
"*/.pytest_cache/*",
"*/site-packages/*",
]

# Add these new sections for better coverage configuration
92 changes: 92 additions & 0 deletions run_tests_with_coverage.py
@@ -0,0 +1,92 @@
#!/usr/bin/env python
"""
Script to run tests with coverage reporting.

This script makes it easy to run the test suite with coverage reporting
and see which parts of the code need more test coverage.

Usage:
python run_tests_with_coverage.py
"""

import os
import sys
import subprocess
import argparse
import webbrowser
from pathlib import Path


def main():
parser = argparse.ArgumentParser(description="Run tests with coverage reporting")
parser.add_argument("--html", action="store_true", help="Open HTML report after running")
parser.add_argument("--xml", action="store_true", help="Generate XML report")
parser.add_argument("--skip-tests", action="store_true", help="Skip running tests and just report on existing coverage data")
parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
args = parser.parse_args()

# Get the root directory of the project
root_dir = Path(__file__).parent

# Change to the root directory
os.chdir(root_dir)

# Add the src directory to Python path to ensure proper imports
sys.path.insert(0, str(root_dir / 'src'))

if not args.skip_tests:
# Ensure we have the necessary packages
print("Installing required packages...")
subprocess.run([sys.executable, "-m", "pip", "install", "pytest", "pytest-cov"],
check=False)

# Run pytest with coverage
print("\nRunning tests with coverage...")
cmd = [
sys.executable, "-m", "pytest",
"--cov=cli_code",
"--cov-report=term",
]

# Add XML report if requested
if args.xml:
cmd.append("--cov-report=xml")

# Always generate HTML report
cmd.append("--cov-report=html")

# Add verbosity if requested
if args.verbose:
cmd.append("-v")

# Run tests
result = subprocess.run(cmd + ["test_dir/"], check=False)

if result.returncode != 0:
print("\n⚠️ Some tests failed! See above for details.")
else:
print("\n✅ All tests passed!")

# Parse coverage results
try:
html_report = root_dir / "coverage_html" / "index.html"

if html_report.exists():
if args.html:
print(f"\nOpening HTML coverage report: {html_report}")
webbrowser.open(f"file://{html_report.absolute()}")
else:
print(f"\nHTML coverage report available at: file://{html_report.absolute()}")

xml_report = root_dir / "coverage.xml"
if args.xml and xml_report.exists():
print(f"XML coverage report available at: {xml_report}")

except Exception as e:
print(f"Error processing coverage reports: {e}")

print("\nDone!")


if __name__ == "__main__":
main()
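One assumption worth noting in the script above: it looks for the HTML report at `coverage_html/index.html`, while coverage.py's default HTML output directory is `htmlcov/` unless a different directory is configured (presumably in the coverage settings of `pyproject.toml`). A small hypothetical helper that tolerates either location:

```python
# Hypothetical helper; directory names follow the assumptions stated above.
from pathlib import Path
from typing import Optional


def find_html_report(root: Path) -> Optional[Path]:
    """Return the coverage HTML index from coverage_html/ or the default htmlcov/."""
    for candidate in ("coverage_html", "htmlcov"):
        report = root / candidate / "index.html"
        if report.exists():
            return report
    return None
```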
185 changes: 185 additions & 0 deletions test_dir/test_config_missing_methods.py
@@ -0,0 +1,185 @@
"""
Tests for Config class methods that might have been missed in existing tests.
"""

import os
import tempfile
import pytest
from pathlib import Path
import yaml
from unittest.mock import patch, mock_open

from cli_code.config import Config


@pytest.fixture
def temp_config_dir():
"""Creates a temporary directory for the config file."""
with tempfile.TemporaryDirectory() as tmpdir:
yield Path(tmpdir)


@pytest.fixture
def mock_config():
"""Return a Config instance with mocked file operations."""
with patch('cli_code.config.Config._load_dotenv'), \
patch('cli_code.config.Config._ensure_config_exists'), \
patch('cli_code.config.Config._load_config', return_value={}), \
patch('cli_code.config.Config._apply_env_vars'):
config = Config()
# Set some test data
config.config = {
"google_api_key": "test-google-key",
"default_provider": "gemini",
"default_model": "models/gemini-1.0-pro",
"ollama_api_url": "http://localhost:11434",
"ollama_default_model": "llama2",
"settings": {
"max_tokens": 1000,
"temperature": 0.7,
}
}
yield config


def test_get_credential(mock_config):
"""Test get_credential method."""
# Test existing provider
assert mock_config.get_credential("google") == "test-google-key"

# Test non-existing provider
assert mock_config.get_credential("non_existing") is None

# Test with empty config
mock_config.config = {}
assert mock_config.get_credential("google") is None


def test_set_credential(mock_config):
"""Test set_credential method."""
# Test setting existing provider
mock_config.set_credential("google", "new-google-key")
assert mock_config.config["google_api_key"] == "new-google-key"

# Test setting new provider
mock_config.set_credential("openai", "test-openai-key")
assert mock_config.config["openai_api_key"] == "test-openai-key"

# Test with None value
mock_config.set_credential("google", None)
assert mock_config.config["google_api_key"] is None


def test_get_default_provider(mock_config):
"""Test get_default_provider method."""
# Test with existing provider
assert mock_config.get_default_provider() == "gemini"

# Test with no provider set
mock_config.config["default_provider"] = None
assert mock_config.get_default_provider() == "gemini" # Should return default

# Test with empty config
mock_config.config = {}
assert mock_config.get_default_provider() == "gemini" # Should return default


def test_set_default_provider(mock_config):
"""Test set_default_provider method."""
# Test setting valid provider
mock_config.set_default_provider("openai")
assert mock_config.config["default_provider"] == "openai"

# Test setting None (should use default)
mock_config.set_default_provider(None)
assert mock_config.config["default_provider"] == "gemini"


def test_get_default_model(mock_config):
"""Test get_default_model method."""
# Test without provider (use default provider)
assert mock_config.get_default_model() == "models/gemini-1.0-pro"

# Test with specific provider
assert mock_config.get_default_model("ollama") == "llama2"

# Test with non-existing provider
assert mock_config.get_default_model("non_existing") is None


def test_set_default_model(mock_config):
"""Test set_default_model method."""
# Test with default provider
mock_config.set_default_model("new-model")
assert mock_config.config["default_model"] == "new-model"

# Test with specific provider
mock_config.set_default_model("new-ollama-model", "ollama")
assert mock_config.config["ollama_default_model"] == "new-ollama-model"

# Test with new provider
mock_config.set_default_model("anthropic-model", "anthropic")
assert mock_config.config["anthropic_default_model"] == "anthropic-model"


def test_get_setting(mock_config):
"""Test get_setting method."""
# Test existing setting
assert mock_config.get_setting("max_tokens") == 1000
assert mock_config.get_setting("temperature") == 0.7

# Test non-existing setting with default
assert mock_config.get_setting("non_existing", "default_value") == "default_value"

# Test with empty settings
mock_config.config["settings"] = {}
assert mock_config.get_setting("max_tokens", 2000) == 2000


def test_set_setting(mock_config):
"""Test set_setting method."""
# Test updating existing setting
mock_config.set_setting("max_tokens", 2000)
assert mock_config.config["settings"]["max_tokens"] == 2000

# Test adding new setting
mock_config.set_setting("new_setting", "new_value")
assert mock_config.config["settings"]["new_setting"] == "new_value"

# Test with no settings dict
mock_config.config.pop("settings")
mock_config.set_setting("test_setting", "test_value")
assert mock_config.config["settings"]["test_setting"] == "test_value"


def test_save_config():
"""Test _save_config method."""
with patch('builtins.open', mock_open()) as mock_file, \
patch('yaml.dump') as mock_yaml_dump, \
patch('cli_code.config.Config._load_dotenv'), \
patch('cli_code.config.Config._ensure_config_exists'), \
patch('cli_code.config.Config._load_config', return_value={}), \
patch('cli_code.config.Config._apply_env_vars'):

config = Config()
config.config = {"test": "data"}
config._save_config()

mock_file.assert_called_once()
mock_yaml_dump.assert_called_once_with({"test": "data"}, mock_file(), default_flow_style=False)


def test_save_config_error():
"""Test error handling in _save_config method."""
with patch('builtins.open', side_effect=PermissionError("Permission denied")), \
patch('cli_code.config.log.error') as mock_log_error, \
patch('cli_code.config.Config._load_dotenv'), \
patch('cli_code.config.Config._ensure_config_exists'), \
patch('cli_code.config.Config._load_config', return_value={}), \
patch('cli_code.config.Config._apply_env_vars'):

config = Config()
config._save_config()

# Verify error was logged
assert mock_log_error.called